repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_testing.py | sklearn/utils/_testing.py | """Testing utilities."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import atexit
import contextlib
import functools
import importlib
import inspect
import os
import os.path as op
import re
import shutil
import sys
import tempfile
import textwrap
import unittest
import warnings
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from dataclasses import dataclass
from difflib import context_diff
from functools import wraps
from inspect import signature
from itertools import chain, groupby
from subprocess import STDOUT, CalledProcessError, TimeoutExpired, check_output
import joblib
import numpy as np
import scipy as sp
from numpy.testing import assert_allclose as np_assert_allclose
from numpy.testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
)
from sklearn import __file__ as sklearn_path
from sklearn.utils import (
ClassifierTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
)
from sklearn.utils._array_api import _check_array_api_dispatch
from sklearn.utils.fixes import (
_IS_32BIT,
VisibleDeprecationWarning,
_in_unstable_openblas_configuration,
)
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
# Public API of this testing-helpers module: the numpy.testing assertions we
# re-export plus our own dtype-aware/subprocess variants.
__all__ = [
    "SkipTest",
    "assert_allclose",
    "assert_almost_equal",
    "assert_array_almost_equal",
    "assert_array_equal",
    "assert_array_less",
    "assert_run_python_script_without_output",
]
# Re-export so test code can raise SkipTest without importing unittest itself.
SkipTest = unittest.case.SkipTest
def ignore_warnings(obj=None, category=Warning):
    """Context manager and decorator to ignore warnings.
    Note: Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging, this is not your tool of choice.
    Parameters
    ----------
    obj : callable, default=None
        callable where you want to ignore the warnings.
    category : warning class, default=Warning
        The category to filter. If Warning, all categories will be muted.
    Examples
    --------
    >>> import warnings
    >>> from sklearn.utils._testing import ignore_warnings
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')
    >>> def nasty_warn():
    ...     warnings.warn('buhuhuhu')
    ...     print(42)
    >>> ignore_warnings(nasty_warn)()
    42
    """
    passed_a_warning_class = isinstance(obj, type) and issubclass(obj, Warning)
    if passed_a_warning_class:
        # Guard against the common mistake of passing the warning category as
        # the first positional argument, which would silently skip the test.
        warning_name = obj.__name__
        raise ValueError(
            "'obj' should be a callable where you want to ignore warnings. "
            "You passed a warning class instead: 'obj={warning_name}'. "
            "If you want to pass a warning class to ignore_warnings, "
            "you should use 'category={warning_name}'".format(warning_name=warning_name)
        )
    if callable(obj):
        # Decorator usage: wrap the callable immediately.
        return _IgnoreWarnings(category=category)(obj)
    # Context-manager usage: hand back the manager itself.
    return _IgnoreWarnings(category=category)
class _IgnoreWarnings:
"""Improved and simplified Python warnings context manager and decorator.
This class allows the user to ignore the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
category : tuple of warning class, default=Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules["warnings"]
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules["warnings"]:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
warnings.simplefilter("ignore", self.category)
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
def assert_allclose(
    actual, desired, rtol=None, atol=0.0, equal_nan=True, err_msg="", verbose=True
):
    """dtype-aware variant of numpy.testing.assert_allclose
    This variant introspects the least precise floating point dtype
    in the input argument and automatically sets the relative tolerance
    parameter to 1e-4 for float32 and uses 1e-7 otherwise (typically float64
    in scikit-learn).
    `atol` is always left to 0. by default. It should be adjusted manually
    to an assertion-specific value in case there are null values expected
    in `desired`.
    The aggregate tolerance is `atol + rtol * abs(desired)`.
    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional, default=None
        Relative tolerance.
        If None, it is set based on the provided arrays' dtypes.
    atol : float, optional, default=0.
        Absolute tolerance.
    equal_nan : bool, optional, default=True
        If True, NaNs will compare equal.
    err_msg : str, optional, default=''
        The error message to be printed in case of failure.
    verbose : bool, optional, default=True
        If True, the conflicting values are appended to the error message.
    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.
    See Also
    --------
    numpy.testing.assert_allclose
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils._testing import assert_allclose
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> assert_allclose(x, y, rtol=1e-5, atol=0)
    >>> a = np.full(shape=10, fill_value=1e-5, dtype=np.float32)
    >>> assert_allclose(a, 1e-5)
    """
    # Fix: the original initialized a dead `dtypes = []` that was immediately
    # overwritten; build the dtype list once from the coerced arrays instead.
    actual, desired = np.asanyarray(actual), np.asanyarray(desired)
    if rtol is None:
        # Pick the loosest tolerance implied by the least precise dtype:
        # 1e-4 if either side is float32, else 1e-7.
        rtols = [
            1e-4 if dtype == np.float32 else 1e-7
            for dtype in (actual.dtype, desired.dtype)
        ]
        rtol = max(rtols)
    np_assert_allclose(
        actual,
        desired,
        rtol=rtol,
        atol=atol,
        equal_nan=equal_nan,
        err_msg=err_msg,
        verbose=verbose,
    )
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=""):
    """Assert allclose for sparse and dense data.
    Both x and y need to be either sparse or dense, they
    can't be mixed.
    Parameters
    ----------
    x : {array-like, sparse matrix}
        First array to compare.
    y : {array-like, sparse matrix}
        Second array to compare.
    rtol : float, default=1e-07
        relative tolerance; see numpy.allclose.
    atol : float, default=1e-9
        absolute tolerance; see numpy.allclose. Note that the default here is
        more tolerant than the default for numpy.testing.assert_allclose, where
        atol=0.
    err_msg : str, default=''
        Error message to raise.
    """
    x_is_sparse = sp.sparse.issparse(x)
    y_is_sparse = sp.sparse.issparse(y)
    if x_is_sparse != y_is_sparse:
        # Mixing sparse and dense inputs is ambiguous; refuse outright.
        raise ValueError(
            "Can only compare two sparse matrices, not a sparse matrix and an array."
        )
    if x_is_sparse:
        # Canonicalize both operands before comparing their internals.
        x, y = x.tocsr(), y.tocsr()
        x.sum_duplicates()
        y.sum_duplicates()
        assert_array_equal(x.indices, y.indices, err_msg=err_msg)
        assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
        assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
    else:
        # Both dense: compare values directly.
        assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
def set_random_state(estimator, random_state=0):
    """Set random state of an estimator if it has the `random_state` param.
    Parameters
    ----------
    estimator : object
        The estimator.
    random_state : int, RandomState instance or None, default=0
        Pseudo random number generator state.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    """
    # Only touch estimators that actually expose a `random_state` parameter;
    # others are left untouched.
    params = estimator.get_params()
    if "random_state" in params:
        estimator.set_params(random_state=random_state)
def _is_numpydoc():
try:
import numpydoc # noqa: F401
except (ImportError, AssertionError):
return False
else:
return True
# Probe once, at import time, whether array API dispatch can be turned on
# (this can fail when the required compat packages or NumPy/SciPy versions
# are missing). The flag feeds the pytest marker defined below.
try:
    _check_array_api_dispatch(True)
    ARRAY_API_COMPAT_FUNCTIONAL = True
except (ImportError, RuntimeError):
    ARRAY_API_COMPAT_FUNCTIONAL = False
# The pytest markers below are optional: when pytest is not installed
# (e.g. in a bare runtime environment) this whole section is skipped.
try:
    import pytest
    skip_if_32bit = pytest.mark.skipif(_IS_32BIT, reason="skipped on 32bit platforms")
    fails_if_unstable_openblas = pytest.mark.xfail(
        _in_unstable_openblas_configuration(),
        reason="OpenBLAS is unstable for this configuration",
    )
    skip_if_no_parallel = pytest.mark.skipif(
        not joblib.parallel.mp, reason="joblib is in serial mode"
    )
    skip_if_array_api_compat_not_configured = pytest.mark.skipif(
        not ARRAY_API_COMPAT_FUNCTIONAL,
        reason="SCIPY_ARRAY_API not set, or versions of NumPy/SciPy too old.",
    )
    # Decorator for tests involving both BLAS calls and multiprocessing.
    #
    # Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
    # with some implementation of BLAS (or other libraries that manage an
    # internal posix thread pool) can cause a crash or a freeze of the Python
    # process.
    #
    # In practice all known packaged distributions (from Linux distros or
    # Anaconda) of BLAS under Linux seem to be safe. So this problem seems
    # to only impact OSX users.
    #
    # This wrapper makes it possible to skip tests that can possibly cause
    # this crash under OS X.
    #
    # Under Python 3.4+ it is possible to use the `forkserver` start method
    # for multiprocessing to avoid this issue. However it can cause pickling
    # errors on interactively defined functions. It is therefore not enabled
    # by default.
    if_safe_multiprocessing_with_blas = pytest.mark.skipif(
        sys.platform == "darwin", reason="Possible multi-process bug with some BLAS"
    )
    skip_if_no_numpydoc = pytest.mark.skipif(
        not _is_numpydoc(),
        reason="numpydoc is required to test the docstrings",
    )
except ImportError:
    pass
def check_skip_network():
    """Raise SkipTest when SKLEARN_SKIP_NETWORK_TESTS is set to a truthy int."""
    flag = os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except OSError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap:
    """Context manager yielding a memmap-backed copy of `data`.
    The backing temporary folder is deleted on exit.
    Parameters
    ----------
    data
    mmap_mode : str, default='r'
    """

    def __init__(self, data, mmap_mode="r"):
        self.mmap_mode = mmap_mode
        self.data = data

    def __enter__(self):
        backed, folder = create_memmap_backed_data(
            self.data, mmap_mode=self.mmap_mode, return_folder=True
        )
        # Remember the folder so __exit__ can clean it up.
        self.temp_folder = folder
        return backed

    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)
def create_memmap_backed_data(data, mmap_mode="r", return_folder=False):
    """Pickle `data` into a fresh temp folder and reload it memory-mapped.
    Parameters
    ----------
    data
    mmap_mode : str, default='r'
    return_folder : bool, default=False
    """
    temp_folder = tempfile.mkdtemp(prefix="sklearn_testing_")
    # Best-effort cleanup at interpreter exit; warn if the folder survives.
    atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
    dump_path = op.join(temp_folder, "data.pkl")
    joblib.dump(data, dump_path)
    memmap_backed_data = joblib.load(dump_path, mmap_mode=mmap_mode)
    if return_folder:
        return memmap_backed_data, temp_folder
    return memmap_backed_data
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments."""
try:
params = signature(function).parameters
except ValueError:
# Error on builtin C function
return []
args = [
key
for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
]
if varargs:
varargs = [
param.name
for param in params.values()
if param.kind == param.VAR_POSITIONAL
]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func):
"""Get function full name.
Parameters
----------
func : callable
The function object.
Returns
-------
name : str
The function name.
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
qualname = func.__qualname__
if qualname != func.__name__:
parts.append(qualname[: qualname.find(".")])
parts.append(func.__name__)
return ".".join(parts)
def check_docstring_parameters(func, doc=None, ignore=None):
    """Check that a numpydoc docstring's Parameters match the signature.
    Parameters
    ----------
    func : callable
        The function object to test.
    doc : str, default=None
        Docstring if it is passed manually to the test.
    ignore : list, default=None
        Parameters to ignore.
    Returns
    -------
    incorrect : list
        A list of strings describing the incorrect results; empty when the
        docstring and signature agree.
    """
    from numpydoc import docscrape
    incorrect = []
    ignore = [] if ignore is None else ignore
    func_name = _get_func_name(func)
    # Only check sklearn's own public code, never vendored externals.
    if not func_name.startswith("sklearn.") or func_name.startswith(
        "sklearn.externals"
    ):
        return incorrect
    # Don't check docstring for property-functions
    if inspect.isdatadescriptor(func):
        return incorrect
    # Don't check docstring for setup / teardown pytest functions
    if func_name.split(".")[-1] in ("setup_module", "teardown_module"):
        return incorrect
    # Dont check estimator_checks module
    # NOTE: index [2] assumes names of the form "sklearn.<submodule>.<...>".
    if func_name.split(".")[2] == "estimator_checks":
        return incorrect
    # Get the arguments from the function signature
    param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
    # drop self
    if len(param_signature) > 0 and param_signature[0] == "self":
        param_signature.remove("self")
    # Analyze function's docstring
    if doc is None:
        records = []
        # Escalate UserWarning to an error so numpydoc parsing problems are
        # surfaced instead of silently ignored.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error", UserWarning)
            try:
                doc = docscrape.FunctionDoc(func)
            except UserWarning as exp:
                if "potentially wrong underline length" in str(exp):
                    # Catch warning raised as of numpydoc 1.2 when
                    # the underline length for a section of a docstring
                    # is not consistent.
                    message = str(exp).split("\n")[:3]
                    incorrect += [f"In function: {func_name}"] + message
                    return incorrect
                records.append(str(exp))
            except Exception as exp:
                incorrect += [func_name + " parsing error: " + str(exp)]
                return incorrect
        if len(records):
            raise RuntimeError("Error for %s:\n%s" % (func_name, records[0]))
    param_docs = []
    for name, type_definition, param_doc in doc["Parameters"]:
        # Type hints are empty only if parameter name ended with :
        if not type_definition.strip():
            if ":" in name and name[: name.index(":")][-1:].strip():
                incorrect += [
                    func_name
                    + " There was no space between the param name and colon (%r)" % name
                ]
            elif name.rstrip().endswith(":"):
                incorrect += [
                    func_name
                    + " Parameter %r has an empty type spec. Remove the colon"
                    % (name.lstrip())
                ]
        # Create a list of parameters to compare with the parameters gotten
        # from the func signature
        if "*" not in name:
            param_docs.append(name.split(":")[0].strip("` "))
    # If one of the docstring's parameters had an error then return that
    # incorrect message
    if len(incorrect) > 0:
        return incorrect
    # Remove the parameters that should be ignored from list
    param_docs = list(filter(lambda x: x not in ignore, param_docs))
    # The following is derived from pytest, Copyright (c) 2004-2017 Holger
    # Krekel and others, Licensed under MIT License. See
    # https://github.com/pytest-dev/pytest
    message = []
    # Report only the first positional mismatch to keep the message short.
    for i in range(min(len(param_docs), len(param_signature))):
        if param_signature[i] != param_docs[i]:
            message += [
                "There's a parameter name mismatch in function"
                " docstring w.r.t. function signature, at index %s"
                " diff: %r != %r" % (i, param_signature[i], param_docs[i])
            ]
            break
    if len(param_signature) > len(param_docs):
        message += [
            "Parameters in function docstring have less items w.r.t."
            " function signature, first missing item: %s"
            % param_signature[len(param_docs)]
        ]
    elif len(param_signature) < len(param_docs):
        message += [
            "Parameters in function docstring have more items w.r.t."
            " function signature, first extra item: %s"
            % param_docs[len(param_signature)]
        ]
    # If there wasn't any difference in the parameters themselves between
    # docstring and signature including having the same length then return
    # empty list
    if len(message) == 0:
        return []
    import difflib
    import pprint
    param_docs_formatted = pprint.pformat(param_docs).splitlines()
    param_signature_formatted = pprint.pformat(param_signature).splitlines()
    message += ["Full diff:"]
    message.extend(
        line.strip()
        for line in difflib.ndiff(param_signature_formatted, param_docs_formatted)
    )
    incorrect.extend(message)
    # Prepend function name
    incorrect = ["In function: " + func_name] + incorrect
    return incorrect
def _check_item_included(item_name, args):
"""Helper to check if item should be included in checking."""
if args.include is not True and item_name not in args.include:
return False
if args.exclude is not None and item_name in args.exclude:
return False
return True
def _diff_key(line):
"""Key for grouping output from `context_diff`."""
if line.startswith(" "):
return " "
elif line.startswith("- "):
return "- "
elif line.startswith("+ "):
return "+ "
elif line.startswith("! "):
return "! "
return None
def _get_diff_msg(docstrings_grouped):
    """Get message showing the difference between type/desc docstrings of all objects.
    `docstrings_grouped` keys should be the type/desc docstrings and values are a list
    of objects with that docstring. Objects with the same type/desc docstring are
    thus grouped together.
    """
    msg_diff = ""
    ref_str = ""
    ref_group = []
    for docstring, group in docstrings_grouped.items():
        # The first entry becomes the reference; every group (including the
        # reference itself, which yields an empty diff body) is compared
        # against it word by word.
        if not ref_str and not ref_group:
            ref_str += docstring
            ref_group.extend(group)
        diff = list(
            context_diff(
                ref_str.split(),
                docstring.split(),
                fromfile=str(ref_group),
                tofile=str(group),
                n=8,
            )
        )
        # Add header
        msg_diff += "".join((diff[:3]))
        # Group consecutive 'diff' words to shorten error message
        # NOTE: `group` is rebound here, shadowing the loop variable above;
        # it is not used again after this inner loop, so this is safe.
        for start, group in groupby(diff[3:], key=_diff_key):
            if start is None:
                msg_diff += "\n" + "\n".join(group)
            else:
                msg_diff += "\n" + start + " ".join(word[2:] for word in group)
        # Add new lines at end of diff, to separate comparisons
        msg_diff += "\n\n"
    return msg_diff
def _check_consistency_items(
    items_docs,
    type_or_desc,
    section,
    n_objects,
    descr_regex_pattern="",
    ignore_types=tuple(),
):
    """Helper to check docstring consistency of all `items_docs`.
    If item is not present in all objects, checking is skipped and warning raised.
    If `regex` provided, match descriptions to all descriptions.
    Parameters
    ----------
    items_doc : dict of dict of str
        Dictionary where the key is the string type or description, value is
        a dictionary where the key is "type description" or "description"
        and the value is a list of object names with the same string type or
        description.
    type_or_desc : {"type description", "description"}
        Whether to check type description or description between objects.
    section : {"Parameters", "Attributes", "Returns"}
        Name of the section type.
    n_objects : int
        Total number of objects.
    descr_regex_pattern : str, default=""
        Regex pattern to match for description of all objects.
        Ignored when `type_or_desc="type description"`.
    ignore_types : tuple of str, default=()
        Tuple of parameter/attribute/return names for which type description
        matching is ignored. Ignored when `type_or_desc="description"`.
    """
    skipped = []
    for item_name, docstrings_grouped in items_docs.items():
        # If item not found in all objects, skip
        if sum([len(objs) for objs in docstrings_grouped.values()]) < n_objects:
            skipped.append(item_name)
        # If regex provided, match to all descriptions
        elif type_or_desc == "description" and descr_regex_pattern:
            not_matched = []
            for docstring, group in docstrings_grouped.items():
                if not re.search(descr_regex_pattern, docstring):
                    not_matched.extend(group)
            if not_matched:
                msg = textwrap.fill(
                    f"The description of {section[:-1]} '{item_name}' in {not_matched}"
                    f" does not match 'descr_regex_pattern': {descr_regex_pattern} "
                )
                raise AssertionError(msg)
        # Skip type checking for items in `ignore_types`
        elif type_or_desc == "type specification" and item_name in ignore_types:
            continue
        # Otherwise, if more than one key, docstrings not consistent between objects
        elif len(docstrings_grouped.keys()) > 1:
            msg_diff = _get_diff_msg(docstrings_grouped)
            obj_groups = " and ".join(
                str(group) for group in docstrings_grouped.values()
            )
            # `section[:-1]` turns e.g. "Parameters" into "Parameter".
            msg = textwrap.fill(
                f"The {type_or_desc} of {section[:-1]} '{item_name}' is inconsistent "
                f"between {obj_groups}:"
            )
            msg += msg_diff
            raise AssertionError(msg)
    if skipped:
        warnings.warn(
            f"Checking was skipped for {section}: {skipped} as they were "
            "not found in all objects."
        )
def assert_docstring_consistency(
    objects,
    include_params=False,
    exclude_params=None,
    include_attrs=False,
    exclude_attrs=None,
    include_returns=False,
    exclude_returns=None,
    descr_regex_pattern=None,
    ignore_types=tuple(),
):
    r"""Check consistency between docstring parameters/attributes/returns of objects.
    Checks if parameters/attributes/returns have the same type specification and
    description (ignoring whitespace) across `objects`. Intended to be used for
    related classes/functions/data descriptors.
    Entries that do not appear across all `objects` are ignored.
    Parameters
    ----------
    objects : list of {classes, functions, data descriptors}
        Objects to check.
        Objects may be classes, functions or data descriptors with docstrings that
        can be parsed by numpydoc.
    include_params : list of str or bool, default=False
        List of parameters to be included. If True, all parameters are included,
        if False, checking is skipped for parameters.
        Can only be set if `exclude_params` is None.
    exclude_params : list of str or None, default=None
        List of parameters to be excluded. If None, no parameters are excluded.
        Can only be set if `include_params` is True.
    include_attrs : list of str or bool, default=False
        List of attributes to be included. If True, all attributes are included,
        if False, checking is skipped for attributes.
        Can only be set if `exclude_attrs` is None.
    exclude_attrs : list of str or None, default=None
        List of attributes to be excluded. If None, no attributes are excluded.
        Can only be set if `include_attrs` is True.
    include_returns : list of str or bool, default=False
        List of returns to be included. If True, all returns are included,
        if False, checking is skipped for returns.
        Can only be set if `exclude_returns` is None.
    exclude_returns : list of str or None, default=None
        List of returns to be excluded. If None, no returns are excluded.
        Can only be set if `include_returns` is True.
    descr_regex_pattern : str, default=None
        Regular expression to match to all descriptions of included
        parameters/attributes/returns. If None, will revert to default behavior
        of comparing descriptions between objects.
    ignore_types : tuple of str, default=tuple()
        Tuple of parameter/attribute/return names to exclude from type description
        matching between objects.
    Examples
    --------
    >>> from sklearn.metrics import (accuracy_score, classification_report,
    ...     mean_absolute_error, mean_squared_error, median_absolute_error)
    >>> from sklearn.utils._testing import assert_docstring_consistency
    ... # doctest: +SKIP
    >>> assert_docstring_consistency([mean_absolute_error, mean_squared_error],
    ...     include_params=['y_true', 'y_pred', 'sample_weight'])  # doctest: +SKIP
    >>> assert_docstring_consistency([median_absolute_error, mean_squared_error],
    ...     include_params=True)  # doctest: +SKIP
    >>> assert_docstring_consistency([accuracy_score, classification_report],
    ...     include_params=["y_true"],
    ...     descr_regex_pattern=r"Ground truth \(correct\) (labels|target values)")
    ... # doctest: +SKIP
    """
    from numpydoc.docscrape import NumpyDocString
    Args = namedtuple("args", ["include", "exclude", "arg_name"])
    def _create_args(include, exclude, arg_name, section_name):
        # `exclude_*` is only meaningful when the matching `include_*` is True.
        if exclude and include is not True:
            raise TypeError(
                f"The 'exclude_{arg_name}' argument can be set only when the "
                f"'include_{arg_name}' argument is True."
            )
        if include is False:
            return {}
        return {section_name: Args(include, exclude, arg_name)}
    # Map each numpydoc section name to its include/exclude configuration;
    # sections with include=False are dropped entirely.
    section_args = {
        **_create_args(include_params, exclude_params, "params", "Parameters"),
        **_create_args(include_attrs, exclude_attrs, "attrs", "Attributes"),
        **_create_args(include_returns, exclude_returns, "returns", "Returns"),
    }
    objects_doc = dict()
    for obj in objects:
        if (
            inspect.isdatadescriptor(obj)
            or inspect.isfunction(obj)
            or inspect.isclass(obj)
        ):
            objects_doc[obj.__name__] = NumpyDocString(inspect.getdoc(obj))
        else:
            raise TypeError(
                "All 'objects' must be one of: function, class or descriptor, "
                f"got a: {type(obj)}."
            )
    n_objects = len(objects)
    for section, args in section_args.items():
        type_items = defaultdict(lambda: defaultdict(list))
        desc_items = defaultdict(lambda: defaultdict(list))
        for obj_name, obj_doc in objects_doc.items():
            for item_name, type_def, desc in obj_doc[section]:
                if _check_item_included(item_name, args):
                    # Normalize white space
                    type_def = " ".join(type_def.strip().split())
                    desc = " ".join(chain.from_iterable(line.split() for line in desc))
                    # Use string type/desc as key, to group consistent objs together
                    type_items[item_name][type_def].append(obj_name)
                    desc_items[item_name][desc].append(obj_name)
        _check_consistency_items(
            type_items,
            "type specification",
            section,
            n_objects,
            ignore_types=ignore_types,
        )
        _check_consistency_items(
            desc_items,
            "description",
            section,
            n_objects,
            descr_regex_pattern=descr_regex_pattern,
        )
def assert_run_python_script_without_output(source_code, pattern=".+", timeout=60):
    """Run `source_code` in a fresh Python process and check its output.
    The script should exit with status 0 and its combined stdout + stderr
    must not match `pattern`.
    This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
    Parameters
    ----------
    source_code : str
        The Python source code to execute.
    pattern : str
        Pattern that the stdout + stderr should not match. By default, unless
        stdout + stderr are both empty, an error will be raised.
    timeout : int, default=60
        Time in seconds before timeout.
    """
    fd, source_file = tempfile.mkstemp(suffix="_src_test_sklearn.py")
    os.close(fd)
    try:
        with open(source_file, "wb") as f:
            f.write(source_code.encode("utf-8"))
        cmd = [sys.executable, source_file]
        # Run from the repository root so the child process imports this
        # checkout of sklearn.
        cwd = op.normpath(op.join(op.dirname(sklearn_path), ".."))
        env = os.environ.copy()
        if "PYTHONPATH" in env:
            env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
        else:
            env["PYTHONPATH"] = cwd
        kwargs = {"cwd": cwd, "stderr": STDOUT, "env": env, "timeout": timeout}
        # If coverage is running, pass the config file to the subprocess
        coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
        if coverage_rc:
            kwargs["env"]["COVERAGE_PROCESS_START"] = coverage_rc
        try:
            try:
                out = check_output(cmd, **kwargs)
            except CalledProcessError as e:
                raise RuntimeError(
                    "script errored with output:\n%s" % e.output.decode("utf-8")
                )
            decoded = out.decode("utf-8")
            if re.search(pattern, decoded):
                if pattern == ".+":
                    expectation = "Expected no output"
                else:
                    expectation = f"The output was not supposed to match {pattern!r}"
                raise AssertionError(
                    f"{expectation}, got the following output instead: {decoded!r}"
                )
        except TimeoutExpired as e:
            raise RuntimeError(
                "script timeout, output so far:\n%s" % e.output.decode("utf-8")
            )
    finally:
        os.unlink(source_file)
def _convert_container(
container,
constructor_name,
columns_name=None,
dtype=None,
minversion=None,
categorical_feature_names=None,
):
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_indexing.py | sklearn/utils/_indexing.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import sys
import warnings
from collections import UserList
from itertools import compress, islice
import numpy as np
from scipy.sparse import issparse
from sklearn.utils._array_api import (
_is_numpy_namespace,
get_namespace,
get_namespace_and_device,
move_to,
)
from sklearn.utils._dataframe import (
is_pandas_df,
is_polars_df_or_series,
is_pyarrow_data,
)
from sklearn.utils._param_validation import Interval, validate_params
from sklearn.utils.extmath import _approximate_mode
from sklearn.utils.fixes import PYARROW_VERSION_BELOW_17
from sklearn.utils.validation import (
_check_sample_weight,
_is_arraylike_not_scalar,
_use_interchange_protocol,
check_array,
check_consistent_length,
check_random_state,
)
def _array_indexing(array, key, key_dtype, axis):
    """Index an array or scipy.sparse consistently across NumPy version."""
    xp, is_array_api, device_ = get_namespace_and_device(array)
    if is_array_api:
        # Non-NumPy array namespace: make sure the key lives in the same
        # namespace/device as `array` before indexing.
        if hasattr(key, "shape"):
            key = move_to(key, xp=xp, device=device_)
        elif isinstance(key, (int, slice)):
            # Passthrough for valid __getitem__ inputs as noted in the array
            # API spec.
            pass
        else:
            key = xp.asarray(key, device=device_)
        if hasattr(key, "dtype"):
            if xp.isdtype(key.dtype, "integral"):
                return xp.take(array, key, axis=axis)
            elif xp.isdtype(key.dtype, "bool"):
                # Array API does not support boolean indexing for n-dim arrays
                # yet hence the need to turn to equivalent integer indexing.
                indices = xp.arange(array.shape[axis], device=device_)
                return xp.take(array, indices[key], axis=axis)
    # NumPy / scipy.sparse path from here on.
    if issparse(array) and key_dtype == "bool":
        # Sparse matrices require a materialized boolean mask.
        key = np.asarray(key)
    if isinstance(key, tuple):
        # Tuples would be interpreted as multi-axis indices; lists are not.
        key = list(key)
    return array[key, ...] if axis == 0 else array[:, key]
def _pandas_indexing(X, key, key_dtype, axis):
    """Index a pandas dataframe or a series."""
    if _is_arraylike_not_scalar(key):
        key = np.asarray(key)
    positional_array = key_dtype == "int" and not (
        isinstance(key, slice) or np.isscalar(key)
    )
    if positional_array:
        # take() returns a proper copy (unlike iloc), which avoids
        # SettingWithCopyWarning downstream.
        return X.take(key, axis=axis)
    # Positional keys go through iloc, label-based keys through loc.
    indexer = X.iloc if key_dtype == "int" else X.loc
    if axis:
        return indexer[:, key]
    return indexer[key]
def _list_indexing(X, key, key_dtype):
"""Index a Python list."""
if np.isscalar(key) or isinstance(key, slice):
# key is a slice or a scalar
return X[key]
if key_dtype == "bool":
# key is a boolean array-like
return list(compress(X, key))
# key is an integer array-like of key
return [X[idx] for idx in key]
def _polars_indexing(X, key, key_dtype, axis):
    """Index a polars dataframe or series."""
    # Polars indexing behaves most consistently with plain Python lists.
    if isinstance(key, np.ndarray):
        # Convert every element to a Python scalar.
        key = key.tolist()
    elif not (np.isscalar(key) or isinstance(key, slice)):
        key = list(key)
    if axis == 1:
        # X must be a DataFrame here; column selection accepts scalar
        # int/str and lists of int/str/bool.
        return X[:, key]
    if key_dtype == "bool":
        # Row masks use the same filter() API on both Series and DataFrame.
        return X.filter(key)
    # Integer scalars and lists of integers index rows uniformly for both
    # Series and DataFrame.
    subset = X[key]
    if np.isscalar(key) and len(X.shape) == 2:
        # Scalar row selection on a DataFrame: return a Series to stay
        # consistent with pandas.
        pl = sys.modules["polars"]
        return pl.Series(subset.row(0))
    return subset
def _pyarrow_indexing(X, key, key_dtype, axis):
    """Index a pyarrow data."""
    # `X` may be a pyarrow Table/RecordBatch (2D, has `shape`) or a
    # (Chunked)Array (1D). `key` may be a scalar, slice, or array-like;
    # `key_dtype` is the result of `_determine_key_type(key)`.
    scalar_key = np.isscalar(key)
    if isinstance(key, slice):
        # Normalize slices into an explicit list of integer positions. String
        # bounds (column names) are resolved to positions first, with an
        # inclusive endpoint like pandas label slicing.
        if isinstance(key.stop, str):
            start = X.column_names.index(key.start)
            stop = X.column_names.index(key.stop) + 1
        else:
            start = 0 if not key.start else key.start
            stop = key.stop
        step = 1 if not key.step else key.step
        key = list(range(start, stop, step))

    if axis == 1:
        # Here we are certain that X is a pyarrow Table or RecordBatch.
        if key_dtype == "int" and not isinstance(key, list):
            # pyarrow's X.select behavior is more consistent with integer lists.
            key = np.asarray(key).tolist()
        if key_dtype == "bool":
            # select() does not take boolean masks; convert to the positions
            # of the True entries.
            key = np.asarray(key).nonzero()[0].tolist()
        if scalar_key:
            return X.column(key)
        return X.select(key)

    # axis == 0 from here on
    if scalar_key:
        if hasattr(X, "shape"):
            # X is a Table or RecordBatch; take() expects a list of row indices.
            key = [key]
        else:
            # X is a (Chunked)Array; return the selected row as a Python scalar.
            return X[key].as_py()
    elif not isinstance(key, list):
        key = np.asarray(key)

    if key_dtype == "bool":
        # TODO(pyarrow): remove version checking and following if-branch when
        # pyarrow==17.0.0 is the minimal version, see pyarrow issue
        # https://github.com/apache/arrow/issues/42013 for more info
        if PYARROW_VERSION_BELOW_17:
            import pyarrow

            if not isinstance(key, pyarrow.BooleanArray):
                key = pyarrow.array(key, type=pyarrow.bool_())
        X_indexed = X.filter(key)
    else:
        X_indexed = X.take(key)

    if scalar_key and len(getattr(X, "shape", [0])) == 2:
        # X_indexed is a dataframe-like with a single row; we return a Series to be
        # consistent with pandas
        pa = sys.modules["pyarrow"]
        return pa.array(X_indexed.to_pylist()[0].values())
    return X_indexed
def _determine_key_type(key, accept_slice=True):
"""Determine the data type of key.
Parameters
----------
key : scalar, slice or array-like
The key from which we want to infer the data type.
accept_slice : bool, default=True
Whether or not to raise an error if the key is a slice.
Returns
-------
dtype : {'int', 'str', 'bool', None}
Returns the data type of key.
"""
err_msg = (
"No valid specification of the columns. Only a scalar, list or "
"slice of all integers or all strings, or boolean mask is "
"allowed"
)
dtype_to_str = {int: "int", str: "str", bool: "bool", np.bool_: "bool"}
array_dtype_to_str = {
"i": "int",
"u": "int",
"b": "bool",
"O": "str",
"U": "str",
"S": "str",
}
if key is None:
return None
if isinstance(key, tuple(dtype_to_str.keys())):
try:
return dtype_to_str[type(key)]
except KeyError:
raise ValueError(err_msg)
if isinstance(key, slice):
if not accept_slice:
raise TypeError(
"Only array-like or scalar are supported. A Python slice was given."
)
if key.start is None and key.stop is None:
return None
key_start_type = _determine_key_type(key.start)
key_stop_type = _determine_key_type(key.stop)
if key_start_type is not None and key_stop_type is not None:
if key_start_type != key_stop_type:
raise ValueError(err_msg)
if key_start_type is not None:
return key_start_type
return key_stop_type
# TODO(1.9) remove UserList when the force_int_remainder_cols param
# of ColumnTransformer is removed
if isinstance(key, (list, tuple, UserList)):
unique_key = set(key)
key_type = {_determine_key_type(elt) for elt in unique_key}
if not key_type:
return None
if len(key_type) != 1:
raise ValueError(err_msg)
return key_type.pop()
if hasattr(key, "dtype"):
xp, is_array_api = get_namespace(key)
# NumPy arrays are special-cased in their own branch because the Array API
# cannot handle object/string-based dtypes that are often used to index
# columns of dataframes by names.
if is_array_api and not _is_numpy_namespace(xp):
if xp.isdtype(key.dtype, "bool"):
return "bool"
elif xp.isdtype(key.dtype, "integral"):
return "int"
else:
raise ValueError(err_msg)
else:
try:
return array_dtype_to_str[key.dtype.kind]
except KeyError:
raise ValueError(err_msg)
raise ValueError(err_msg)
def _safe_indexing(X, indices, *, axis=0):
    """Return rows, items or columns of X using indices.

    .. warning::

        This utility is documented, but **private**. This means that
        backward compatibility might be broken without any deprecation
        cycle.

    Parameters
    ----------
    X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
        Data from which to sample rows, items or columns. `list` are only
        supported when `axis=0`.
    indices : bool, int, str, slice, array-like
        - If `axis=0`, boolean and integer array-like, integer slice,
          and scalar integer are supported.
        - If `axis=1`:
            - to select a single column, `indices` can be of `int` type for
              all `X` types and `str` only for dataframe. The selected subset
              will be 1D, unless `X` is a sparse matrix in which case it will
              be 2D.
            - to select multiples columns, `indices` can be one of the
              following: `list`, `array`, `slice`. The type used in
              these containers can be one of the following: `int`, 'bool' and
              `str`. However, `str` is only supported when `X` is a dataframe.
              The selected subset will be 2D.
    axis : int, default=0
        The axis along which `X` will be subsampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    subset
        Subset of X on axis 0 or 1.

    Notes
    -----
    CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
    not supported.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils import _safe_indexing
    >>> data = np.array([[1, 2], [3, 4], [5, 6]])
    >>> _safe_indexing(data, 0, axis=0)  # select the first row
    array([1, 2])
    >>> _safe_indexing(data, 0, axis=1)  # select the first column
    array([1, 3, 5])
    """
    if indices is None:
        return X

    if axis not in (0, 1):
        raise ValueError(
            "'axis' should be either 0 (to index rows) or 1 (to index "
            " column). Got {} instead.".format(axis)
        )

    indices_dtype = _determine_key_type(indices)

    if axis == 0 and indices_dtype == "str":
        raise ValueError(
            f"String indexing (indices={indices}) is not supported with 'axis=0'. "
            "Did you mean to use axis=1 for column selection?"
        )

    if axis == 1 and isinstance(X, list):
        raise ValueError("axis=1 is not supported for lists")

    if axis == 1 and (ndim := len(getattr(X, "shape", [0]))) != 2:
        raise ValueError(
            "'X' should be a 2D NumPy array, 2D sparse matrix or "
            "dataframe when indexing the columns (i.e. 'axis=1'). "
            f"Got {type(X)} instead with {ndim} dimension(s)."
        )

    if (
        axis == 1
        and indices_dtype == "str"
        and not (is_pandas_df(X) or _use_interchange_protocol(X))
    ):
        raise ValueError(
            "Specifying the columns using strings is only supported for dataframes."
        )

    if hasattr(X, "iloc"):
        # TODO: we should probably use is_pandas_df_or_series(X) instead but:
        # 1) Currently, it (probably) works for dataframes compliant to pandas' API.
        # 2) Updating would require updating some tests such as
        #    test_train_test_split_mock_pandas.
        return _pandas_indexing(X, indices, indices_dtype, axis=axis)
    elif is_polars_df_or_series(X):
        return _polars_indexing(X, indices, indices_dtype, axis=axis)
    elif is_pyarrow_data(X):
        return _pyarrow_indexing(X, indices, indices_dtype, axis=axis)
    elif _use_interchange_protocol(X):  # pragma: no cover
        # Once the dataframe X is converted into its dataframe interchange protocol
        # version by calling X.__dataframe__(), it becomes very hard to turn it back
        # into its original type, e.g., a pyarrow.Table, see
        # https://github.com/data-apis/dataframe-api/issues/85.
        # Bug fix: this used to be `raise warnings.warn(...)`. `warnings.warn`
        # returns None, so the `raise` statement raised a TypeError instead of
        # emitting the warning. Emit the warning and fall through to the
        # generic array/list indexing, as the message says. Also fixed the
        # missing space between "protocol" and "was" in the message.
        warnings.warn(
            message="A data object with support for the dataframe interchange "
            "protocol was passed, but scikit-learn does currently not know how "
            "to handle this kind of data. Some array/list indexing will be tried.",
            category=UserWarning,
        )

    if hasattr(X, "shape"):
        return _array_indexing(X, indices, indices_dtype, axis=axis)
    else:
        return _list_indexing(X, indices, indices_dtype)
def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
"""Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
Parameters
----------
X : {ndarray, sparse-matrix, dataframe}
Array to be modified. It is expected to be 2-dimensional.
values : ndarray
The values to be assigned to `X`.
row_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the rows of interest. If `None`, all
rows are selected.
column_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the columns of interest. If `None`, all
columns are selected.
"""
row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
column_indexer = (
slice(None, None, None) if column_indexer is None else column_indexer
)
if hasattr(X, "iloc"): # pandas dataframe
with warnings.catch_warnings():
# pandas >= 1.5 raises a warning when using iloc to set values in a column
# that does not have the same type as the column being set. It happens
# for instance when setting a categorical column with a string.
# In the future the behavior won't change and the warning should disappear.
# TODO(1.3): check if the warning is still raised or remove the filter.
warnings.simplefilter("ignore", FutureWarning)
X.iloc[row_indexer, column_indexer] = values
else: # numpy array or sparse matrix
X[row_indexer, column_indexer] = values
def _get_column_indices_for_bool_or_int(key, n_columns):
    """Convert a boolean/integer `key` into a list of positive column indices."""
    try:
        selected = _safe_indexing(np.arange(n_columns), key)
    except IndexError as exc:
        raise ValueError(
            f"all features must be in [0, {n_columns - 1}] or [-{n_columns}, 0]"
        ) from exc
    return np.atleast_1d(selected).tolist()
def _get_column_indices(X, key):
    """Get feature column indices for input data X and key.

    For accepted values of `key`, see the docstring of
    :func:`_safe_indexing`.
    """
    key_dtype = _determine_key_type(key)
    if _use_interchange_protocol(X):
        # Dataframes that only expose the interchange protocol are handled by
        # a dedicated helper working on the X.__dataframe__() object.
        return _get_column_indices_interchange(X.__dataframe__(), key, key_dtype)

    n_columns = X.shape[1]
    if isinstance(key, (list, tuple)) and not key:
        # we get an empty list
        return []
    elif key_dtype in ("bool", "int"):
        return _get_column_indices_for_bool_or_int(key, n_columns)
    else:
        # String-based selection: only possible for dataframes with a
        # `columns` attribute.
        try:
            all_columns = X.columns
        except AttributeError:
            raise ValueError(
                "Specifying the columns using strings is only supported for dataframes."
            )
        if isinstance(key, str):
            columns = [key]
        elif isinstance(key, slice):
            start, stop = key.start, key.stop
            if start is not None:
                start = all_columns.get_loc(start)
            if stop is not None:
                # pandas indexing with strings is endpoint included
                stop = all_columns.get_loc(stop) + 1
            else:
                stop = n_columns + 1
            return list(islice(range(n_columns), start, stop))
        else:
            columns = list(key)

        try:
            column_indices = []
            for col in columns:
                col_idx = all_columns.get_loc(col)
                if not isinstance(col_idx, numbers.Integral):
                    # get_loc returns a slice or a boolean mask (not a single
                    # integer) when the label appears more than once.
                    raise ValueError(
                        f"Selected columns, {columns}, are not unique in dataframe"
                    )
                column_indices.append(col_idx)

        except KeyError as e:
            raise ValueError("A given column is not a column of the dataframe") from e

        return column_indices
def _get_column_indices_interchange(X_interchange, key, key_dtype):
"""Same as _get_column_indices but for X with __dataframe__ protocol."""
n_columns = X_interchange.num_columns()
if isinstance(key, (list, tuple)) and not key:
# we get an empty list
return []
elif key_dtype in ("bool", "int"):
return _get_column_indices_for_bool_or_int(key, n_columns)
else:
column_names = list(X_interchange.column_names())
if isinstance(key, slice):
if key.step not in [1, None]:
raise NotImplementedError("key.step must be 1 or None")
start, stop = key.start, key.stop
if start is not None:
start = column_names.index(start)
if stop is not None:
stop = column_names.index(stop) + 1
else:
stop = n_columns + 1
return list(islice(range(n_columns), start, stop))
selected_columns = [key] if np.isscalar(key) else key
try:
return [column_names.index(col) for col in selected_columns]
except ValueError as e:
raise ValueError("A given column is not a column of the dataframe") from e
@validate_params(
    {
        "replace": ["boolean"],
        "n_samples": [Interval(numbers.Integral, 1, None, closed="left"), None],
        "random_state": ["random_state"],
        "stratify": ["array-like", "sparse matrix", None],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def resample(
    *arrays,
    replace=True,
    n_samples=None,
    random_state=None,
    stratify=None,
    sample_weight=None,
):
    """Resample arrays or sparse matrices in a consistent way.

    The default strategy implements one step of the bootstrapping
    procedure.

    Parameters
    ----------
    *arrays : sequence of array-like of shape (n_samples,) or \
            (n_samples, n_outputs)
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    replace : bool, default=True
        Implements resampling with replacement. It must be set to True
        whenever sampling with non-uniform weights: a few data points with very large
        weights are expected to be sampled several times with probability to preserve
        the distribution induced by the weights. If False, this will implement
        (sliced) random permutations.

    n_samples : int, default=None
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.
        If replace is False it should not be larger than the length of
        arrays.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for shuffling
        the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    stratify : {array-like, sparse matrix} of shape (n_samples,) or \
            (n_samples, n_outputs), default=None
        If not None, data is split in a stratified fashion, using this as
        the class labels.

    sample_weight : array-like of shape (n_samples,), default=None
        Contains weight values to be associated with each sample. Values are
        normalized to sum to one and interpreted as probability for sampling
        each data point.

        .. versionadded:: 1.7

    Returns
    -------
    resampled_arrays : sequence of array-like of shape (n_samples,) or \
            (n_samples, n_outputs)
        Sequence of resampled copies of the collections. The original arrays
        are not impacted.

    See Also
    --------
    shuffle : Shuffle arrays or sparse matrices in a consistent way.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> import numpy as np
      >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import resample
      >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
      >>> X
      array([[1., 0.],
             [2., 1.],
             [1., 0.]])

      >>> X_sparse
      <Compressed Sparse Row sparse matrix of dtype 'float64'
          with 4 stored elements and shape (3, 2)>

      >>> X_sparse.toarray()
      array([[1., 0.],
             [2., 1.],
             [1., 0.]])

      >>> y
      array([0, 1, 0])

      >>> resample(y, n_samples=2, random_state=0)
      array([0, 1])

    Example using stratification::

      >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]
      >>> resample(y, n_samples=5, replace=False, stratify=y,
      ...          random_state=0)
      [1, 1, 1, 0, 1]
    """
    max_n_samples = n_samples
    random_state = check_random_state(random_state)

    if len(arrays) == 0:
        return None

    first = arrays[0]
    # The number of available samples is taken from the first array.
    n_samples = first.shape[0] if hasattr(first, "shape") else len(first)

    if max_n_samples is None:
        max_n_samples = n_samples
    elif (max_n_samples > n_samples) and (not replace):
        raise ValueError(
            "Cannot sample %d out of arrays with dim %d when replace is False"
            % (max_n_samples, n_samples)
        )

    check_consistent_length(*arrays)

    if sample_weight is not None and not replace:
        raise NotImplementedError(
            "Resampling with sample_weight is only implemented for replace=True."
        )
    if sample_weight is not None and stratify is not None:
        raise NotImplementedError(
            "Resampling with sample_weight is only implemented for stratify=None."
        )

    if stratify is None:
        if replace:
            if sample_weight is not None:
                sample_weight = _check_sample_weight(
                    sample_weight, first, dtype=np.float64
                )
                # Normalize the weights into a probability vector for
                # np.random.choice.
                p = sample_weight / sample_weight.sum()
            else:
                p = None
            indices = random_state.choice(
                n_samples,
                size=max_n_samples,
                p=p,
                replace=True,
            )
        else:
            # Without replacement: a random permutation truncated to the
            # requested number of samples.
            indices = np.arange(n_samples)
            random_state.shuffle(indices)
            indices = indices[:max_n_samples]
    else:
        # Code adapted from StratifiedShuffleSplit()
        y = check_array(stratify, ensure_2d=False, dtype=None)
        if y.ndim == 2:
            # for multi-label y, map each distinct row to a string repr
            # using join because str(row) uses an ellipsis if len(row) > 1000
            y = np.array([" ".join(row.astype("str")) for row in y])

        classes, y_indices = np.unique(y, return_inverse=True)
        n_classes = classes.shape[0]

        class_counts = np.bincount(y_indices)

        # Find the sorted list of instances for each class:
        # (np.unique above performs a sort, so code is O(n logn) already)
        class_indices = np.split(
            np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
        )

        # Allocate per-class sample counts that approximate the class priors.
        n_i = _approximate_mode(class_counts, max_n_samples, random_state)

        indices = []

        for i in range(n_classes):
            indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace)
            indices.extend(indices_i)

        # Shuffle so that samples drawn from different classes are interleaved.
        indices = random_state.permutation(indices)

    # convert sparse matrices to CSR for row-based indexing
    arrays = [a.tocsr() if issparse(a) else a for a in arrays]
    resampled_arrays = [_safe_indexing(a, indices) for a in arrays]
    if len(resampled_arrays) == 1:
        # syntactic sugar for the unit argument case
        return resampled_arrays[0]
    else:
        return resampled_arrays
def shuffle(*arrays, random_state=None, n_samples=None):
    """Shuffle arrays or sparse matrices in a consistent way.

    This is a convenience alias to ``resample(*arrays, replace=False)`` to do
    random permutations of the collections.

    Parameters
    ----------
    *arrays : sequence of indexable data-structures
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for shuffling
        the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    n_samples : int, default=None
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays. It should
        not be larger than the length of arrays.

    Returns
    -------
    shuffled_arrays : sequence of indexable data-structures
        Sequence of shuffled copies of the collections. The original arrays
        are not impacted.

    See Also
    --------
    resample : Resample arrays or sparse matrices in a consistent way.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> import numpy as np
      >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import shuffle
      >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
      >>> X
      array([[0., 0.],
             [2., 1.],
             [1., 0.]])

      >>> X_sparse
      <Compressed Sparse Row sparse matrix of dtype 'float64'
          with 3 stored elements and shape (3, 2)>

      >>> X_sparse.toarray()
      array([[0., 0.],
             [2., 1.],
             [1., 0.]])

      >>> y
      array([2, 1, 0])

      >>> shuffle(y, n_samples=2, random_state=0)
      array([0, 1])
    """
    # Shuffling is just resampling without replacement; delegate everything.
    options = {
        "replace": False,
        "n_samples": n_samples,
        "random_state": random_state,
    }
    return resample(*arrays, **options)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_available_if.py | sklearn/utils/_available_if.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from functools import update_wrapper, wraps
from types import MethodType
class _AvailableIfDescriptor:
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if check(self) returns a falsey value. Note that if check raises an error
this will also result in hasattr returning false.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, check, attribute_name):
self.fn = fn
self.check = check
self.attribute_name = attribute_name
# update the docstring of the descriptor
update_wrapper(self, fn)
def _check(self, obj, owner):
attr_err_msg = (
f"This {owner.__name__!r} has no attribute {self.attribute_name!r}"
)
try:
check_result = self.check(obj)
except Exception as e:
raise AttributeError(attr_err_msg) from e
if not check_result:
raise AttributeError(attr_err_msg)
def __get__(self, obj, owner=None):
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self._check(obj, owner=owner)
out = MethodType(self.fn, obj)
else:
# This makes it possible to use the decorated method as an unbound method,
# for instance when monkeypatching.
@wraps(self.fn)
def out(*args, **kwargs):
self._check(args[0], owner=owner)
return self.fn(*args, **kwargs)
return out
def available_if(check):
    """An attribute that is available only if check returns a truthy value.

    Parameters
    ----------
    check : callable
        When passed the object with the decorated method, this should return
        a truthy value if the attribute is available, and either return False
        or raise an AttributeError if not available.

    Returns
    -------
    callable
        Callable makes the decorated method available if `check` returns
        a truthy value, otherwise the decorated method is unavailable.

    Examples
    --------
    >>> from sklearn.utils.metaestimators import available_if
    >>> class HelloIfEven:
    ...    def __init__(self, x):
    ...        self.x = x
    ...
    ...    def _x_is_even(self):
    ...        return self.x % 2 == 0
    ...
    ...    @available_if(_x_is_even)
    ...    def say_hello(self):
    ...        print("Hello")
    ...
    >>> obj = HelloIfEven(1)
    >>> hasattr(obj, "say_hello")
    False
    >>> obj.x = 2
    >>> hasattr(obj, "say_hello")
    True
    >>> obj.say_hello()
    Hello
    """

    def decorate(fn):
        # Wrap the method in a descriptor that gates attribute access on
        # `check`.
        return _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)

    return decorate
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/fixes.py | sklearn/utils/fixes.py | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import struct
import numpy as np
import scipy
import scipy.sparse.linalg
import scipy.stats
try:
import pandas as pd
except ImportError:
pd = None
from sklearn.externals._packaging.version import parse as parse_version
from sklearn.utils.parallel import _get_threadpool_controller
_IS_32BIT = 8 * struct.calcsize("P") == 32
_IS_WASM = platform.machine() in ["wasm32", "wasm64"]
np_version = parse_version(np.__version__)
np_base_version = parse_version(np_version.base_version)
sp_version = parse_version(scipy.__version__)
sp_base_version = parse_version(sp_version.base_version)
# TODO: We can consider removing the containers and importing
# directly from SciPy when sparse matrices will be deprecated.
CSR_CONTAINERS = [scipy.sparse.csr_matrix, scipy.sparse.csr_array]
CSC_CONTAINERS = [scipy.sparse.csc_matrix, scipy.sparse.csc_array]
COO_CONTAINERS = [scipy.sparse.coo_matrix, scipy.sparse.coo_array]
LIL_CONTAINERS = [scipy.sparse.lil_matrix, scipy.sparse.lil_array]
DOK_CONTAINERS = [scipy.sparse.dok_matrix, scipy.sparse.dok_array]
BSR_CONTAINERS = [scipy.sparse.bsr_matrix, scipy.sparse.bsr_array]
DIA_CONTAINERS = [scipy.sparse.dia_matrix, scipy.sparse.dia_array]
# Remove when minimum scipy version is 1.11.0
try:
from scipy.sparse import sparray # noqa: F401
SPARRAY_PRESENT = True
except ImportError:
SPARRAY_PRESENT = False
def _object_dtype_isnan(X):
    """Elementwise NaN mask that also works for object-dtype arrays.

    Relies on NaN being the only value that does not compare equal to itself:
    every non-NaN entry yields False in the ``X != X`` comparison.
    """
    return X != X
# TODO: Remove when SciPy 1.11 is the minimum supported version
def _mode(a, axis=0):
    """Wrapper around ``scipy.stats.mode`` with a version-stable output shape.

    Always calls ``mode`` with ``keepdims=True``; on SciPy >= 1.11 the result
    is flattened for ``axis=None`` to compensate for the shape change
    introduced upstream.
    """
    mode = scipy.stats.mode(a, axis=axis, keepdims=True)
    if sp_version >= parse_version("1.10.999"):
        # scipy.stats.mode has changed returned array shape with axis=None
        # and keepdims=True, see https://github.com/scipy/scipy/pull/17561
        if axis is None:
            mode = np.ravel(mode)
    return mode
# TODO: Remove when SciPy 1.12 is the minimum supported version
if sp_base_version >= parse_version("1.12.0"):
    _sparse_linalg_cg = scipy.sparse.linalg.cg
else:

    def _sparse_linalg_cg(A, b, **kwargs):
        # Backport of the SciPy >= 1.12 keyword names: older versions call the
        # relative tolerance `tol` (not `rtol`) and need an explicit legacy
        # `atol` to keep the historical stopping criterion.
        if "rtol" in kwargs:
            kwargs["tol"] = kwargs.pop("rtol")
        if "atol" not in kwargs:
            kwargs["atol"] = "legacy"
        return scipy.sparse.linalg.cg(A, b, **kwargs)
# TODO: Fuse the modern implementations of _sparse_min_max and _sparse_nan_min_max
# into the public min_max_axis function when SciPy 1.11 is the minimum supported
# version and delete the backport in the else branch below.
if sp_base_version >= parse_version("1.11.0"):

    def _sparse_min_max(X, axis):
        # Per-axis (or global, for axis=None) min and max of a sparse matrix.
        the_min = X.min(axis=axis)
        the_max = X.max(axis=axis)

        if axis is not None:
            # Densify the per-row/column reductions into flat 1D arrays.
            the_min = the_min.toarray().ravel()
            the_max = the_max.toarray().ravel()

        return the_min, the_max

    def _sparse_nan_min_max(X, axis):
        # Same as _sparse_min_max but ignoring NaN entries (nanmin/nanmax).
        the_min = X.nanmin(axis=axis)
        the_max = X.nanmax(axis=axis)

        if axis is not None:
            the_min = the_min.toarray().ravel()
            the_max = the_max.toarray().ravel()

        return the_min, the_max

else:
    # This code is mostly taken from scipy 0.14 and extended to handle nans, see
    # https://github.com/scikit-learn/scikit-learn/pull/11196
    def _minor_reduce(X, ufunc):
        # Reduce X.data over the minor axis with `ufunc`, returning the major
        # indices that contain at least one stored value and the reduced value
        # for each of them.
        major_index = np.flatnonzero(np.diff(X.indptr))

        # reduceat tries casts X.indptr to intp, which errors
        # if it is int64 on a 32 bit system.
        # Reinitializing prevents this where possible, see #13737
        X = type(X)((X.data, X.indices, X.indptr), shape=X.shape)
        value = ufunc.reduceat(X.data, X.indptr[major_index])
        return major_index, value

    def _min_or_max_axis(X, axis, min_or_max):
        # Reduce along `axis` with `min_or_max` (a binary ufunc), folding the
        # implicit zeros of the sparse matrix into the result.
        N = X.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = X.shape[1 - axis]
        mat = X.tocsc() if axis == 0 else X.tocsr()
        mat.sum_duplicates()
        major_index, value = _minor_reduce(mat, min_or_max)
        # Rows/columns with at least one implicit zero must also compare
        # against 0.
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)

        if axis == 0:
            res = scipy.sparse.coo_matrix(
                (value, (np.zeros(len(value)), major_index)),
                dtype=X.dtype,
                shape=(1, M),
            )
        else:
            res = scipy.sparse.coo_matrix(
                (value, (major_index, np.zeros(len(value)))),
                dtype=X.dtype,
                shape=(M, 1),
            )
        return res.toarray().ravel()

    def _sparse_min_or_max(X, axis, min_or_max):
        if axis is None:
            # Global reduction over all entries, including implicit zeros.
            if 0 in X.shape:
                raise ValueError("zero-size array to reduction operation")
            zero = X.dtype.type(0)
            if X.nnz == 0:
                return zero
            m = min_or_max.reduce(X.data.ravel())
            if X.nnz != np.prod(X.shape):
                # There is at least one implicit zero to fold in.
                m = min_or_max(zero, m)
            return m
        if axis < 0:
            axis += 2
        if (axis == 0) or (axis == 1):
            return _min_or_max_axis(X, axis, min_or_max)
        else:
            raise ValueError("invalid axis, use 0 for rows, or 1 for columns")

    def _sparse_min_max(X, axis):
        return (
            _sparse_min_or_max(X, axis, np.minimum),
            _sparse_min_or_max(X, axis, np.maximum),
        )

    def _sparse_nan_min_max(X, axis):
        # np.fmin/np.fmax skip NaN values, mirroring nanmin/nanmax.
        return (
            _sparse_min_or_max(X, axis, np.fmin),
            _sparse_min_or_max(X, axis, np.fmax),
        )
# For +1.25 NumPy versions exceptions and warnings are being moved
# to a dedicated submodule.
if np_version >= parse_version("1.25.0"):
from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning
else:
from numpy import ( # noqa: F401
ComplexWarning,
VisibleDeprecationWarning,
)
# TODO: Adapt when Pandas > 2.2 is the minimum supported version
def pd_fillna(pd, frame):
    """Replace missing values of a pandas frame with ``np.nan``, across pandas versions.

    Parameters
    ----------
    pd : module
        The imported pandas module (passed in by the caller).

    frame : pandas object
        The frame whose missing values are filled.

    Returns
    -------
    frame : pandas object
        The frame with missing values replaced by ``np.nan``.
    """
    pd_version = parse_version(pd.__version__).base_version
    if parse_version(pd_version) < parse_version("2.2"):
        frame = frame.fillna(value=np.nan)
    else:
        # `copy=False` is only passed on pandas < 3; no kwargs for pandas >= 3
        # (presumably the keyword is gone there — see pandas 3.0 changelog).
        infer_objects_kwargs = (
            {} if parse_version(pd_version) >= parse_version("3") else {"copy": False}
        )
        if parse_version(pd_version) < parse_version("3.0"):
            # Opt in to the future no-silent-downcasting behavior to avoid a
            # FutureWarning on pandas 2.2.x.
            with pd.option_context("future.no_silent_downcasting", True):
                frame = frame.fillna(value=np.nan).infer_objects(**infer_objects_kwargs)
        else:
            frame = frame.fillna(value=np.nan).infer_objects(**infer_objects_kwargs)
    return frame
# TODO: remove when SciPy 1.12 is the minimum supported version
def _preserve_dia_indices_dtype(
    sparse_container, original_container_format, requested_sparse_format
):
    """Preserve indices dtype for SciPy < 1.12 when converting from DIA to CSR/CSC.

    For SciPy < 1.12, DIA arrays indices are upcasted to `np.int64` that is
    inconsistent with DIA matrices. We downcast the indices dtype to `np.int32` to
    be consistent with DIA matrices.

    The converted indices arrays are affected back inplace to the sparse container.

    Parameters
    ----------
    sparse_container : sparse container
        Sparse container to be checked.

    original_container_format : str
        Format of the container before conversion; only "dia_array" triggers
        the downcast.

    requested_sparse_format : str or bool
        The type of format of `sparse_container`.

    Notes
    -----
    See https://github.com/scipy/scipy/issues/19245 for more details.
    """
    if original_container_format == "dia_array" and requested_sparse_format in (
        "csr",
        "coo",
    ):
        if requested_sparse_format == "csr":
            # Pick the smallest dtype that can hold both the current contents
            # and the maximal index value for this shape.
            index_dtype = _smallest_admissible_index_dtype(
                arrays=(sparse_container.indptr, sparse_container.indices),
                maxval=max(sparse_container.nnz, sparse_container.shape[1]),
                check_contents=True,
            )
            sparse_container.indices = sparse_container.indices.astype(
                index_dtype, copy=False
            )
            sparse_container.indptr = sparse_container.indptr.astype(
                index_dtype, copy=False
            )
        else:  # requested_sparse_format == "coo"
            index_dtype = _smallest_admissible_index_dtype(
                maxval=max(sparse_container.shape)
            )
            sparse_container.row = sparse_container.row.astype(index_dtype, copy=False)
            sparse_container.col = sparse_container.col.astype(index_dtype, copy=False)
# TODO: remove when SciPy 1.12 is the minimum supported version
def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False):
"""Based on input (integer) arrays `a`, determine a suitable index data
type that can hold the data in the arrays.
This function returns `np.int64` if it either required by `maxval` or based on the
largest precision of the dtype of the arrays passed as argument, or by their
contents (when `check_contents is True`). If none of the condition requires
`np.int64` then this function returns `np.int32`.
Parameters
----------
arrays : ndarray or tuple of ndarrays, default=()
Input arrays whose types/contents to check.
maxval : float, default=None
Maximum value needed.
check_contents : bool, default=False
Whether to check the values in the arrays and not just their types.
By default, check only the types.
Returns
-------
dtype : {np.int32, np.int64}
Suitable index data type (int32 or int64).
"""
int32min = np.int32(np.iinfo(np.int32).min)
int32max = np.int32(np.iinfo(np.int32).max)
if maxval is not None:
if maxval > np.iinfo(np.int64).max:
raise ValueError(
f"maxval={maxval} is to large to be represented as np.int64."
)
if maxval > int32max:
return np.int64
if isinstance(arrays, np.ndarray):
arrays = (arrays,)
for arr in arrays:
if not isinstance(arr, np.ndarray):
raise TypeError(
f"Arrays should be of type np.ndarray, got {type(arr)} instead."
)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError(
f"Array dtype {arr.dtype} is not supported for index dtype. We expect "
"integral values."
)
if not np.can_cast(arr.dtype, np.int32):
if not check_contents:
# when `check_contents` is False, we stay on the safe side and return
# np.int64.
return np.int64
if arr.size == 0:
# a bigger type not needed yet, let's look at the next array
continue
else:
maxval = arr.max()
minval = arr.min()
if minval < int32min or maxval > int32max:
# a big index type is actually needed
return np.int64
return np.int32
# TODO: Remove when SciPy 1.12 is the minimum supported version
if sp_version < parse_version("1.12"):
from sklearn.externals._scipy.sparse.csgraph import laplacian
else:
from scipy.sparse.csgraph import (
laplacian, # noqa: F401 # pragma: no cover
)
# TODO: Remove when Python min version >= 3.12.
def tarfile_extractall(tarfile, path):
    """Extract `tarfile` into `path`, using the safe "data" filter when available.

    The ``filter="data"`` argument (Python 3.12+) prevents the most dangerous
    security issues when extracting archives; see
    https://docs.python.org/3/library/tarfile.html#tarfile.TarFile.extractall
    """
    try:
        tarfile.extractall(path, filter="data")
    except TypeError:
        # Older Python: extractall() does not accept the `filter` keyword.
        tarfile.extractall(path)
def _in_unstable_openblas_configuration():
    """Return True if in an unstable configuration for OpenBLAS"""
    # Import libraries which might load OpenBLAS.
    import numpy  # noqa: F401
    import scipy  # noqa: F401

    modules_info = _get_threadpool_controller().info()

    openblas_modules = [
        info for info in modules_info if info["internal_api"] == "openblas"
    ]
    if not openblas_modules:
        return False

    # OpenBLAS 0.3.16 fixed instability for arm64, see:
    # https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58
    openblas_arm64_stable_version = parse_version("0.3.16")

    for info in openblas_modules:
        openblas_version = info.get("version")
        openblas_architecture = info.get("architecture")
        if openblas_version is None or openblas_architecture is None:
            # Cannot be sure that OpenBLAS is good enough. Assume unstable:
            return True  # pragma: no cover

        if (
            openblas_architecture == "neoversen1"
            and parse_version(openblas_version) < openblas_arm64_stable_version
        ):
            # See discussions in https://github.com/numpy/numpy/issues/19411
            return True  # pragma: no cover
    return False
# TODO: Remove when Scipy 1.15 is the minimum supported version. In scipy 1.15,
# the internal info details (via 'iprint' and 'disp' options) were dropped,
# following the LBFGS rewrite from Fortran to C, see
# https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035. For
# scipy 1.15, 'iprint' and 'disp' have no effect and for scipy >= 1.16 a
# DeprecationWarning is emitted.
def _get_additional_lbfgs_options_dict(key, value):
    """Return ``{key: value}`` for scipy < 1.15 and an empty dict otherwise."""
    if sp_version >= parse_version("1.15"):
        return {}
    return {key: value}
# TODO(pyarrow): Remove when minimum pyarrow version is 17.0.0
# Module-level flag consulted to work around behavior differences in
# pyarrow < 17.0.0. It stays False when pyarrow is recent enough or is not
# installed at all.
PYARROW_VERSION_BELOW_17 = False
try:
    import pyarrow

    pyarrow_version = parse_version(pyarrow.__version__)
    if pyarrow_version < parse_version("17.0.0"):
        PYARROW_VERSION_BELOW_17 = True
except ModuleNotFoundError:  # pragma: no cover
    # pyarrow is an optional dependency: keep the default (False).
    pass
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/validation.py | sklearn/utils/validation.py | """Functions to validate input and parameters within scikit-learn estimators."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import operator
import warnings
from collections.abc import Sequence
from contextlib import suppress
from functools import reduce, wraps
from inspect import Parameter, isclass, signature
import joblib
import numpy as np
import scipy.sparse as sp
from sklearn import get_config as _get_config
from sklearn.exceptions import (
DataConversionWarning,
NotFittedError,
PositiveSpectrumWarning,
)
from sklearn.utils._array_api import (
_asarray_with_order,
_convert_to_numpy,
_is_numpy_namespace,
_max_precision_float_dtype,
get_namespace,
get_namespace_and_device,
)
from sklearn.utils._dataframe import is_pandas_df, is_pandas_df_or_series
from sklearn.utils._isfinite import FiniteStatus, cy_isfinite
from sklearn.utils._tags import get_tags
from sklearn.utils.fixes import (
ComplexWarning,
_object_dtype_isnan,
_preserve_dia_indices_dtype,
)
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# This function is not used anymore at this moment in the code base but we keep it in
# case that we merge a new public function without kwarg only by mistake, which would
# require a deprecation cycle to fix.
def _deprecate_positional_args(func=None, *, version="1.3"):
"""Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
func : callable, default=None
Function to check arguments on.
version : callable, default="1.3"
The version when positional arguments will result in error.
"""
def _inner_deprecate_positional_args(f):
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args <= 0:
return f(*args, **kwargs)
# extra_args > 0
args_msg = [
"{}={}".format(name, arg)
for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
]
args_msg = ", ".join(args_msg)
warnings.warn(
(
f"Pass {args_msg} as keyword args. From version "
f"{version} passing these as positional arguments "
"will result in an error"
),
FutureWarning,
)
kwargs.update(zip(sig.parameters, args))
return f(**kwargs)
return inner_f
if func is not None:
return _inner_deprecate_positional_args(func)
return _inner_deprecate_positional_args
def _assert_all_finite(
    X, allow_nan=False, msg_dtype=None, estimator_name=None, input_name=""
):
    """Like assert_all_finite, but only for ndarray.

    Parameters
    ----------
    X : array-like
        Data to validate; converted with ``xp.asarray`` before checking.

    allow_nan : bool, default=False
        If True, NaN values do not raise; only infinities do.

    msg_dtype : dtype, default=None
        Dtype shown in the error message instead of ``X.dtype``.

    estimator_name : str, default=None
        Estimator name used to enrich the error message.

    input_name : str, default=""
        Name of the validated input (e.g. "X") used in the error message.

    Raises
    ------
    ValueError
        If a non-finite value (or NaN when not allowed) is found.
    """
    xp, is_array_api = get_namespace(X)

    # Global config switch: skip all (potentially costly) finiteness checks.
    if _get_config()["assume_finite"]:
        return

    X = xp.asarray(X)

    # for object dtype data, we only check for NaNs (GH-13254)
    if not is_array_api and X.dtype == np.dtype("object") and not allow_nan:
        if _object_dtype_isnan(X).any():
            raise ValueError("Input contains NaN")

    # We need only consider float arrays, hence can early return for all else.
    if not xp.isdtype(X.dtype, ("real floating", "complex floating")):
        return

    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space `np.isinf/isnan` or custom
    # Cython implementation to prevent false positives and provide a detailed
    # error message.
    # NOTE: overflow warnings are silenced because the sum of large finite
    # values may overflow to inf; that case is disambiguated by the
    # element-wise fallback below.
    with np.errstate(over="ignore"):
        first_pass_isfinite = xp.isfinite(xp.sum(X))
    if first_pass_isfinite:
        return

    _assert_all_finite_element_wise(
        X,
        xp=xp,
        allow_nan=allow_nan,
        msg_dtype=msg_dtype,
        estimator_name=estimator_name,
        input_name=input_name,
    )
def _assert_all_finite_element_wise(
    X, *, xp, allow_nan, msg_dtype=None, estimator_name=None, input_name=""
):
    """Scan `X` element-wise and raise a detailed ValueError on non-finite data.

    Helper for `_assert_all_finite`: only called after a cheap first pass has
    already flagged a non-finite accumulated sum.
    """
    # Cython implementation doesn't support FP16 or complex numbers: it only
    # handles contiguous float32/float64 NumPy arrays.
    use_cython = (
        xp is np and X.data.contiguous and X.dtype.type in {np.float32, np.float64}
    )
    if use_cython:
        out = cy_isfinite(X.reshape(-1), allow_nan=allow_nan)
        has_nan_error = False if allow_nan else out == FiniteStatus.has_nan
        has_inf = out == FiniteStatus.has_infinite
    else:
        # Generic array-API path: two full passes over the data.
        has_inf = xp.any(xp.isinf(X))
        has_nan_error = False if allow_nan else xp.any(xp.isnan(X))
    if has_inf or has_nan_error:
        # NaN takes precedence in the error message over infinity.
        if has_nan_error:
            type_err = "NaN"
        else:
            msg_dtype = msg_dtype if msg_dtype is not None else X.dtype
            type_err = f"infinity or a value too large for {msg_dtype!r}"
        padded_input_name = input_name + " " if input_name else ""
        msg_err = f"Input {padded_input_name}contains {type_err}."
        if estimator_name and input_name == "X" and has_nan_error:
            # Improve the error message on how to handle missing values in
            # scikit-learn.
            msg_err += (
                f"\n{estimator_name} does not accept missing values"
                " encoded as NaN natively. For supervised learning, you might want"
                " to consider sklearn.ensemble.HistGradientBoostingClassifier and"
                " Regressor which accept missing values encoded as NaNs natively."
                " Alternatively, it is possible to preprocess the data, for"
                " instance by using an imputer transformer in a pipeline or drop"
                " samples with missing values. See"
                " https://scikit-learn.org/stable/modules/impute.html"
                " You can find a list of all estimators that handle NaN values"
                " at the following page:"
                " https://scikit-learn.org/stable/modules/impute.html"
                "#estimators-that-handle-nan-values"
            )
        raise ValueError(msg_err)
def assert_all_finite(
    X,
    *,
    allow_nan=False,
    estimator_name=None,
    input_name="",
):
    """Throw a ValueError if X contains NaN or infinity.

    Parameters
    ----------
    X : {ndarray, sparse matrix}
        The input data.

    allow_nan : bool, default=False
        If True, do not throw error when `X` contains NaN.

    estimator_name : str, default=None
        The estimator name, used to construct the error message.

    input_name : str, default=""
        The data name used to construct the error message. In particular
        if `input_name` is "X" and the data has NaN values and
        allow_nan is False, the error message will link to the imputer
        documentation.

    Examples
    --------
    >>> from sklearn.utils import assert_all_finite
    >>> import numpy as np
    >>> array = np.array([1, np.inf, np.nan, 4])
    >>> try:
    ...     assert_all_finite(array)
    ...     print("Test passed: Array contains only finite values.")
    ... except ValueError:
    ...     print("Test failed: Array contains non-finite values.")
    Test failed: Array contains non-finite values.
    """
    # For sparse inputs only the explicitly stored entries need checking.
    values = X.data if sp.issparse(X) else X
    _assert_all_finite(
        values,
        allow_nan=allow_nan,
        estimator_name=estimator_name,
        input_name=input_name,
    )
def as_float_array(X, *, copy=True, ensure_all_finite=True):
    """Convert an array-like to an array of floats.

    The new dtype will be np.float32 or np.float64, depending on the original
    type. The function can create a copy or modify the argument depending
    on the argument copy.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        The input data.

    copy : bool, default=True
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    ensure_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in X. The
        possibilities are:

        - True: Force all values of X to be finite.
        - False: accepts np.inf, np.nan, pd.NA in X.
        - 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
          be infinite.

        .. versionadded:: 1.6
           `force_all_finite` was renamed to `ensure_all_finite`.

    Returns
    -------
    XT : {ndarray, sparse matrix}
        An array of type float.

    Examples
    --------
    >>> from sklearn.utils import as_float_array
    >>> import numpy as np
    >>> array = np.array([0, 0, 1, 2, 2], dtype=np.int64)
    >>> as_float_array(array)
    array([0., 0., 1., 2., 2.])
    """
    # np.matrix is excluded on purpose: check_array handles (rejects) it.
    is_plain_ndarray = isinstance(X, np.ndarray) and not isinstance(X, np.matrix)
    if not is_plain_ndarray and not sp.issparse(X):
        # Lists, matrices and other array-likes: delegate the full
        # conversion/validation work to check_array.
        return check_array(
            X,
            accept_sparse=["csr", "csc", "coo"],
            dtype=np.float64,
            copy=copy,
            ensure_all_finite=ensure_all_finite,
            ensure_2d=False,
        )
    if X.dtype in [np.float32, np.float64]:
        # Already a float container: at most a copy is needed.
        if not copy:
            return X
        if sp.issparse(X):
            return X.copy()
        # Preserve the memory layout of the dense array when copying.
        return X.copy("F" if X.flags["F_CONTIGUOUS"] else "C")
    # Integer/boolean input: pick the smallest float dtype that is safe for
    # the input width (<= 32-bit integers fit in float32).
    if X.dtype.kind in "uib" and X.dtype.itemsize <= 4:
        return X.astype(np.float32)
    return X.astype(np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like."""
if sp.issparse(x):
return False
return hasattr(x, "__len__") or hasattr(x, "shape") or hasattr(x, "__array__")
def _is_arraylike_not_scalar(array):
    """Return True if array is array-like and not a scalar"""
    if np.isscalar(array):
        return False
    return _is_arraylike(array)
def _use_interchange_protocol(X):
    """Use interchange protocol for non-pandas dataframes that follow the protocol.

    Note: at this point we chose not to use the interchange API on pandas dataframe
    to ensure strict behavioral backward compatibility with older versions of
    scikit-learn.
    """
    if is_pandas_df(X):
        # Pandas dataframes keep the legacy code path (see note above).
        return False
    return hasattr(X, "__dataframe__")
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = f"Unable to find the number of features from X of type {type_name}"
if not hasattr(X, "__len__") and not hasattr(X, "shape"):
if not hasattr(X, "__array__"):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, "shape"):
if not hasattr(X.shape, "__len__") or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings or dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += f" where the samples are of type {type(first_sample).__qualname__}"
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
def _num_samples(x):
    """Return number of samples in array-like x."""
    message = "Expected sequence or array-like, got %s" % type(x)
    if hasattr(x, "fit") and callable(x.fit):
        # Don't get num_samples from an ensembles length!
        raise TypeError(message)

    if _use_interchange_protocol(x):
        return x.__dataframe__().num_rows()

    if not hasattr(x, "__len__") and not hasattr(x, "shape"):
        if not hasattr(x, "__array__"):
            raise TypeError(message)
        # Materialize through the object's own array namespace.
        xp, _ = get_namespace(x)
        x = xp.asarray(x)

    shape = getattr(x, "shape", None)
    if shape is not None:
        if len(shape) == 0:
            raise TypeError(
                "Input should have at least 1 dimension i.e. satisfy "
                f"`len(x.shape) > 0`, got scalar `{x!r}` instead."
            )
        # Some containers (e.g. dask dataframes) may expose a non-numeric
        # shape[0]; only trust it when it is an actual integer.
        if isinstance(shape[0], numbers.Integral):
            return shape[0]

    try:
        return len(x)
    except TypeError as type_error:
        raise TypeError(message) from type_error
def check_memory(memory):
    """Check that ``memory`` is joblib.Memory-like.

    joblib.Memory-like means that ``memory`` can be converted into a
    joblib.Memory instance (typically a str denoting the ``location``)
    or has the same interface (has a ``cache`` method).

    Parameters
    ----------
    memory : None, str or object with the joblib.Memory interface
        - If string, the location where to create the `joblib.Memory` interface.
        - If None, no caching is done and the Memory object is completely transparent.

    Returns
    -------
    memory : object with the joblib.Memory interface
        A correct joblib.Memory object.

    Raises
    ------
    ValueError
        If ``memory`` is not joblib.Memory-like.

    Examples
    --------
    >>> from sklearn.utils.validation import check_memory
    >>> check_memory("caching_dir")
    Memory(location=caching_dir/joblib)
    """
    if memory is None or isinstance(memory, str):
        # Build a transparent (None) or on-disk (str location) Memory object.
        return joblib.Memory(location=memory, verbose=0)
    if not hasattr(memory, "cache"):
        raise ValueError(
            "'memory' should be None, a string or have the same"
            " interface as joblib.Memory."
            " Got memory='{}' instead.".format(memory)
        )
    return memory
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.

    Examples
    --------
    >>> from sklearn.utils.validation import check_consistent_length
    >>> a = [1, 2, 3]
    >>> b = [2, 3, 4]
    >>> check_consistent_length(a, b)
    """
    # None entries are ignored; order is preserved for the error message.
    lengths = [_num_samples(arr) for arr in arrays if arr is not None]
    if len(set(lengths)) > 1:
        raise ValueError(
            "Found input variables with inconsistent numbers of samples: %r"
            % [int(length) for length in lengths]
        )
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse matrix} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that everything
    can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.

    Parameters
    ----------
    *iterables : {lists, dataframes, ndarrays, sparse matrices}
        List of objects to ensure sliceability.

    Returns
    -------
    result : list of {ndarray, sparse matrix, dataframe} or None
        Returns a list containing indexable arrays (i.e. NumPy array,
        sparse matrix, or dataframe) or `None`.

    Examples
    --------
    >>> from sklearn.utils import indexable
    >>> from scipy.sparse import csr_matrix
    >>> import numpy as np
    >>> iterables = [
    ...     [1, 2, 3], np.array([2, 3, 4]), None, csr_matrix([[5], [6], [7]])
    ... ]
    >>> indexable(*iterables)
    [[1, 2, 3], array([2, 3, 4]), None, <...Sparse...dtype 'int64'...shape (3, 1)>]
    """
    results = [_make_indexable(item) for item in iterables]
    # Validate after conversion so lengths are computed on indexable objects.
    check_consistent_length(*results)
    return results
def _ensure_sparse_format(
    sparse_container,
    accept_sparse,
    dtype,
    copy,
    ensure_all_finite,
    accept_large_sparse,
    estimator_name=None,
    input_name="",
):
    """Convert a sparse container to a given format.

    Checks the sparse format of `sparse_container` and converts if necessary.

    Parameters
    ----------
    sparse_container : sparse matrix or array
        Input to validate and convert.

    accept_sparse : str, bool or list/tuple of str
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
        not in the allowed format, it will be converted to the first listed
        format. True allows the input to be any format. False means
        that a sparse matrix input will raise an error.

    dtype : str, type or None
        Data type of result. If None, the dtype of the input is preserved.

    copy : bool
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    ensure_all_finite : bool or 'allow-nan'
        Whether to raise an error on np.inf, np.nan, pd.NA in X. The
        possibilities are:

        - True: Force all values of X to be finite.
        - False: accepts np.inf, np.nan, pd.NA in X.
        - 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
          be infinite.

        .. versionadded:: 0.20
           ``ensure_all_finite`` accepts the string ``'allow-nan'``.

        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`

    estimator_name : str, default=None
        The estimator name, used to construct the error message.

    input_name : str, default=""
        The data name used to construct the error message. In particular
        if `input_name` is "X" and the data has NaN values and
        allow_nan is False, the error message will link to the imputer
        documentation.

    Returns
    -------
    sparse_container_converted : sparse matrix or array
        Sparse container (matrix/array) that is ensured to have an allowed type.
    """
    if dtype is None:
        # Preserve the input dtype when none is requested.
        dtype = sparse_container.dtype

    changed_format = False
    # Remember the original container class name before any conversion, for
    # the index-dtype fixup at the end.
    sparse_container_type_name = type(sparse_container).__name__

    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # Indices dtype validation
    _check_large_sparse(sparse_container, accept_large_sparse)

    if accept_sparse is False:
        padded_input = " for " + input_name if input_name else ""
        raise TypeError(
            f"Sparse data was passed{padded_input}, but dense data is required. "
            "Use '.toarray()' to convert to a dense numpy array."
        )
    elif isinstance(accept_sparse, (list, tuple)):
        if len(accept_sparse) == 0:
            raise ValueError(
                "When providing 'accept_sparse' as a tuple or list, it must contain at "
                "least one string value."
            )
        # ensure correct sparse format
        if sparse_container.format not in accept_sparse:
            # create new with correct sparse (first accepted format wins)
            sparse_container = sparse_container.asformat(accept_sparse[0])
            changed_format = True
    elif accept_sparse is not True:
        # any other type
        raise ValueError(
            "Parameter 'accept_sparse' should be a string, boolean or list of strings."
            f" You provided 'accept_sparse={accept_sparse}'."
        )

    if dtype != sparse_container.dtype:
        # convert dtype (astype already returns a new container, so no extra
        # copy is needed below)
        sparse_container = sparse_container.astype(dtype)
    elif copy and not changed_format:
        # force copy; skipped when a format conversion already produced a
        # fresh container
        sparse_container = sparse_container.copy()

    if ensure_all_finite:
        if not hasattr(sparse_container, "data"):
            # Formats without a flat `.data` buffer cannot be checked cheaply.
            warnings.warn(
                f"Can't check {sparse_container.format} sparse matrix for nan or inf.",
                stacklevel=2,
            )
        else:
            _assert_all_finite(
                sparse_container.data,
                allow_nan=ensure_all_finite == "allow-nan",
                estimator_name=estimator_name,
                input_name=input_name,
            )

    # TODO: Remove when the minimum version of SciPy supported is 1.12
    # With SciPy sparse arrays, conversion from DIA format to COO, CSR, or BSR
    # triggers the use of `np.int64` indices even if the data is such that it could
    # be more efficiently represented with `np.int32` indices.
    # https://github.com/scipy/scipy/issues/19245 Since not all scikit-learn
    # algorithms support large indices, the following code downcasts to `np.int32`
    # indices when it's safe to do so.
    if changed_format:
        # accept_sparse is specified to a specific format and a conversion occurred
        requested_sparse_format = accept_sparse[0]
        _preserve_dia_indices_dtype(
            sparse_container, sparse_container_type_name, requested_sparse_format
        )

    return sparse_container
def _ensure_no_complex_data(array):
if (
hasattr(array, "dtype")
and array.dtype is not None
and hasattr(array.dtype, "kind")
and array.dtype.kind == "c"
):
raise ValueError("Complex data not supported\n{}\n".format(array))
def _check_estimator_name(estimator):
if estimator is not None:
if isinstance(estimator, str):
return estimator
else:
return estimator.__class__.__name__
return None
def _pandas_dtype_needs_early_conversion(pd_dtype):
"""Return True if pandas extension pd_dtype need to be converted early."""
# Check these early for pandas versions without extension dtypes
from pandas import SparseDtype
from pandas.api.types import (
is_bool_dtype,
is_float_dtype,
is_integer_dtype,
)
if is_bool_dtype(pd_dtype):
# bool and extension booleans need early conversion because __array__
# converts mixed dtype dataframes into object dtypes
return True
if isinstance(pd_dtype, SparseDtype):
# Sparse arrays will be converted later in `check_array`
return False
try:
from pandas.api.types import is_extension_array_dtype
except ImportError:
return False
if isinstance(pd_dtype, SparseDtype) or not is_extension_array_dtype(pd_dtype):
# Sparse arrays will be converted later in `check_array`
# Only handle extension arrays for integer and floats
return False
elif is_float_dtype(pd_dtype):
# Float ndarrays can normally support nans. They need to be converted
# first to map pd.NA to np.nan
return True
elif is_integer_dtype(pd_dtype):
# XXX: Warn when converting from a high integer to a float
return True
return False
def _is_extension_array_dtype(array):
# Pandas extension arrays have a dtype with an na_value
return hasattr(array, "dtype") and hasattr(array.dtype, "na_value")
def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
ensure_all_finite=True,
ensure_non_negative=False,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
input_name="",
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_non_negative : bool, default=False
Make sure the array has only non-negative values. If True, an array that
contains negative values will raise a ValueError.
.. versionadded:: 1.6
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
"""
if isinstance(array, np.matrix):
raise TypeError(
"np.matrix is not supported. Please convert to a numpy array with "
"np.asarray. For more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html"
)
xp, is_array_api_compliant = get_namespace(array)
# store reference to original array to check if copy is needed when
# function returns
array_orig = array
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not is_array_api_compliant and not hasattr(dtype_orig, "kind"):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
# check if the object contains several dtypes (typically a pandas
# DataFrame), and store them. If not, store None.
dtypes_orig = None
pandas_requires_conversion = False
# track if we have a Series-like object to raise a better error message
type_if_series = None
if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
# throw warning if columns are sparse. If all columns are sparse, then
# array.sparse exists and sparsity will be preserved (later).
with suppress(ImportError):
from pandas import SparseDtype
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
warnings.warn(
"pandas.DataFrame with sparse columns found."
"It will be converted to a dense numpy array."
)
dtypes_orig = list(array.dtypes)
pandas_requires_conversion = any(
_pandas_dtype_needs_early_conversion(i) for i in dtypes_orig
)
if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):
dtype_orig = np.result_type(*dtypes_orig)
elif pandas_requires_conversion and any(d == object for d in dtypes_orig):
# Force object if any of the dtypes is an object
dtype_orig = object
elif (_is_extension_array_dtype(array) or hasattr(array, "iloc")) and hasattr(
array, "dtype"
):
# array is a pandas series
type_if_series = type(array)
pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)
if isinstance(array.dtype, np.dtype):
dtype_orig = array.dtype
else:
# Set to None to let array.astype work out the best dtype
dtype_orig = None
if dtype_numeric:
if (
dtype_orig is not None
and hasattr(dtype_orig, "kind")
and dtype_orig.kind == "O"
):
# if input is object, convert to float.
dtype = xp.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if pandas_requires_conversion:
# pandas dataframe requires conversion earlier to handle extension dtypes with
# nans
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_mocking.py | sklearn/utils/_mocking.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils._metadata_requests import RequestMethod
from sklearn.utils.metaestimators import available_if
from sklearn.utils.validation import (
_check_sample_weight,
_num_samples,
check_array,
check_is_fitted,
check_random_state,
)
class ArraySlicingWrapper:
    """Minimal stand-in for a dataframe's ``.iloc`` indexer.

    Parameters
    ----------
    array
    """

    def __init__(self, array):
        self.array = array

    def __getitem__(self, aslice):
        # Mimic pandas' .iloc: slicing returns a dataframe-like object.
        return MockDataFrame(self.array[aslice])
class MockDataFrame:
    """Minimal pandas.DataFrame look-alike used in tests.

    Parameters
    ----------
    array
    """

    # have shape and length but don't support indexing.
    def __init__(self, array):
        self.array = array
        self.values = array
        self.shape, self.ndim = array.shape, array.ndim
        # ugly hack to make iloc work.
        self.iloc = ArraySlicingWrapper(array)

    def __len__(self):
        return len(self.array)

    def __array__(self, dtype=None):
        # Pandas data frames also are array-like: we want to make sure that
        # input validation in cross-validation does not try to call that
        # method.
        return self.array

    def __eq__(self, other):
        # Element-wise comparison wrapped back into a MockDataFrame.
        return MockDataFrame(self.array == other.array)

    def __ne__(self, other):
        return not (self == other)

    def take(self, indices, axis=0):
        return MockDataFrame(self.array.take(indices, axis=axis))
class CheckingClassifier(ClassifierMixin, BaseEstimator):
"""Dummy classifier to test pipelining and meta-estimators.
Checks some property of `X` and `y`in fit / predict.
This allows testing whether pipelines / cross-validation or metaestimators
changed the input.
Can also be used to check if `fit_params` are passed correctly, and
to force a certain score to be returned.
Parameters
----------
check_y, check_X : callable, default=None
The callable used to validate `X` and `y`. These callable should return
a bool where `False` will trigger an `AssertionError`. If `None`, the
data is not validated. Default is `None`.
check_y_params, check_X_params : dict, default=None
The optional parameters to pass to `check_X` and `check_y`. If `None`,
then no parameters are passed in.
methods_to_check : "all" or list of str, default="all"
The methods in which the checks should be applied. By default,
all checks will be done on all methods (`fit`, `predict`,
`predict_proba`, `decision_function` and `score`).
foo_param : int, default=0
A `foo` param. When `foo > 1`, the output of :meth:`score` will be 1
otherwise it is 0.
expected_sample_weight : bool, default=False
Whether to check if a valid `sample_weight` was passed to `fit`.
expected_fit_params : list of str, default=None
A list of the expected parameters given when calling `fit`.
Attributes
----------
classes_ : int
The classes seen during `fit`.
n_features_in_ : int
The number of features seen during `fit`.
Examples
--------
>>> from sklearn.utils._mocking import CheckingClassifier
This helper allow to assert to specificities regarding `X` or `y`. In this
case we expect `check_X` or `check_y` to return a boolean.
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> clf = CheckingClassifier(check_X=lambda x: x.shape == (150, 4))
>>> clf.fit(X, y)
CheckingClassifier(...)
We can also provide a check which might raise an error. In this case, we
expect `check_X` to return `X` and `check_y` to return `y`.
>>> from sklearn.utils import check_array
>>> clf = CheckingClassifier(check_X=check_array)
>>> clf.fit(X, y)
CheckingClassifier(...)
"""
def __init__(
self,
*,
check_y=None,
check_y_params=None,
check_X=None,
check_X_params=None,
methods_to_check="all",
foo_param=0,
expected_sample_weight=None,
expected_fit_params=None,
random_state=None,
):
self.check_y = check_y
self.check_y_params = check_y_params
self.check_X = check_X
self.check_X_params = check_X_params
self.methods_to_check = methods_to_check
self.foo_param = foo_param
self.expected_sample_weight = expected_sample_weight
self.expected_fit_params = expected_fit_params
self.random_state = random_state
def _check_X_y(self, X, y=None, should_be_fitted=True):
"""Validate X and y and make extra check.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data set.
`X` is checked only if `check_X` is not `None` (default is None).
y : array-like of shape (n_samples), default=None
The corresponding target, by default `None`.
`y` is checked only if `check_y` is not `None` (default is None).
should_be_fitted : bool, default=True
Whether or not the classifier should be already fitted.
By default True.
Returns
-------
X, y
"""
if should_be_fitted:
check_is_fitted(self)
if self.check_X is not None:
params = {} if self.check_X_params is None else self.check_X_params
checked_X = self.check_X(X, **params)
if isinstance(checked_X, (bool, np.bool_)):
assert checked_X
else:
X = checked_X
if y is not None and self.check_y is not None:
params = {} if self.check_y_params is None else self.check_y_params
checked_y = self.check_y(y, **params)
if isinstance(checked_y, (bool, np.bool_)):
assert checked_y
else:
y = checked_y
return X, y
def fit(self, X, y, sample_weight=None, **fit_params):
"""Fit classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples, n_outputs) or (n_samples,), \
default=None
Target relative to X for classification or regression;
None for unsupervised learning.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
Returns
-------
self
"""
assert _num_samples(X) == _num_samples(y)
if self.methods_to_check == "all" or "fit" in self.methods_to_check:
X, y = self._check_X_y(X, y, should_be_fitted=False)
self.n_features_in_ = np.shape(X)[1]
self.classes_ = np.unique(check_array(y, ensure_2d=False, allow_nd=True))
if self.expected_fit_params:
missing = set(self.expected_fit_params) - set(fit_params)
if missing:
raise AssertionError(
f"Expected fit parameter(s) {list(missing)} not seen."
)
for key, value in fit_params.items():
if _num_samples(value) != _num_samples(X):
raise AssertionError(
f"Fit parameter {key} has length {_num_samples(value)}"
f"; expected {_num_samples(X)}."
)
if self.expected_sample_weight:
if sample_weight is None:
raise AssertionError("Expected sample_weight to be passed")
_check_sample_weight(sample_weight, X)
return self
def predict(self, X):
"""Predict the first class seen in `classes_`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
preds : ndarray of shape (n_samples,)
Predictions of the first class seen in `classes_`.
"""
if self.methods_to_check == "all" or "predict" in self.methods_to_check:
X, y = self._check_X_y(X)
rng = check_random_state(self.random_state)
return rng.choice(self.classes_, size=_num_samples(X))
def predict_proba(self, X):
"""Predict probabilities for each class.
Here, the dummy classifier will provide a probability of 1 for the
first class of `classes_` and 0 otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
proba : ndarray of shape (n_samples, n_classes)
The probabilities for each sample and class.
"""
if self.methods_to_check == "all" or "predict_proba" in self.methods_to_check:
X, y = self._check_X_y(X)
rng = check_random_state(self.random_state)
proba = rng.randn(_num_samples(X), len(self.classes_))
proba = np.abs(proba, out=proba)
proba /= np.sum(proba, axis=1)[:, np.newaxis]
return proba
def decision_function(self, X):
"""Confidence score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
decision : ndarray of shape (n_samples,) if n_classes == 2\
else (n_samples, n_classes)
Confidence score.
"""
if (
self.methods_to_check == "all"
or "decision_function" in self.methods_to_check
):
X, y = self._check_X_y(X)
rng = check_random_state(self.random_state)
if len(self.classes_) == 2:
# for binary classifier, the confidence score is related to
# classes_[1] and therefore should be null.
return rng.randn(_num_samples(X))
else:
return rng.randn(_num_samples(X), len(self.classes_))
def score(self, X=None, Y=None):
"""Fake score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Y : array-like of shape (n_samples, n_output) or (n_samples,)
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Either 0 or 1 depending of `foo_param` (i.e. `foo_param > 1 =>
score=1` otherwise `score=0`).
"""
if self.methods_to_check == "all" or "score" in self.methods_to_check:
self._check_X_y(X, Y)
if self.foo_param > 1:
score = 1.0
else:
score = 0.0
return score
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags._skip_test = True
tags.input_tags.two_d_array = False
tags.target_tags.one_d_labels = True
return tags
# Deactivate key validation for CheckingClassifier because we want to be able to
# call fit with arbitrary fit_params and record them. Without this change, we
# would get an error because those arbitrary params are not expected.
CheckingClassifier.set_fit_request = RequestMethod( # type: ignore[assignment,method-assign]
name="fit", keys=[], validate_keys=False
)
class NoSampleWeightWrapper(BaseEstimator):
"""Wrap estimator which will not expose `sample_weight`.
Parameters
----------
est : estimator, default=None
The estimator to wrap.
"""
def __init__(self, est=None):
self.est = est
def fit(self, X, y):
return self.est.fit(X, y)
def predict(self, X):
return self.est.predict(X)
def predict_proba(self, X):
return self.est.predict_proba(X)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags._skip_test = True
return tags
def _check_response(method):
def check(self):
return self.response_methods is not None and method in self.response_methods
return check
class _MockEstimatorOnOffPrediction(BaseEstimator):
"""Estimator for which we can turn on/off the prediction methods.
Parameters
----------
response_methods: list of \
{"predict", "predict_proba", "decision_function"}, default=None
List containing the response implemented by the estimator. When, the
response is in the list, it will return the name of the response method
when called. Otherwise, an `AttributeError` is raised. It allows to
use `getattr` as any conventional estimator. By default, no response
methods are mocked.
"""
def __init__(self, response_methods=None):
self.response_methods = response_methods
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
@available_if(_check_response("predict"))
def predict(self, X):
return "predict"
@available_if(_check_response("predict_proba"))
def predict_proba(self, X):
return "predict_proba"
@available_if(_check_response("decision_function"))
def decision_function(self, X):
return "decision_function"
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_dataframe.py | sklearn/utils/_dataframe.py | """Functions to determine if an object is a dataframe or series."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import sys
def is_df_or_series(X):
"""Return True if the X is a dataframe or series.
Parameters
----------
X : {array-like, dataframe}
The array-like or dataframe object to check.
Returns
-------
bool
True if the X is a dataframe or series, False otherwise.
"""
return is_pandas_df_or_series(X) or is_polars_df_or_series(X) or is_pyarrow_data(X)
def is_pandas_df_or_series(X):
"""Return True if the X is a pandas dataframe or series.
Parameters
----------
X : {array-like, dataframe}
The array-like or dataframe object to check.
Returns
-------
bool
True if the X is a pandas dataframe or series, False otherwise.
"""
try:
pd = sys.modules["pandas"]
except KeyError:
return False
return isinstance(X, (pd.DataFrame, pd.Series))
def is_pandas_df(X):
"""Return True if the X is a pandas dataframe.
Parameters
----------
X : {array-like, dataframe}
The array-like or dataframe object to check.
Returns
-------
bool
True if the X is a pandas dataframe, False otherwise.
"""
try:
pd = sys.modules["pandas"]
except KeyError:
return False
return isinstance(X, pd.DataFrame)
def is_pyarrow_data(X):
"""Return True if the X is a pyarrow Table, RecordBatch, Array or ChunkedArray.
Parameters
----------
X : {array-like, dataframe}
The array-like or dataframe object to check.
Returns
-------
bool
True if the X is a pyarrow Table, RecordBatch, Array or ChunkedArray,
False otherwise.
"""
try:
pa = sys.modules["pyarrow"]
except KeyError:
return False
return isinstance(X, (pa.Table, pa.RecordBatch, pa.Array, pa.ChunkedArray))
def is_polars_df_or_series(X):
"""Return True if the X is a polars dataframe or series.
Parameters
----------
X : {array-like, dataframe}
The array-like or dataframe object to check.
Returns
-------
bool
True if the X is a polars dataframe or series, False otherwise.
"""
try:
pl = sys.modules["polars"]
except KeyError:
return False
return isinstance(X, (pl.DataFrame, pl.Series))
def is_polars_df(X):
"""Return True if the X is a polars dataframe.
Parameters
----------
X : {array-like, dataframe}
The array-like or dataframe object to check.
Returns
-------
bool
True if the X is a polarsdataframe, False otherwise.
"""
try:
pl = sys.modules["polars"]
except KeyError:
return False
return isinstance(X, pl.DataFrame)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/random.py | sklearn/utils/random.py | """Utilities for random sampling."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import array
import numpy as np
import scipy.sparse as sp
from sklearn.utils import check_random_state
from sklearn.utils._random import sample_without_replacement
__all__ = ["sample_without_replacement"]
def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of \
shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array("i")
indices = array.array("i")
indptr = array.array("i", [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != "i":
raise ValueError("class dtype %s is not supported" % classes[j].dtype)
classes[j] = classes[j].astype(np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError(
"Probability array at index {0} does not sum to one".format(j)
)
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError(
"classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(
j, classes[j].shape[0], class_prob_j.shape[0]
)
)
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
index_class_0 = np.flatnonzero(classes[j] == 0).item()
p_nonzero = 1 - class_prob_j[index_class_0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(
n_population=n_samples, n_samples=nnz, random_state=random_state
)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = class_probability_nz / np.sum(
class_probability_nz
)
classes_ind = np.searchsorted(
class_probability_nz_norm.cumsum(), rng.uniform(size=nnz)
)
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_pprint.py | sklearn/utils/_pprint.py | """This module contains the _EstimatorPrettyPrinter class used in
BaseEstimator.__repr__ for pretty-printing estimators"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation;
# All Rights Reserved
# Authors: Fred L. Drake, Jr. <fdrake@acm.org> (built-in CPython pprint module)
# Nicolas Hug (scikit-learn specific changes)
# License: PSF License version 2 (see below)
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"),
# and the Individual or Organization ("Licensee") accessing and otherwise
# using this software ("Python") in source or binary form and its associated
# documentation.
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to
# reproduce, analyze, test, perform and/or display publicly, prepare
# derivative works, distribute, and otherwise use Python alone or in any
# derivative version, provided, however, that PSF's License Agreement and
# PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004,
# 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016,
# 2017, 2018 Python Software Foundation; All Rights Reserved" are retained in
# Python alone or in any derivative version prepared by Licensee.
# 3. In the event Licensee prepares a derivative work that is based on or
# incorporates Python or any part thereof, and wants to make the derivative
# work available to others as provided herein, then Licensee hereby agrees to
# include in any such work a brief summary of the changes made to Python.
# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES
# NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT
# NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF
# MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF
# PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY
# INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
# MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE
# THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote products
# or services of Licensee, or any third party.
# 8. By copying, installing or otherwise using Python, Licensee agrees to be
# bound by the terms and conditions of this License Agreement.
# Brief summary of changes to original code:
# - "compact" parameter is supported for dicts, not just lists or tuples
# - estimators have a custom handler, they're not just treated as objects
# - long sequences (lists, tuples, dict items) with more than N elements are
# shortened using ellipsis (', ...') at the end.
import inspect
import pprint
from sklearn._config import get_config
from sklearn.base import BaseEstimator
from sklearn.utils._missing import is_scalar_nan
class KeyValTuple(tuple):
"""Dummy class for correctly rendering key-value tuples from dicts."""
def __repr__(self):
# needed for _dispatch[tuple.__repr__] not to be overridden
return super().__repr__()
class KeyValTupleParam(KeyValTuple):
"""Dummy class for correctly rendering key-value tuples from parameters."""
pass
def _changed_params(estimator):
"""Return dict (param_name: value) of parameters that were given to
estimator with non-default values."""
params = estimator.get_params(deep=False)
init_func = getattr(estimator.__init__, "deprecated_original", estimator.__init__)
init_params = inspect.signature(init_func).parameters
init_params = {name: param.default for name, param in init_params.items()}
def has_changed(k, v):
if k not in init_params: # happens if k is part of a **kwargs
return True
if init_params[k] == inspect._empty: # k has no default value
return True
# try to avoid calling repr on nested estimators
if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
return True
# Use repr as a last resort. It may be expensive.
if repr(v) != repr(init_params[k]) and not (
is_scalar_nan(init_params[k]) and is_scalar_nan(v)
):
return True
return False
return {k: v for k, v in params.items() if has_changed(k, v)}
class _EstimatorPrettyPrinter(pprint.PrettyPrinter):
"""Pretty Printer class for estimator objects.
This extends the pprint.PrettyPrinter class, because:
- we need estimators to be printed with their parameters, e.g.
Estimator(param1=value1, ...) which is not supported by default.
- the 'compact' parameter of PrettyPrinter is ignored for dicts, which
may lead to very long representations that we want to avoid.
Quick overview of pprint.PrettyPrinter (see also
https://stackoverflow.com/questions/49565047/pprint-with-hex-numbers):
- the entry point is the _format() method which calls format() (overridden
here)
- format() directly calls _safe_repr() for a first try at rendering the
object
- _safe_repr formats the whole object recursively, only calling itself,
not caring about line length or anything
- back to _format(), if the output string is too long, _format() then calls
the appropriate _pprint_TYPE() method (e.g. _pprint_list()) depending on
the type of the object. This where the line length and the compact
parameters are taken into account.
- those _pprint_TYPE() methods will internally use the format() method for
rendering the nested objects of an object (e.g. the elements of a list)
In the end, everything has to be implemented twice: in _safe_repr and in
the custom _pprint_TYPE methods. Unfortunately PrettyPrinter is really not
straightforward to extend (especially when we want a compact output), so
the code is a bit convoluted.
This class overrides:
- format() to support the changed_only parameter
- _safe_repr to support printing of estimators (for when they fit on a
single line)
- _format_dict_items so that dict are correctly 'compacted'
- _format_items so that ellipsis is used on long lists and tuples
When estimators cannot be printed on a single line, the builtin _format()
will call _pprint_estimator() because it was registered to do so (see
_dispatch[BaseEstimator.__repr__] = _pprint_estimator).
both _format_dict_items() and _pprint_estimator() use the
_format_params_or_dict_items() method that will format parameters and
key-value pairs respecting the compact parameter. This method needs another
subroutine _pprint_key_val_tuple() used when a parameter or a key-value
pair is too long to fit on a single line. This subroutine is called in
_format() and is registered as well in the _dispatch dict (just like
_pprint_estimator). We had to create the two classes KeyValTuple and
KeyValTupleParam for this.
"""
def __init__(
self,
indent=1,
width=80,
depth=None,
stream=None,
*,
compact=False,
indent_at_name=True,
n_max_elements_to_show=None,
):
super().__init__(indent, width, depth, stream, compact=compact)
self._indent_at_name = indent_at_name
if self._indent_at_name:
self._indent_per_level = 1 # ignore indent param
self._changed_only = get_config()["print_changed_only"]
# Max number of elements in a list, dict, tuple until we start using
# ellipsis. This also affects the number of arguments of an estimators
# (they are treated as dicts)
self.n_max_elements_to_show = n_max_elements_to_show
def format(self, object, context, maxlevels, level):
return _safe_repr(
object, context, maxlevels, level, changed_only=self._changed_only
)
def _pprint_estimator(self, object, stream, indent, allowance, context, level):
stream.write(object.__class__.__name__ + "(")
if self._indent_at_name:
indent += len(object.__class__.__name__)
if self._changed_only:
params = _changed_params(object)
else:
params = object.get_params(deep=False)
self._format_params(
sorted(params.items()), stream, indent, allowance + 1, context, level
)
stream.write(")")
def _format_dict_items(self, items, stream, indent, allowance, context, level):
return self._format_params_or_dict_items(
items, stream, indent, allowance, context, level, is_dict=True
)
def _format_params(self, items, stream, indent, allowance, context, level):
return self._format_params_or_dict_items(
items, stream, indent, allowance, context, level, is_dict=False
)
def _format_params_or_dict_items(
self, object, stream, indent, allowance, context, level, is_dict
):
"""Format dict items or parameters respecting the compact=True
parameter. For some reason, the builtin rendering of dict items doesn't
respect compact=True and will use one line per key-value if all cannot
fit in a single line.
Dict items will be rendered as <'key': value> while params will be
rendered as <key=value>. The implementation is mostly copy/pasting from
the builtin _format_items().
This also adds ellipsis if the number of items is greater than
self.n_max_elements_to_show.
"""
write = stream.write
indent += self._indent_per_level
delimnl = ",\n" + " " * indent
delim = ""
width = max_width = self._width - indent + 1
it = iter(object)
try:
next_ent = next(it)
except StopIteration:
return
last = False
n_items = 0
while not last:
if n_items == self.n_max_elements_to_show:
write(", ...")
break
n_items += 1
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
k, v = ent
krepr = self._repr(k, context, level)
vrepr = self._repr(v, context, level)
if not is_dict:
krepr = krepr.strip("'")
middle = ": " if is_dict else "="
rep = krepr + middle + vrepr
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ", "
write(rep)
continue
write(delim)
delim = delimnl
class_ = KeyValTuple if is_dict else KeyValTupleParam
self._format(
class_(ent), stream, indent, allowance if last else 1, context, level
)
def _format_items(self, items, stream, indent, allowance, context, level):
"""Format the items of an iterable (list, tuple...). Same as the
built-in _format_items, with support for ellipsis if the number of
elements is greater than self.n_max_elements_to_show.
"""
write = stream.write
indent += self._indent_per_level
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * " ")
delimnl = ",\n" + " " * indent
delim = ""
width = max_width = self._width - indent + 1
it = iter(items)
try:
next_ent = next(it)
except StopIteration:
return
last = False
n_items = 0
while not last:
if n_items == self.n_max_elements_to_show:
write(", ...")
break
n_items += 1
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ", "
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent, allowance if last else 1, context, level)
def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level):
"""Pretty printing for key-value tuples from dict or parameters."""
k, v = object
rep = self._repr(k, context, level)
if isinstance(object, KeyValTupleParam):
rep = rep.strip("'")
middle = "="
else:
middle = ": "
stream.write(rep)
stream.write(middle)
self._format(
v, stream, indent + len(rep) + len(middle), allowance, context, level
)
# Note: need to copy _dispatch to prevent instances of the builtin
# PrettyPrinter class to call methods of _EstimatorPrettyPrinter (see issue
# 12906)
# mypy error: "Type[PrettyPrinter]" has no attribute "_dispatch"
_dispatch = pprint.PrettyPrinter._dispatch.copy() # type: ignore[attr-defined]
_dispatch[BaseEstimator.__repr__] = _pprint_estimator
_dispatch[KeyValTuple.__repr__] = _pprint_key_val_tuple
def _safe_repr(object, context, maxlevels, level, changed_only=False):
"""Same as the builtin _safe_repr, with added support for Estimator
objects."""
typ = type(object)
if typ in pprint._builtin_scalars:
return repr(object), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return pprint._recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=pprint._safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(
k, context, maxlevels, level, changed_only=changed_only
)
vrepr, vreadable, vrecur = saferepr(
v, context, maxlevels, level, changed_only=changed_only
)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or (
issubclass(typ, tuple) and r is tuple.__repr__
):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return pprint._recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(
o, context, maxlevels, level, changed_only=changed_only
)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
if issubclass(typ, BaseEstimator):
objid = id(object)
if maxlevels and level >= maxlevels:
return f"{typ.__name__}(...)", False, objid in context
if objid in context:
return pprint._recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
if changed_only:
params = _changed_params(object)
else:
params = object.get_params(deep=False)
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(params.items(), key=pprint._safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(
k, context, maxlevels, level, changed_only=changed_only
)
vrepr, vreadable, vrecur = saferepr(
v, context, maxlevels, level, changed_only=changed_only
)
append("%s=%s" % (krepr.strip("'"), vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return ("%s(%s)" % (typ.__name__, ", ".join(components)), readable, recursive)
rep = repr(object)
return rep, (rep and not rep.startswith("<")), False
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/__init__.py | sklearn/utils/__init__.py | """Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.exceptions import DataConversionWarning
from sklearn.utils import metadata_routing
from sklearn.utils._bunch import Bunch
from sklearn.utils._chunking import gen_batches, gen_even_slices
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from sklearn.utils._indexing import _safe_indexing, resample, shuffle
from sklearn.utils._mask import safe_mask
from sklearn.utils._repr_html.base import _HTMLDocumentationLinkMixin # noqa: F401
from sklearn.utils._repr_html.estimator import estimator_html_repr
from sklearn.utils._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
from sklearn.utils.deprecation import deprecated
from sklearn.utils.discovery import all_estimators
from sklearn.utils.extmath import safe_sqr
from sklearn.utils.murmurhash import murmurhash3_32
from sklearn.utils.validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# Public API of `sklearn.utils`.
# NOTE: entries are kept in ASCII sort order (uppercase names, then the
# underscore-prefixed `_safe_indexing`, then lowercase names).
__all__ = [
    "Bunch",
    "ClassifierTags",
    "DataConversionWarning",
    "InputTags",
    "RegressorTags",
    "Tags",
    "TargetTags",
    "TransformerTags",
    "_safe_indexing",
    "all_estimators",
    "as_float_array",
    "assert_all_finite",
    "check_X_y",
    "check_array",
    "check_consistent_length",
    "check_random_state",
    "check_scalar",
    "check_symmetric",
    "column_or_1d",
    "compute_class_weight",
    "compute_sample_weight",
    "deprecated",
    "estimator_html_repr",
    "gen_batches",
    "gen_even_slices",
    "get_tags",
    "indexable",
    "metadata_routing",
    "murmurhash3_32",
    "resample",
    "safe_mask",
    "safe_sqr",
    "shuffle",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_plotting.py | sklearn/utils/_plotting.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from collections.abc import Mapping
import numpy as np
from sklearn.utils import check_consistent_length
from sklearn.utils._optional_dependencies import check_matplotlib_support
from sklearn.utils._response import _get_response_values_binary
from sklearn.utils.fixes import parse_version
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _check_pos_label_consistency, _num_samples
class _BinaryClassifierCurveDisplayMixin:
    """Mixin class to be used in Displays requiring a binary classifier.
    The aim of this class is to centralize some validations regarding the estimator and
    the target and gather the response of the estimator.
    """
    def _validate_plot_params(self, *, ax=None, name=None):
        # Check matplotlib availability lazily, only when plotting is requested.
        check_matplotlib_support(f"{self.__class__.__name__}.plot")
        import matplotlib.pyplot as plt
        if ax is None:
            _, ax = plt.subplots()
        # Display classes are in process of changing from `estimator_name` to `name`.
        # Try old attr name: `estimator_name` first.
        if name is None:
            name = getattr(self, "estimator_name", getattr(self, "name", None))
        return ax, ax.figure, name
    @classmethod
    def _validate_and_get_response_values(
        cls, estimator, X, y, *, response_method="auto", pos_label=None, name=None
    ):
        # Resolve the binary decision scores for `X`; `name` falls back to the
        # estimator's class name when not provided.
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        name = estimator.__class__.__name__ if name is None else name
        y_pred, pos_label = _get_response_values_binary(
            estimator,
            X,
            response_method=response_method,
            pos_label=pos_label,
        )
        return y_pred, pos_label, name
    @classmethod
    def _validate_from_predictions_params(
        cls, y_true, y_pred, *, sample_weight=None, pos_label=None, name=None
    ):
        # Validate that `y_true` is binary and consistent with `y_pred`, and
        # resolve `pos_label`/`name` defaults.
        check_matplotlib_support(f"{cls.__name__}.from_predictions")
        if type_of_target(y_true) != "binary":
            raise ValueError(
                f"The target y is not binary. Got {type_of_target(y_true)} type of"
                " target."
            )
        check_consistent_length(y_true, y_pred, sample_weight)
        pos_label = _check_pos_label_consistency(pos_label, y_true)
        name = name if name is not None else "Classifier"
        return pos_label, name
    @classmethod
    def _validate_from_cv_results_params(
        cls,
        cv_results,
        X,
        y,
        *,
        sample_weight,
    ):
        check_matplotlib_support(f"{cls.__name__}.from_cv_results")
        # `cross_validate` must have been called with `return_estimator=True`
        # and `return_indices=True` for the display to reuse the fitted folds.
        required_keys = {"estimator", "indices"}
        if not all(key in cv_results for key in required_keys):
            raise ValueError(
                "`cv_results` does not contain one of the following required keys: "
                f"{required_keys}. Set explicitly the parameters "
                "`return_estimator=True` and `return_indices=True` to the function"
                "`cross_validate`."
            )
        train_size, test_size = (
            len(cv_results["indices"]["train"][0]),
            len(cv_results["indices"]["test"][0]),
        )
        if _num_samples(X) != train_size + test_size:
            raise ValueError(
                "`X` does not contain the correct number of samples. "
                f"Expected {train_size + test_size}, got {_num_samples(X)}."
            )
        if type_of_target(y) != "binary":
            raise ValueError(
                f"The target `y` is not binary. Got {type_of_target(y)} type of target."
            )
        check_consistent_length(X, y, sample_weight)
    @staticmethod
    def _get_legend_label(curve_legend_metric, curve_name, legend_metric_name):
        """Helper to get legend label using `name` and `legend_metric`"""
        # Four cases: both metric and name, metric only, name only, neither.
        if curve_legend_metric is not None and curve_name is not None:
            label = f"{curve_name} ({legend_metric_name} = {curve_legend_metric:0.2f})"
        elif curve_legend_metric is not None:
            label = f"{legend_metric_name} = {curve_legend_metric:0.2f}"
        elif curve_name is not None:
            label = curve_name
        else:
            label = None
        return label
    @staticmethod
    def _validate_curve_kwargs(
        n_curves,
        name,
        legend_metric,
        legend_metric_name,
        curve_kwargs,
        default_curve_kwargs=None,
        default_multi_curve_kwargs=None,
        **kwargs,
    ):
        """Get validated line kwargs for each curve.
        Parameters
        ----------
        n_curves : int
            Number of curves.
        name : list of str or None
            Name for labeling legend entries.
        legend_metric : dict
            Dictionary with "mean" and "std" keys, or "metric" key of metric
            values for each curve. If None, "label" will not contain metric values.
        legend_metric_name : str
            Name of the summary value provided in `legend_metrics`.
        curve_kwargs : dict or list of dict or None
            Dictionary with keywords passed to the matplotlib's `plot` function
            to draw the individual curves. If a list is provided, the
            parameters are applied to the curves sequentially. If a single
            dictionary is provided, the same parameters are applied to all
            curves.
        default_curve_kwargs : dict, default=None
            Default curve kwargs, to be added to all curves. Individual kwargs
            are over-ridden by `curve_kwargs`, if kwarg also set in `curve_kwargs`.
        default_multi_curve_kwargs : dict, default=None
            Default curve kwargs for multi-curve plots. Individual kwargs
            are over-ridden by `curve_kwargs`, if kwarg also set in `curve_kwargs`.
        **kwargs : dict
            Deprecated. Keyword arguments to be passed to matplotlib's `plot`.
        """
        # TODO(1.9): Remove deprecated **kwargs
        if curve_kwargs and kwargs:
            raise ValueError(
                "Cannot provide both `curve_kwargs` and `kwargs`. `**kwargs` is "
                "deprecated in 1.7 and will be removed in 1.9. Pass all matplotlib "
                "arguments to `curve_kwargs` as a dictionary."
            )
        if kwargs:
            warnings.warn(
                "`**kwargs` is deprecated and will be removed in 1.9. Pass all "
                "matplotlib arguments to `curve_kwargs` as a dictionary instead.",
                FutureWarning,
            )
            curve_kwargs = kwargs
        if isinstance(curve_kwargs, list) and len(curve_kwargs) != n_curves:
            raise ValueError(
                f"`curve_kwargs` must be None, a dictionary or a list of length "
                f"{n_curves}. Got: {curve_kwargs}."
            )
        # Ensure valid `name` and `curve_kwargs` combination.
        if (
            isinstance(name, list)
            and len(name) != 1
            and not isinstance(curve_kwargs, list)
        ):
            raise ValueError(
                "To avoid labeling individual curves that have the same appearance, "
                f"`curve_kwargs` should be a list of {n_curves} dictionaries. "
                "Alternatively, set `name` to `None` or a single string to label "
                "a single legend entry with mean ROC AUC score of all curves."
            )
        # Ensure `name` is of the correct length
        if isinstance(name, str):
            name = [name]
        if isinstance(name, list) and len(name) == 1:
            name = name * n_curves
        name = [None] * n_curves if name is None else name
        # Ensure `curve_kwargs` is of correct length
        if isinstance(curve_kwargs, Mapping):
            curve_kwargs = [curve_kwargs] * n_curves
        elif curve_kwargs is None:
            curve_kwargs = [{}] * n_curves
        if default_curve_kwargs is None:
            default_curve_kwargs = {}
        if default_multi_curve_kwargs is None:
            default_multi_curve_kwargs = {}
        if n_curves > 1:
            default_curve_kwargs.update(default_multi_curve_kwargs)
        labels = []
        if "mean" in legend_metric:
            # Aggregate mode: one legend entry summarizing all curves.
            label_aggregate = _BinaryClassifierCurveDisplayMixin._get_legend_label(
                legend_metric["mean"], name[0], legend_metric_name
            )
            # Note: "std" always `None` when "mean" is `None` - no metric value added
            # to label in this case
            if legend_metric["std"] is not None:
                # Add the "+/- std" to the end (in brackets if name provided)
                if name[0] is not None:
                    label_aggregate = (
                        label_aggregate[:-1] + f" +/- {legend_metric['std']:0.2f})"
                    )
                else:
                    label_aggregate = (
                        label_aggregate + f" +/- {legend_metric['std']:0.2f}"
                    )
            # Add `label` for first curve only, set to `None` for remaining curves
            labels.extend([label_aggregate] + [None] * (n_curves - 1))
        else:
            # Per-curve mode: one legend entry per curve.
            for curve_legend_metric, curve_name in zip(legend_metric["metric"], name):
                labels.append(
                    _BinaryClassifierCurveDisplayMixin._get_legend_label(
                        curve_legend_metric, curve_name, legend_metric_name
                    )
                )
        curve_kwargs_ = [
            _validate_style_kwargs(
                {"label": label, **default_curve_kwargs}, curve_kwargs[fold_idx]
            )
            for fold_idx, label in enumerate(labels)
        ]
        return curve_kwargs_
def _validate_score_name(score_name, scoring, negate_score):
"""Validate the `score_name` parameter.
If `score_name` is provided, we just return it as-is.
If `score_name` is `None`, we use `Score` if `negate_score` is `False` and
`Negative score` otherwise.
If `score_name` is a string or a callable, we infer the name. We replace `_` by
spaces and capitalize the first letter. We remove `neg_` and replace it by
`"Negative"` if `negate_score` is `False` or just remove it otherwise.
"""
if score_name is not None:
return score_name
elif scoring is None:
return "Negative score" if negate_score else "Score"
else:
score_name = scoring.__name__ if callable(scoring) else scoring
if negate_score:
if score_name.startswith("neg_"):
score_name = score_name[4:]
else:
score_name = f"Negative {score_name}"
elif score_name.startswith("neg_"):
score_name = f"Negative {score_name[4:]}"
score_name = score_name.replace("_", " ")
return score_name.capitalize()
def _interval_max_min_ratio(data):
"""Compute the ratio between the largest and smallest inter-point distances.
A value larger than 5 typically indicates that the parameter range would
better be displayed with a log scale while a linear scale would be more
suitable otherwise.
"""
diff = np.diff(np.sort(data))
return diff.max() / diff.min()
def _validate_style_kwargs(default_style_kwargs, user_style_kwargs):
"""Create valid style kwargs by avoiding Matplotlib alias errors.
Matplotlib raises an error when, for example, 'color' and 'c', or 'linestyle' and
'ls', are specified together. To avoid this, we automatically keep only the one
specified by the user and raise an error if the user specifies both.
Parameters
----------
default_style_kwargs : dict
The Matplotlib style kwargs used by default in the scikit-learn display.
user_style_kwargs : dict
The user-defined Matplotlib style kwargs.
Returns
-------
valid_style_kwargs : dict
The validated style kwargs taking into account both default and user-defined
Matplotlib style kwargs.
"""
invalid_to_valid_kw = {
"ls": "linestyle",
"c": "color",
"ec": "edgecolor",
"fc": "facecolor",
"lw": "linewidth",
"mec": "markeredgecolor",
"mfcalt": "markerfacecoloralt",
"ms": "markersize",
"mew": "markeredgewidth",
"mfc": "markerfacecolor",
"aa": "antialiased",
"ds": "drawstyle",
"font": "fontproperties",
"family": "fontfamily",
"name": "fontname",
"size": "fontsize",
"stretch": "fontstretch",
"style": "fontstyle",
"variant": "fontvariant",
"weight": "fontweight",
"ha": "horizontalalignment",
"va": "verticalalignment",
"ma": "multialignment",
}
for invalid_key, valid_key in invalid_to_valid_kw.items():
if invalid_key in user_style_kwargs and valid_key in user_style_kwargs:
raise TypeError(
f"Got both {invalid_key} and {valid_key}, which are aliases of one "
"another"
)
valid_style_kwargs = default_style_kwargs.copy()
for key in user_style_kwargs.keys():
if key in invalid_to_valid_kw:
valid_style_kwargs[invalid_to_valid_kw[key]] = user_style_kwargs[key]
else:
valid_style_kwargs[key] = user_style_kwargs[key]
return valid_style_kwargs
def _despine(ax):
"""Remove the top and right spines of the plot.
Parameters
----------
ax : matplotlib.axes.Axes
The axes of the plot to despine.
"""
for s in ["top", "right"]:
ax.spines[s].set_visible(False)
for s in ["bottom", "left"]:
ax.spines[s].set_bounds(0, 1)
def _deprecate_estimator_name(estimator_name, name, version):
    """Handle the deprecation of `estimator_name` in favour of `name`."""
    version = parse_version(version)
    version_remove = f"{version.major}.{version.minor + 2}"
    if estimator_name == "deprecated":
        # `estimator_name` left at its sentinel default: nothing deprecated used.
        return name
    if name:
        raise ValueError(
            "Cannot provide both `estimator_name` and `name`. `estimator_name` "
            f"is deprecated in {version} and will be removed in {version_remove}. "
            "Use `name` only."
        )
    warnings.warn(
        f"`estimator_name` is deprecated in {version} and will be removed in "
        f"{version_remove}. Use `name` instead.",
        FutureWarning,
    )
    return estimator_name
def _convert_to_list_leaving_none(param):
"""Convert parameters to a list, leaving `None` as is."""
if param is None:
return None
if isinstance(param, list):
return param
return [param]
def _check_param_lengths(required, optional, class_name):
"""Check required and optional parameters are of the same length."""
optional_provided = {}
for name, param in optional.items():
if isinstance(param, list):
optional_provided[name] = param
all_params = {**required, **optional_provided}
if len({len(param) for param in all_params.values()}) > 1:
param_keys = [key for key in all_params.keys()]
# Note: below code requires `len(param_keys) >= 2`, which is the case for all
# display classes
params_formatted = " and ".join([", ".join(param_keys[:-1]), param_keys[-1]])
or_plot = ""
if "'name' (or self.name)" in param_keys:
or_plot = " (or `plot`)"
lengths_formatted = ", ".join(
f"{key}: {len(value)}" for key, value in all_params.items()
)
raise ValueError(
f"{params_formatted} from `{class_name}` initialization{or_plot}, "
f"should all be lists of the same length. Got: {lengths_formatted}"
)
# TODO(1.10): remove after the end of the deprecation period of `y_pred`
def _deprecate_y_pred_parameter(y_score, y_pred, version):
    """Handle the deprecation of `y_pred` in favour of `y_score`."""
    version = parse_version(version)
    version_remove = f"{version.major}.{version.minor + 2}"
    # `y_pred` is considered provided unless it still holds its sentinel value.
    y_pred_provided = not (isinstance(y_pred, str) and y_pred == "deprecated")
    if y_pred_provided and y_score is not None:
        raise ValueError(
            "`y_pred` and `y_score` cannot be both specified. Please use `y_score`"
            f" only as `y_pred` was deprecated in {version} and will be "
            f"removed in {version_remove}."
        )
    if y_pred_provided:
        warnings.warn(
            (
                f"y_pred was deprecated in {version} and will be removed in"
                f" {version_remove}. Please use `y_score` instead."
            ),
            FutureWarning,
        )
        return y_pred
    return y_score
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_user_interface.py | sklearn/utils/_user_interface.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import timeit
from contextlib import contextmanager
def _message_with_time(source, message, time):
"""Create one line message for logging purposes.
Parameters
----------
source : str
String indicating the source or the reference of the message.
message : str
Short message.
time : int
Time in seconds.
"""
start_message = "[%s] " % source
# adapted from joblib.logger.short_format_time without the Windows -.1s
# adjustment
if time > 60:
time_str = "%4.1fmin" % (time / 60)
else:
time_str = " %5.1fs" % time
end_message = " %s, total=%s" % (message, time_str)
dots_len = 70 - len(start_message) - len(end_message)
return "%s%s%s" % (start_message, dots_len * ".", end_message)
@contextmanager
def _print_elapsed_time(source, message=None):
"""Log elapsed time to stdout when the context is exited.
Parameters
----------
source : str
String indicating the source or the reference of the message.
message : str, default=None
Short message. If None, nothing will be printed.
Returns
-------
context_manager
Prints elapsed time upon exit if verbose.
"""
if message is None:
yield
else:
start = timeit.default_timer()
yield
print(_message_with_time(source, message, timeit.default_timer() - start))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_optional_dependencies.py | sklearn/utils/_optional_dependencies.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
def check_matplotlib_support(caller_name):
    """Raise a detailed ImportError when matplotlib is not installed.
    Plot utilities like any of the Display's plotting functions should lazily import
    matplotlib and call this helper before any computation.
    Parameters
    ----------
    caller_name : str
        The name of the caller that requires matplotlib.
    """
    try:
        import matplotlib  # noqa: F401
    except ImportError as exc:
        message = (
            "{} requires matplotlib. You can install matplotlib with "
            "`pip install matplotlib`".format(caller_name)
        )
        raise ImportError(message) from exc
def check_pandas_support(caller_name):
    """Return the pandas module, raising a detailed ImportError if missing.
    Plot utilities like :func:`fetch_openml` should lazily import
    pandas and call this helper before any computation.
    Parameters
    ----------
    caller_name : str
        The name of the caller that requires pandas.
    Returns
    -------
    pandas
        The pandas package.
    """
    try:
        import pandas
    except ImportError as exc:
        raise ImportError("{} requires pandas.".format(caller_name)) from exc
    return pandas
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_unique.py | sklearn/utils/_unique.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.utils._array_api import get_namespace
def _attach_unique(y):
"""Attach unique values of y to y and return the result.
The result is a view of y, and the metadata (unique) is not attached to y.
"""
if not isinstance(y, np.ndarray):
return y
try:
# avoid recalculating unique in nested calls.
if "unique" in y.dtype.metadata:
return y
except (AttributeError, TypeError):
pass
unique = np.unique(y)
unique_dtype = np.dtype(y.dtype, metadata={"unique": unique})
return y.view(dtype=unique_dtype)
def attach_unique(*ys, return_tuple=False):
    """Attach cached unique values to each of `ys` and return the results.
    Each result is a view of the corresponding input; the inputs themselves
    are left untouched.
    IMPORTANT: The output of this function should NEVER be returned in
    functions, because mutating a returned view desynchronizes the cached
    unique values from the data:
    .. code:: python
        y = np.array([1, 2, 3])
        y = attach_unique(y)
        y[1] = -1
        # now np.unique(y) will be different from cached_unique(y)
    Parameters
    ----------
    *ys : sequence of array-like
        Input data arrays.
    return_tuple : bool, default=False
        If True, always return a tuple even if there is only one array.
    Returns
    -------
    ys : tuple of array-like or array-like
        Input data with unique values attached.
    """
    outputs = tuple(_attach_unique(y) for y in ys)
    if return_tuple or len(outputs) != 1:
        return outputs
    return outputs[0]
def _cached_unique(y, xp=None):
"""Return the unique values of y.
Use the cached values from dtype.metadata if present.
This function does NOT cache the values in y, i.e. it doesn't change y.
Call `attach_unique` to attach the unique values to y.
"""
try:
if y.dtype.metadata is not None and "unique" in y.dtype.metadata:
return y.dtype.metadata["unique"]
except AttributeError:
# in case y is not a numpy array
pass
xp, _ = get_namespace(y, xp=xp)
return xp.unique_values(y)
def cached_unique(*ys, xp=None):
    """Return the unique values of each array in `ys`.
    Cached values stored in ``dtype.metadata`` (see `attach_unique`) are
    reused when present; the inputs are never modified.
    Parameters
    ----------
    *ys : sequence of array-like
        Input data arrays.
    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.
    Returns
    -------
    res : tuple of array-like or array-like
        Unique values of ys.
    """
    uniques = tuple(_cached_unique(y, xp=xp) for y in ys)
    return uniques[0] if len(uniques) == 1 else uniques
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/stats.py | sklearn/utils/stats.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.utils._array_api import (
_find_matching_floating_dtype,
get_namespace_and_device,
)
def _weighted_percentile(
    array, sample_weight, percentile_rank=50, average=False, xp=None
):
    """Compute the weighted percentile.
    Implement an array API compatible (weighted version) of NumPy's 'inverted_cdf'
    method when `average=False` (default) and 'averaged_inverted_cdf' when
    `average=True`.
    For an array ordered by increasing values, when the percentile lies exactly on a
    data point:
    * 'inverted_cdf' takes the exact data point.
    * 'averaged_inverted_cdf' takes the average of the exact data point and the one
      above it (this means it gives the same result as `median` for unit weights).
    E.g., for the array [1, 2, 3, 4] the percentile rank at each data point would
    be [25, 50, 75, 100]. Percentile rank 50 lies on '2'. 'average_inverted_cdf'
    computes the average of '2' and '3', making it 'symmetrical' because if you
    reverse the array, rank 50 would fall on '3'. It also matches 'median'.
    On the other hand, 'inverted_cdf', which does not satisfy the symmetry property,
    would give '2'.
    When the requested percentile lies between two data points, both methods return
    the higher data point.
    E.g., for the array [1, 2, 3, 4, 5] the percentile rank at each data point would
    be [20, 40, 60, 80, 100]. Percentile rank 50, lies between '2' and '3'. Taking the
    higher data point is symmetrical because if you reverse the array, 50 would lie
    between '4' and '3'. Both methods match median in this case.
    If `array` is a 2D array, the `values` are selected along axis 0.
    `NaN` values are ignored by setting their weights to 0. If `array` is 2D, this
    is done in a column-isolated manner: a `NaN` in the second column, does not impact
    the percentile computed for the first column even if `sample_weight` is 1D.
    .. versionchanged:: 0.24
        Accepts 2D `array`.
    .. versionchanged:: 1.7
        Supports handling of `NaN` values.
    .. versionchanged:: 1.8
        Supports `average`, which calculates percentile using the
        "averaged_inverted_cdf" method.
    Parameters
    ----------
    array : 1D or 2D array
        Values to take the weighted percentile of.
    sample_weight: 1D or 2D array
        Weights for each value in `array`. Must be same shape as `array` or of shape
        `(array.shape[0],)`.
    percentile_rank: scalar or 1D array, default=50
        The probability level(s) of the percentile(s) to compute, in percent. Must be
        between 0 and 100. If a 1D array, computes all percentiles (along each
        axis 0 if `array` is 2D).
    average : bool, default=False
        If `True`, uses the "averaged_inverted_cdf" quantile method, otherwise
        defaults to "inverted_cdf". "averaged_inverted_cdf" is symmetrical with
        unit `sample_weight`, such that the total of `sample_weight` below or equal to
        `_weighted_percentile(percentile_rank)` is the same as the total of
        `sample_weight` above or equal to `_weighted_percentile(100-percentile_rank)`.
        This symmetry is not guaranteed with non-unit weights.
    xp : array_namespace, default=None
        The standard-compatible namespace for `array`. Default: infer.
    Returns
    -------
    percentile : scalar, 1D array, or 2D array
        Weighted percentile at the requested probability level(s).
        If `array` is 1D and `percentile_rank` is scalar, returns a scalar.
        If `array` is 2D and `percentile_rank` is scalar, returns a 1D array
        of shape `(array.shape[1],)`
        If `array` is 1D and `percentile_rank` is 1D, returns a 1D array
        of shape `(percentile_rank.shape[0],)`
        If `array` is 2D and `percentile_rank` is 1D, returns a 2D array
        of shape `(array.shape[1], percentile_rank.shape[0])`
    """
    xp, _, device = get_namespace_and_device(array)
    # `sample_weight` should follow `array` for dtypes
    floating_dtype = _find_matching_floating_dtype(array, xp=xp)
    array = xp.asarray(array, dtype=floating_dtype, device=device)
    sample_weight = xp.asarray(sample_weight, dtype=floating_dtype, device=device)
    percentile_rank = xp.asarray(percentile_rank, dtype=floating_dtype, device=device)
    n_dim = array.ndim
    if n_dim == 0:
        # 0D input: nothing to rank, return as-is.
        return array
    if array.ndim == 1:
        # Work internally on shape (n_samples, n_features) with n_features=1.
        array = xp.reshape(array, (-1, 1))
    # When sample_weight 1D, repeat for each array.shape[1]
    if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
        sample_weight = xp.tile(sample_weight, (array.shape[1], 1)).T
    n_dim_percentile = percentile_rank.ndim
    if n_dim_percentile == 0:
        percentile_rank = xp.reshape(percentile_rank, (1,))
    # Sort `array` and `sample_weight` along axis=0:
    sorted_idx = xp.argsort(array, axis=0, stable=False)
    sorted_weights = xp.take_along_axis(sample_weight, sorted_idx, axis=0)
    # Set NaN values in `sample_weight` to 0. Only perform this operation if NaN
    # values present to avoid temporary allocations of size `(n_samples, n_features)`.
    n_features = array.shape[1]
    largest_value_per_column = array[
        sorted_idx[-1, ...], xp.arange(n_features, device=device)
    ]
    # NaN values get sorted to end (largest value)
    if xp.any(xp.isnan(largest_value_per_column)):
        sorted_nan_mask = xp.take_along_axis(xp.isnan(array), sorted_idx, axis=0)
        sorted_weights[sorted_nan_mask] = 0
    # Compute the weighted cumulative distribution function (CDF) based on
    # `sample_weight` and scale `percentile_rank` along it.
    #
    # Note: we call `xp.cumulative_sum` on the transposed `sorted_weights` to
    # ensure that the result is of shape `(n_features, n_samples)` so
    # `xp.searchsorted` calls take contiguous inputs as a result (for
    # performance reasons).
    weight_cdf = xp.cumulative_sum(sorted_weights.T, axis=1)
    n_percentiles = percentile_rank.shape[0]
    result = xp.empty((n_features, n_percentiles), dtype=floating_dtype, device=device)
    # One pass per requested percentile; each pass fills result[:, p_idx].
    for p_idx, p_rank in enumerate(percentile_rank):
        adjusted_percentile_rank = p_rank / 100 * weight_cdf[..., -1]
        # Ignore leading `sample_weight=0` observations
        # when `percentile_rank=0` (#20528)
        mask = adjusted_percentile_rank == 0
        adjusted_percentile_rank[mask] = xp.nextafter(
            adjusted_percentile_rank[mask], adjusted_percentile_rank[mask] + 1
        )
        # For each feature with index j, find sample index i of the scalar value
        # `adjusted_percentile_rank[j]` in 1D array `weight_cdf[j]`, such that:
        # weight_cdf[j, i-1] < adjusted_percentile_rank[j] <= weight_cdf[j, i].
        # Note `searchsorted` defaults to equality on the right, whereas Hyndman and Fan
        # reference equation has equality on the left.
        percentile_indices = xp.stack(
            [
                xp.searchsorted(
                    weight_cdf[feature_idx, ...], adjusted_percentile_rank[feature_idx]
                )
                for feature_idx in range(weight_cdf.shape[0])
            ],
        )
        # `percentile_indices` may be equal to `sorted_idx.shape[0]` due to floating
        # point error (see #11813)
        max_idx = sorted_idx.shape[0] - 1
        percentile_indices = xp.clip(percentile_indices, 0, max_idx)
        col_indices = xp.arange(array.shape[1], device=device)
        percentile_in_sorted = sorted_idx[percentile_indices, col_indices]
        if average:
            # From Hyndman and Fan (1996), `fraction_above` is `g`
            fraction_above = (
                weight_cdf[col_indices, percentile_indices] - adjusted_percentile_rank
            )
            is_fraction_above = fraction_above > xp.finfo(floating_dtype).eps
            percentile_plus_one_indices = xp.clip(percentile_indices + 1, 0, max_idx)
            percentile_plus_one_in_sorted = sorted_idx[
                percentile_plus_one_indices, col_indices
            ]
            # Handle case when next index ('plus one') has sample weight of 0
            zero_weight_cols = col_indices[
                sample_weight[percentile_plus_one_in_sorted, col_indices] == 0
            ]
            for col_idx in zero_weight_cols:
                cdf_val = weight_cdf[col_idx, percentile_indices[col_idx]]
                # Search for next index where `weighted_cdf` is greater
                next_index = xp.searchsorted(
                    weight_cdf[col_idx, ...], cdf_val, side="right"
                )
                # Handle case where there are trailing 0 sample weight samples
                # and `percentile_indices` is already max index
                if next_index >= max_idx:
                    # use original `percentile_indices` again
                    next_index = percentile_indices[col_idx]
                percentile_plus_one_in_sorted[col_idx] = sorted_idx[next_index, col_idx]
            result[..., p_idx] = xp.where(
                is_fraction_above,
                array[percentile_in_sorted, col_indices],
                (
                    array[percentile_in_sorted, col_indices]
                    + array[percentile_plus_one_in_sorted, col_indices]
                )
                / 2,
            )
        else:
            result[..., p_idx] = array[percentile_in_sorted, col_indices]
    if n_dim_percentile == 0:
        # Scalar `percentile_rank`: drop the trailing percentile axis.
        result = result[..., 0]
    return result[0, ...] if n_dim == 1 else result
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/parallel.py | sklearn/utils/parallel.py | """Customizations of :mod:`joblib` and :mod:`threadpoolctl` tools for scikit-learn
usage.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
import warnings
from functools import update_wrapper
import joblib
from threadpoolctl import ThreadpoolController
from sklearn._config import config_context, get_config
# Global threadpool controller instance that can be used to locally limit the number of
# threads without looping through all shared libraries every time.
# It should not be accessed directly and _get_threadpool_controller should be used
# instead.
# Lazily initialized elsewhere; stays `None` until first use.
_threadpool_controller = None
def _with_config_and_warning_filters(delayed_func, config, warning_filters):
"""Helper function that intends to attach a config to a delayed function."""
if hasattr(delayed_func, "with_config_and_warning_filters"):
return delayed_func.with_config_and_warning_filters(config, warning_filters)
else:
warnings.warn(
(
"`sklearn.utils.parallel.Parallel` needs to be used in "
"conjunction with `sklearn.utils.parallel.delayed` instead of "
"`joblib.delayed` to correctly propagate the scikit-learn "
"configuration to the joblib workers."
),
UserWarning,
)
return delayed_func
class Parallel(joblib.Parallel):
    """Tweak of :class:`joblib.Parallel` that propagates the scikit-learn configuration.
    This subclass of :class:`joblib.Parallel` ensures that the active configuration
    (thread-local) of scikit-learn is propagated to the parallel workers for the
    duration of the execution of the parallel tasks.
    The API does not change and you can refer to :class:`joblib.Parallel`
    documentation for more details.
    .. versionadded:: 1.3
    """
    def __call__(self, iterable):
        """Dispatch the tasks and return the results.
        Parameters
        ----------
        iterable : iterable
            Iterable containing tuples of (delayed_function, args, kwargs) that should
            be consumed.
        Returns
        -------
        results : list
            List of results of the tasks.
        """
        # Capture the thread-local scikit-learn configuration at the time
        # Parallel.__call__ is issued since the tasks can be dispatched
        # in a different thread depending on the backend and on the value of
        # pre_dispatch and n_jobs.
        config = get_config()
        # In free-threading Python >= 3.14, warnings filters are managed through a
        # ContextVar and warnings.filters is not modified inside a
        # warnings.catch_warnings context. You need to use warnings._get_filters().
        # For more details, see
        # https://docs.python.org/3.14/whatsnew/3.14.html#concurrent-safe-warnings-control
        filters_func = getattr(warnings, "_get_filters", None)
        warning_filters = (
            filters_func() if filters_func is not None else warnings.filters
        )
        # Lazily (generator, not list) wrap each task so the captured config and
        # filters are restored in the worker just before the task runs.
        iterable_with_config_and_warning_filters = (
            (
                _with_config_and_warning_filters(delayed_func, config, warning_filters),
                args,
                kwargs,
            )
            for delayed_func, args, kwargs in iterable
        )
        return super().__call__(iterable_with_config_and_warning_filters)
# remove when https://github.com/joblib/joblib/issues/1071 is fixed
def delayed(function):
"""Decorator used to capture the arguments of a function.
This alternative to `joblib.delayed` is meant to be used in conjunction
with `sklearn.utils.parallel.Parallel`. The latter captures the scikit-
learn configuration by calling `sklearn.get_config()` in the current
thread, prior to dispatching the first task. The captured configuration is
then propagated and enabled for the duration of the execution of the
delayed function in the joblib workers.
.. versionchanged:: 1.3
`delayed` was moved from `sklearn.utils.fixes` to `sklearn.utils.parallel`
in scikit-learn 1.3.
Parameters
----------
function : callable
The function to be delayed.
Returns
-------
output: tuple
Tuple containing the delayed function, the positional arguments, and the
keyword arguments.
"""
@functools.wraps(function)
def delayed_function(*args, **kwargs):
return _FuncWrapper(function), args, kwargs
return delayed_function
class _FuncWrapper:
"""Load the global configuration before calling the function."""
def __init__(self, function):
self.function = function
update_wrapper(self, self.function)
def with_config_and_warning_filters(self, config, warning_filters):
self.config = config
self.warning_filters = warning_filters
return self
def __call__(self, *args, **kwargs):
config = getattr(self, "config", {})
warning_filters = getattr(self, "warning_filters", [])
if not config or not warning_filters:
warnings.warn(
(
"`sklearn.utils.parallel.delayed` should be used with"
" `sklearn.utils.parallel.Parallel` to make it possible to"
" propagate the scikit-learn configuration of the current thread to"
" the joblib workers."
),
UserWarning,
)
with config_context(**config), warnings.catch_warnings():
# TODO is there a simpler way that resetwarnings+ filterwarnings?
warnings.resetwarnings()
warning_filter_keys = ["action", "message", "category", "module", "lineno"]
for filter_args in warning_filters:
this_warning_filter_dict = {
k: v
for k, v in zip(warning_filter_keys, filter_args)
if v is not None
}
# Some small discrepancy between warnings filters and what
# filterwarnings expect. simplefilter is more lenient, e.g.
# accepts a tuple as category. We try simplefilter first and
# use filterwarnings in more complicated cases
if (
"message" not in this_warning_filter_dict
and "module" not in this_warning_filter_dict
):
warnings.simplefilter(**this_warning_filter_dict, append=True)
else:
# 'message' and 'module' are most of the time regex.Pattern but
# can be str as well and filterwarnings wants a str
for special_key in ["message", "module"]:
this_value = this_warning_filter_dict.get(special_key)
if this_value is not None and not isinstance(this_value, str):
this_warning_filter_dict[special_key] = this_value.pattern
warnings.filterwarnings(**this_warning_filter_dict, append=True)
return self.function(*args, **kwargs)
def _get_threadpool_controller():
"""Return the global threadpool controller instance."""
global _threadpool_controller
if _threadpool_controller is None:
_threadpool_controller = ThreadpoolController()
return _threadpool_controller
def _threadpool_controller_decorator(limits=1, user_api="blas"):
"""Decorator to limit the number of threads used at the function level.
It should be preferred over `threadpoolctl.ThreadpoolController.wrap` because this
one only loads the shared libraries when the function is called while the latter
loads them at import time.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
controller = _get_threadpool_controller()
with controller.limit(limits=limits, user_api=user_api):
return func(*args, **kwargs)
return wrapper
return decorator
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_chunking.py | sklearn/utils/_chunking.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from itertools import islice
from numbers import Integral
import numpy as np
from sklearn._config import get_config
from sklearn.utils._param_validation import Interval, validate_params
def chunk_generator(gen, chunksize):
"""Chunk generator, ``gen`` into lists of length ``chunksize``. The last
chunk may have a length less than ``chunksize``."""
while True:
chunk = list(islice(gen, chunksize))
if chunk:
yield chunk
else:
return
@validate_params(
{
"n": [Interval(Integral, 1, None, closed="left")],
"batch_size": [Interval(Integral, 1, None, closed="left")],
"min_batch_size": [Interval(Integral, 0, None, closed="left")],
},
prefer_skip_nested_validation=True,
)
def gen_batches(n, batch_size, *, min_batch_size=0):
"""Generator to create slices containing `batch_size` elements from 0 to `n`.
The last slice may contain less than `batch_size` elements, when
`batch_size` does not divide `n`.
Parameters
----------
n : int
Size of the sequence.
batch_size : int
Number of elements in each batch.
min_batch_size : int, default=0
Minimum number of elements in each batch.
Yields
------
slice of `batch_size` elements
See Also
--------
gen_even_slices: Generator to create n_packs slices going up to n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
>>> list(gen_batches(7, 3, min_batch_size=0))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(7, 3, min_batch_size=2))
[slice(0, 3, None), slice(3, 7, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
if end + min_batch_size > n:
continue
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
@validate_params(
{
"n": [Interval(Integral, 1, None, closed="left")],
"n_packs": [Interval(Integral, 1, None, closed="left")],
"n_samples": [Interval(Integral, 1, None, closed="left"), None],
},
prefer_skip_nested_validation=True,
)
def gen_even_slices(n, n_packs, *, n_samples=None):
"""Generator to create `n_packs` evenly spaced slices going up to `n`.
If `n_packs` does not divide `n`, except for the first `n % n_packs`
slices, remaining slices may contain fewer elements.
Parameters
----------
n : int
Size of the sequence.
n_packs : int
Number of slices to generate.
n_samples : int, default=None
Number of samples. Pass `n_samples` when the slices are to be used for
sparse matrix indexing; slicing off-the-end raises an exception, while
it works for NumPy arrays.
Yields
------
`slice` representing a set of indices from 0 to n.
See Also
--------
gen_batches: Generator to create slices containing batch_size elements
from 0 to n.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10))
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5))
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
"""Calculate how many rows can be processed within `working_memory`.
Parameters
----------
row_bytes : int
The expected number of bytes of memory that will be consumed
during the processing of each row.
max_n_rows : int, default=None
The maximum return value.
working_memory : int or float, default=None
The number of rows to fit inside this number of MiB will be
returned. When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
Returns
-------
int
The number of rows which can be processed within `working_memory`.
Warns
-----
Issues a UserWarning if `row_bytes exceeds `working_memory` MiB.
"""
if working_memory is None:
working_memory = get_config()["working_memory"]
chunk_n_rows = int(working_memory * (2**20) // row_bytes)
if max_n_rows is not None:
chunk_n_rows = min(chunk_n_rows, max_n_rows)
if chunk_n_rows < 1:
warnings.warn(
"Could not adhere to working_memory config. "
"Currently %.0fMiB, %.0fMiB required."
% (working_memory, np.ceil(row_bytes * 2**-20))
)
chunk_n_rows = 1
return chunk_n_rows
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/class_weight.py | sklearn/utils/class_weight.py | """Utilities for handling weights based on class labels."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy import sparse
from sklearn.utils._param_validation import StrOptions, validate_params
from sklearn.utils.validation import _check_sample_weight
@validate_params(
{
"class_weight": [dict, StrOptions({"balanced"}), None],
"classes": [np.ndarray],
"y": ["array-like"],
"sample_weight": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def compute_class_weight(class_weight, *, classes, y, sample_weight=None):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, "balanced" or None
If "balanced", class weights will be given by
`n_samples / (n_classes * np.bincount(y))` or their weighted equivalent if
`sample_weight` is provided.
If a dictionary is given, keys are classes and values are corresponding class
weights.
If `None` is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
`np.unique(y_org)` with `y_org` the original class labels.
y : array-like of shape (n_samples,)
Array of original class labels per sample.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples. Only used when
`class_weight='balanced'`.
Returns
-------
class_weight_vect : ndarray of shape (n_classes,)
Array with `class_weight_vect[i]` the weight for i-th class.
References
----------
The "balanced" heuristic is inspired by
Logistic Regression in Rare Events Data, King, Zen, 2001.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.class_weight import compute_class_weight
>>> y = [1, 1, 1, 1, 0, 0]
>>> compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y)
array([1.5 , 0.75])
"""
# Import error caused by circular imports.
from sklearn.preprocessing import LabelEncoder
if set(y) - set(classes):
raise ValueError("classes should include all valid labels that can be in y")
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order="C")
elif class_weight == "balanced":
# Find the weight of each class as present in y.
le = LabelEncoder()
y_ind = le.fit_transform(y)
if not all(np.isin(classes, le.classes_)):
raise ValueError("classes should have valid labels that are in y")
sample_weight = _check_sample_weight(sample_weight, y)
weighted_class_counts = np.bincount(y_ind, weights=sample_weight)
recip_freq = weighted_class_counts.sum() / (
len(le.classes_) * weighted_class_counts
)
weight = recip_freq[le.transform(classes)]
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order="C")
unweighted_classes = []
for i, c in enumerate(classes):
if c in class_weight:
weight[i] = class_weight[c]
else:
unweighted_classes.append(c)
n_weighted_classes = len(classes) - len(unweighted_classes)
if unweighted_classes and n_weighted_classes != len(class_weight):
unweighted_classes_user_friendly_str = np.array(unweighted_classes).tolist()
raise ValueError(
f"The classes, {unweighted_classes_user_friendly_str}, are not in"
" class_weight"
)
return weight
@validate_params(
{
"class_weight": [dict, list, StrOptions({"balanced"}), None],
"y": ["array-like", "sparse matrix"],
"indices": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def compute_sample_weight(class_weight, y, *, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None
Weights associated with classes in the form `{class_label: weight}`.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
`[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}]` instead of
`[{1:1}, {2:5}, {3:1}, {4:1}]`.
The `"balanced"` mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
`n_samples / (n_classes * np.bincount(y))`.
For multi-output, the weights of each column of y will be multiplied.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)
Array of original class labels per sample.
indices : array-like of shape (n_subsample,), default=None
Array of indices to be used in a subsample. Can be of length less than
`n_samples` in the case of a subsample, or equal to `n_samples` in the
case of a bootstrap subsample with repeated indices. If `None`, the
sample weight will be calculated over the full sample. Only `"balanced"`
is supported for `class_weight` if this is provided.
Returns
-------
sample_weight_vect : ndarray of shape (n_samples,)
Array with sample weights as applied to the original `y`.
Examples
--------
>>> from sklearn.utils.class_weight import compute_sample_weight
>>> y = [1, 1, 1, 1, 0, 0]
>>> compute_sample_weight(class_weight="balanced", y=y)
array([0.75, 0.75, 0.75, 0.75, 1.5 , 1.5 ])
"""
# Ensure y is 2D. Sparse matrices are already 2D.
if not sparse.issparse(y):
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if indices is not None and class_weight != "balanced":
raise ValueError(
"The only valid class_weight for subsampling is 'balanced'. "
f"Given {class_weight}."
)
elif n_outputs > 1:
if class_weight is None or isinstance(class_weight, dict):
raise ValueError(
"For multi-output, class_weight should be a list of dicts, or the "
"string 'balanced'."
)
elif isinstance(class_weight, list) and len(class_weight) != n_outputs:
raise ValueError(
"For multi-output, number of elements in class_weight should match "
f"number of outputs. Got {len(class_weight)} element(s) while having "
f"{n_outputs} outputs."
)
expanded_class_weight = []
for k in range(n_outputs):
if sparse.issparse(y):
# Ok to densify a single column at a time
y_full = y[:, [k]].toarray().flatten()
else:
y_full = y[:, k]
classes_full = np.unique(y_full)
classes_missing = None
if class_weight == "balanced" or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
# Get class weights for the subsample, covering all classes in
# case some labels that were present in the original data are
# missing from the sample.
y_subsample = y_full[indices]
classes_subsample = np.unique(y_subsample)
weight_k = np.take(
compute_class_weight(
class_weight_k, classes=classes_subsample, y=y_subsample
),
np.searchsorted(classes_subsample, classes_full),
mode="clip",
)
classes_missing = set(classes_full) - set(classes_subsample)
else:
weight_k = compute_class_weight(
class_weight_k, classes=classes_full, y=y_full
)
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
# Make missing classes' weight zero
weight_k[np.isin(y_full, list(classes_missing))] = 0.0
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64)
return expanded_class_weight
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_test_common/__init__.py | sklearn/utils/_test_common/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_test_common/instance_generator.py | sklearn/utils/_test_common/instance_generator.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
import sys
import warnings
from contextlib import suppress
from functools import partial
from inspect import isfunction
import numpy as np
from sklearn import clone, config_context
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cluster import (
HDBSCAN,
AffinityPropagation,
AgglomerativeClustering,
Birch,
BisectingKMeans,
FeatureAgglomeration,
KMeans,
MeanShift,
MiniBatchKMeans,
SpectralBiclustering,
SpectralClustering,
SpectralCoclustering,
)
from sklearn.compose import ColumnTransformer
from sklearn.covariance import GraphicalLasso, GraphicalLassoCV
from sklearn.cross_decomposition import CCA, PLSSVD, PLSCanonical, PLSRegression
from sklearn.decomposition import (
NMF,
PCA,
DictionaryLearning,
FactorAnalysis,
FastICA,
IncrementalPCA,
KernelPCA,
LatentDirichletAllocation,
MiniBatchDictionaryLearning,
MiniBatchNMF,
MiniBatchSparsePCA,
SparseCoder,
SparsePCA,
TruncatedSVD,
)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import (
AdaBoostClassifier,
AdaBoostRegressor,
BaggingClassifier,
BaggingRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
GradientBoostingClassifier,
GradientBoostingRegressor,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
IsolationForest,
RandomForestClassifier,
RandomForestRegressor,
RandomTreesEmbedding,
StackingClassifier,
StackingRegressor,
VotingClassifier,
VotingRegressor,
)
from sklearn.exceptions import SkipTestWarning
from sklearn.experimental import enable_halving_search_cv # noqa: F401
from sklearn.feature_selection import (
RFE,
RFECV,
SelectFdr,
SelectFromModel,
SelectKBest,
SequentialFeatureSelector,
)
from sklearn.frozen import FrozenEstimator
from sklearn.kernel_approximation import (
Nystroem,
PolynomialCountSketch,
RBFSampler,
SkewedChi2Sampler,
)
from sklearn.linear_model import (
ARDRegression,
BayesianRidge,
ElasticNet,
ElasticNetCV,
GammaRegressor,
HuberRegressor,
LarsCV,
Lasso,
LassoCV,
LassoLars,
LassoLarsCV,
LassoLarsIC,
LinearRegression,
LogisticRegression,
LogisticRegressionCV,
MultiTaskElasticNet,
MultiTaskElasticNetCV,
MultiTaskLasso,
MultiTaskLassoCV,
OrthogonalMatchingPursuitCV,
PassiveAggressiveClassifier,
PassiveAggressiveRegressor,
Perceptron,
PoissonRegressor,
QuantileRegressor,
RANSACRegressor,
Ridge,
RidgeClassifier,
SGDClassifier,
SGDOneClassSVM,
SGDRegressor,
TheilSenRegressor,
TweedieRegressor,
)
from sklearn.manifold import (
MDS,
TSNE,
Isomap,
LocallyLinearEmbedding,
SpectralEmbedding,
)
from sklearn.mixture import BayesianGaussianMixture, GaussianMixture
from sklearn.model_selection import (
FixedThresholdClassifier,
GridSearchCV,
HalvingGridSearchCV,
HalvingRandomSearchCV,
RandomizedSearchCV,
TunedThresholdClassifierCV,
)
from sklearn.multiclass import (
OneVsOneClassifier,
OneVsRestClassifier,
OutputCodeClassifier,
)
from sklearn.multioutput import (
ClassifierChain,
MultiOutputClassifier,
MultiOutputRegressor,
RegressorChain,
)
from sklearn.neighbors import (
KernelDensity,
KNeighborsClassifier,
KNeighborsRegressor,
KNeighborsTransformer,
NeighborhoodComponentsAnalysis,
RadiusNeighborsTransformer,
)
from sklearn.neural_network import BernoulliRBM, MLPClassifier, MLPRegressor
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import (
KBinsDiscretizer,
OneHotEncoder,
SplineTransformer,
StandardScaler,
TargetEncoder,
)
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, NuSVC, NuSVR, OneClassSVM
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import all_estimators
from sklearn.utils._tags import get_tags
from sklearn.utils._testing import SkipTest
from sklearn.utils.fixes import _IS_32BIT, parse_version, sp_base_version
CROSS_DECOMPOSITION = ["PLSCanonical", "PLSRegression", "CCA", "PLSSVD"]
rng = np.random.RandomState(0)
# The following dictionary is to indicate constructor arguments suitable for the test
# suite, which uses very small datasets, and is intended to run rather quickly.
INIT_PARAMS = {
AdaBoostClassifier: dict(n_estimators=5),
AdaBoostRegressor: dict(n_estimators=5),
AffinityPropagation: dict(max_iter=5),
AgglomerativeClustering: dict(n_clusters=2),
ARDRegression: dict(max_iter=5),
BaggingClassifier: dict(n_estimators=5),
BaggingRegressor: dict(n_estimators=5),
BayesianGaussianMixture: dict(n_init=2, max_iter=5),
BayesianRidge: dict(max_iter=5),
BernoulliRBM: dict(n_iter=5, batch_size=10),
Birch: dict(n_clusters=2),
BisectingKMeans: dict(n_init=2, n_clusters=2, max_iter=5),
CalibratedClassifierCV: dict(estimator=LogisticRegression(C=1), cv=3),
CCA: dict(n_components=1, max_iter=5),
ClassifierChain: dict(estimator=LogisticRegression(C=1), cv=3),
ColumnTransformer: dict(transformers=[("trans1", StandardScaler(), [0, 1])]),
DictionaryLearning: dict(max_iter=20, transform_algorithm="lasso_lars"),
# the default strategy prior would output constant predictions and fail
# for check_classifiers_predictions
DummyClassifier: [dict(strategy="stratified"), dict(strategy="most_frequent")],
ElasticNetCV: dict(max_iter=5, cv=3),
ElasticNet: dict(max_iter=5),
ExtraTreesClassifier: dict(n_estimators=5),
ExtraTreesRegressor: dict(n_estimators=5),
FactorAnalysis: dict(max_iter=5),
FastICA: dict(max_iter=5),
FeatureAgglomeration: dict(n_clusters=2),
FeatureUnion: dict(transformer_list=[("trans1", StandardScaler())]),
FixedThresholdClassifier: dict(estimator=LogisticRegression(C=1)),
GammaRegressor: dict(max_iter=5),
GaussianMixture: dict(n_init=2, max_iter=5),
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
GaussianRandomProjection: dict(n_components=2),
GradientBoostingClassifier: dict(n_estimators=5),
GradientBoostingRegressor: dict(n_estimators=5),
GraphicalLassoCV: dict(max_iter=5, cv=3),
GraphicalLasso: dict(max_iter=5),
GridSearchCV: [
dict(
cv=2,
error_score="raise",
estimator=Ridge(),
param_grid={"alpha": [0.1, 1.0]},
),
dict(
cv=2,
error_score="raise",
estimator=LogisticRegression(),
param_grid={"C": [0.1, 1.0]},
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(steps=[("pca", PCA()), ("ridge", Ridge())]),
param_grid={"ridge__alpha": [0.1, 1.0]},
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(
steps=[("pca", PCA()), ("logisticregression", LogisticRegression())]
),
param_grid={"logisticregression__C": [0.1, 1.0]},
),
],
HalvingGridSearchCV: [
dict(
cv=2,
error_score="raise",
estimator=Ridge(),
min_resources="smallest",
param_grid={"alpha": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=LogisticRegression(),
min_resources="smallest",
param_grid={"C": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(steps=[("pca", PCA()), ("ridge", Ridge())]),
min_resources="smallest",
param_grid={"ridge__alpha": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(
steps=[("pca", PCA()), ("logisticregression", LogisticRegression())]
),
min_resources="smallest",
param_grid={"logisticregression__C": [0.1, 1.0]},
random_state=0,
),
],
HalvingRandomSearchCV: [
dict(
cv=2,
error_score="raise",
estimator=Ridge(),
param_distributions={"alpha": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=LogisticRegression(),
param_distributions={"C": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(steps=[("pca", PCA()), ("ridge", Ridge())]),
param_distributions={"ridge__alpha": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(
steps=[("pca", PCA()), ("logisticregression", LogisticRegression())]
),
param_distributions={"logisticregression__C": [0.1, 1.0]},
random_state=0,
),
],
HDBSCAN: dict(min_samples=1),
# The default min_samples_leaf (20) isn't appropriate for small
# datasets (only very shallow trees are built) that the checks use.
HistGradientBoostingClassifier: dict(max_iter=5, min_samples_leaf=5),
HistGradientBoostingRegressor: dict(max_iter=5, min_samples_leaf=5),
HuberRegressor: dict(max_iter=5),
IncrementalPCA: dict(batch_size=10),
IsolationForest: dict(n_estimators=5),
KMeans: dict(n_init=2, n_clusters=2, max_iter=5),
KNeighborsClassifier: [dict(n_neighbors=2), dict(metric="precomputed")],
KNeighborsRegressor: [dict(n_neighbors=2), dict(metric="precomputed")],
LabelPropagation: dict(max_iter=5),
LabelSpreading: dict(max_iter=5),
LarsCV: dict(max_iter=5, cv=3),
LassoCV: dict(max_iter=5, cv=3),
Lasso: dict(max_iter=5),
LassoLarsCV: dict(max_iter=5, cv=3),
LassoLars: dict(max_iter=5),
# Noise variance estimation does not work when `n_samples < n_features`.
# We need to provide the noise variance explicitly.
LassoLarsIC: dict(max_iter=5, noise_variance=1.0),
LatentDirichletAllocation: dict(max_iter=5, batch_size=10),
LinearSVC: dict(max_iter=20),
LinearSVR: dict(max_iter=20),
LocallyLinearEmbedding: dict(max_iter=5),
LogisticRegressionCV: dict(max_iter=5, cv=3, use_legacy_attributes=False),
LogisticRegression: dict(max_iter=5),
MDS: dict(n_init=2, max_iter=5),
# In the case of check_fit2d_1sample, bandwidth is set to None and
# is thus estimated. De facto it is 0.0 as a single sample is provided
# and this makes the test fails. Hence we give it a placeholder value.
MeanShift: dict(max_iter=5, bandwidth=1.0),
MiniBatchDictionaryLearning: dict(batch_size=10, max_iter=5),
MiniBatchKMeans: dict(n_init=2, n_clusters=2, max_iter=5, batch_size=10),
MiniBatchNMF: dict(batch_size=10, max_iter=20, fresh_restarts=True),
MiniBatchSparsePCA: dict(max_iter=5, batch_size=10),
MLPClassifier: dict(max_iter=100),
MLPRegressor: dict(max_iter=100),
MultiOutputClassifier: dict(estimator=LogisticRegression(C=1)),
MultiOutputRegressor: dict(estimator=Ridge()),
MultiTaskElasticNetCV: dict(max_iter=5, cv=3),
MultiTaskElasticNet: dict(max_iter=5),
MultiTaskLassoCV: dict(max_iter=5, cv=3),
MultiTaskLasso: dict(max_iter=5),
NeighborhoodComponentsAnalysis: dict(max_iter=5),
NMF: dict(max_iter=500),
NuSVC: dict(max_iter=-1),
NuSVR: dict(max_iter=-1),
OneClassSVM: dict(max_iter=-1),
OneHotEncoder: dict(handle_unknown="ignore"),
OneVsOneClassifier: dict(estimator=LogisticRegression(C=1)),
OneVsRestClassifier: dict(estimator=LogisticRegression(C=1)),
OrthogonalMatchingPursuitCV: dict(cv=3),
OutputCodeClassifier: dict(estimator=LogisticRegression(C=1)),
PassiveAggressiveClassifier: dict(max_iter=5),
PassiveAggressiveRegressor: dict(max_iter=5),
Perceptron: dict(max_iter=5),
Pipeline: [
{"steps": [("scaler", StandardScaler()), ("final_estimator", Ridge())]},
{
"steps": [
("scaler", StandardScaler()),
("final_estimator", LogisticRegression()),
]
},
],
PLSCanonical: dict(n_components=1, max_iter=5),
PLSRegression: dict(n_components=1, max_iter=5),
PLSSVD: dict(n_components=1),
PoissonRegressor: dict(max_iter=5),
RandomForestClassifier: dict(n_estimators=5),
RandomForestRegressor: dict(n_estimators=5),
RandomizedSearchCV: [
dict(
cv=2,
error_score="raise",
estimator=Ridge(),
param_distributions={"alpha": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=LogisticRegression(),
param_distributions={"C": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(steps=[("pca", PCA()), ("ridge", Ridge())]),
param_distributions={"ridge__alpha": [0.1, 1.0]},
random_state=0,
),
dict(
cv=2,
error_score="raise",
estimator=Pipeline(
steps=[("pca", PCA()), ("logisticregression", LogisticRegression())]
),
param_distributions={"logisticregression__C": [0.1, 1.0]},
random_state=0,
),
],
RandomTreesEmbedding: dict(n_estimators=5),
# `RANSACRegressor` will raise an error with any model other
# than `LinearRegression` if we don't fix the `min_samples` parameter.
# For common tests, we can enforce using `LinearRegression` that
# is the default estimator in `RANSACRegressor` instead of `Ridge`.
RANSACRegressor: dict(estimator=LinearRegression(), max_trials=10),
RegressorChain: dict(estimator=Ridge(), cv=3),
RFECV: dict(estimator=LogisticRegression(C=1), cv=3),
RFE: dict(estimator=LogisticRegression(C=1)),
# be tolerant of noisy datasets (not actually speed)
SelectFdr: dict(alpha=0.5),
# Increases coverage because SGDRegressor has partial_fit
SelectFromModel: dict(estimator=SGDRegressor(random_state=0)),
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
SelectKBest: dict(k=1),
SelfTrainingClassifier: dict(estimator=LogisticRegression(C=1), max_iter=5),
SequentialFeatureSelector: dict(estimator=LogisticRegression(C=1), cv=3),
SGDClassifier: dict(max_iter=5),
SGDOneClassSVM: dict(max_iter=5),
SGDRegressor: dict(max_iter=5),
SparseCoder: dict(dictionary=rng.normal(size=(5, 3))),
SparsePCA: dict(max_iter=5),
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
SparseRandomProjection: dict(n_components=2),
SpectralBiclustering: dict(n_init=2, n_best=1, n_clusters=2),
SpectralClustering: dict(n_init=2, n_clusters=2),
SpectralCoclustering: dict(n_init=2, n_clusters=2),
# Default "auto" parameter can lead to different ordering of eigenvalues on
# windows: #24105
SpectralEmbedding: dict(eigen_tol=1e-05),
StackingClassifier: dict(
estimators=[
("est1", DecisionTreeClassifier(max_depth=3, random_state=0)),
("est2", DecisionTreeClassifier(max_depth=3, random_state=1)),
],
cv=3,
),
StackingRegressor: dict(
estimators=[
("est1", DecisionTreeRegressor(max_depth=3, random_state=0)),
("est2", DecisionTreeRegressor(max_depth=3, random_state=1)),
],
cv=3,
),
SVC: [dict(max_iter=-1), dict(kernel="precomputed")],
SVR: [dict(max_iter=-1), dict(kernel="precomputed")],
TargetEncoder: dict(cv=3),
TheilSenRegressor: dict(max_iter=5, max_subpopulation=100),
# TruncatedSVD doesn't run with n_components = n_features
TruncatedSVD: dict(n_iter=5, n_components=1),
TSNE: dict(perplexity=2),
TunedThresholdClassifierCV: dict(estimator=LogisticRegression(C=1), cv=3),
TweedieRegressor: dict(max_iter=5),
VotingClassifier: dict(
estimators=[
("est1", DecisionTreeClassifier(max_depth=3, random_state=0)),
("est2", DecisionTreeClassifier(max_depth=3, random_state=1)),
]
),
VotingRegressor: dict(
estimators=[
("est1", DecisionTreeRegressor(max_depth=3, random_state=0)),
("est2", DecisionTreeRegressor(max_depth=3, random_state=1)),
]
),
}
# This dictionary stores parameters for specific checks. It also enables running the
# same check with multiple instances of the same estimator with different parameters.
# The special key "*" allows to apply the parameters to all checks.
# TODO(devtools): allow third-party developers to pass test specific params to checks
PER_ESTIMATOR_CHECK_PARAMS: dict = {
    # TODO(devtools): check that function names here exist in checks for the estimator
    AgglomerativeClustering: {"check_dict_unchanged": dict(n_clusters=1)},
    BayesianGaussianMixture: {"check_dict_unchanged": dict(max_iter=5, n_init=2)},
    BernoulliRBM: {"check_dict_unchanged": dict(n_components=1, n_iter=5)},
    Birch: {"check_dict_unchanged": dict(n_clusters=1)},
    BisectingKMeans: {"check_dict_unchanged": dict(max_iter=5, n_clusters=1, n_init=2)},
    CCA: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
    # Trees: run the sample-weight equivalence checks once per split criterion.
    DecisionTreeRegressor: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(criterion="squared_error"),
            dict(criterion="absolute_error"),
            dict(criterion="friedman_mse"),
            dict(criterion="poisson"),
        ],
        "check_sample_weight_equivalence_on_sparse_data": [
            dict(criterion="squared_error"),
            dict(criterion="absolute_error"),
            dict(criterion="friedman_mse"),
            dict(criterion="poisson"),
        ],
    },
    DecisionTreeClassifier: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(criterion="gini"),
            dict(criterion="log_loss"),
            dict(criterion="entropy"),
        ],
        "check_sample_weight_equivalence_on_sparse_data": [
            dict(criterion="gini"),
            dict(criterion="log_loss"),
            dict(criterion="entropy"),
        ],
    },
    DictionaryLearning: {
        "check_dict_unchanged": dict(
            max_iter=20, n_components=1, transform_algorithm="lasso_lars"
        )
    },
    FactorAnalysis: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
    FastICA: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
    FeatureAgglomeration: {"check_dict_unchanged": dict(n_clusters=1)},
    FeatureUnion: {
        "check_estimator_sparse_tag": [
            dict(transformer_list=[("trans1", StandardScaler())]),
            dict(
                transformer_list=[
                    ("trans1", StandardScaler(with_mean=False)),
                    ("trans2", "drop"),
                    ("trans3", "passthrough"),
                ]
            ),
        ]
    },
    GammaRegressor: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="newton-cholesky"),
            dict(solver="lbfgs"),
        ],
    },
    GaussianMixture: {"check_dict_unchanged": dict(max_iter=5, n_init=2)},
    GaussianRandomProjection: {"check_dict_unchanged": dict(n_components=1)},
    IncrementalPCA: {"check_dict_unchanged": dict(batch_size=10, n_components=1)},
    Isomap: {"check_dict_unchanged": dict(n_components=1)},
    KMeans: {"check_dict_unchanged": dict(max_iter=5, n_clusters=1, n_init=2)},
    # TODO(1.9) simplify when averaged_inverted_cdf is the default
    KBinsDiscretizer: {
        "check_sample_weight_equivalence_on_dense_data": [
            # Using subsample != None leads to a stochastic fit that is not
            # handled by the check_sample_weight_equivalence_on_dense_data test.
            dict(strategy="quantile", subsample=None, quantile_method="inverted_cdf"),
            dict(
                strategy="quantile",
                subsample=None,
                quantile_method="averaged_inverted_cdf",
            ),
            dict(strategy="uniform", subsample=None),
            # The "kmeans" strategy leads to a stochastic fit that is not
            # handled by the check_sample_weight_equivalence test.
        ],
        "check_sample_weights_list": dict(
            strategy="quantile", quantile_method="averaged_inverted_cdf"
        ),
        "check_sample_weights_pandas_series": dict(
            strategy="quantile", quantile_method="averaged_inverted_cdf"
        ),
        "check_sample_weights_shape": dict(
            strategy="quantile", quantile_method="averaged_inverted_cdf"
        ),
        "check_sample_weights_not_an_array": dict(
            strategy="quantile", quantile_method="averaged_inverted_cdf"
        ),
        "check_sample_weights_not_overwritten": dict(
            strategy="quantile", quantile_method="averaged_inverted_cdf"
        ),
    },
    KernelPCA: {"check_dict_unchanged": dict(n_components=1)},
    LassoLars: {"check_non_transformer_estimators_n_iter": dict(alpha=0.0)},
    LatentDirichletAllocation: {
        "check_dict_unchanged": dict(batch_size=10, max_iter=5, n_components=1)
    },
    LinearDiscriminantAnalysis: {"check_dict_unchanged": dict(n_components=1)},
    # NOTE(review): this key lacks the "_on_dense_data"/"_on_sparse_data" suffix
    # used by most other entries — confirm a check with this exact name exists.
    LinearSVC: {
        "check_sample_weight_equivalence": [
            # TODO: dual=True is a stochastic solver: we cannot rely on
            # check_sample_weight_equivalence to check the correct handling of
            # sample_weight and we would need a statistical test instead, see
            # meta-issue #162298.
            # dict(max_iter=20, dual=True, tol=1e-12),
            dict(dual=False, tol=1e-12),
            dict(dual=False, tol=1e-12, class_weight="balanced"),
        ]
    },
    LinearRegression: {
        "check_estimator_sparse_tag": [dict(positive=False), dict(positive=True)],
        "check_sample_weight_equivalence_on_dense_data": [
            dict(positive=False),
            dict(positive=True),
        ],
        "check_sample_weight_equivalence_on_sparse_data": [dict(tol=1e-12)],
    },
    LocallyLinearEmbedding: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
    LogisticRegression: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="lbfgs"),
            dict(solver="liblinear"),
            dict(solver="newton-cg"),
            dict(solver="newton-cholesky"),
            dict(solver="newton-cholesky", class_weight="balanced"),
        ]
    },
    # NOTE(review): the unsuffixed "check_sample_weight_equivalence" key below is
    # inconsistent with the "_on_sparse_data" sibling key — verify it is not dead.
    LogisticRegressionCV: {
        "check_sample_weight_equivalence": [
            dict(solver="lbfgs", use_legacy_attributes=False),
            dict(solver="newton-cholesky", use_legacy_attributes=False),
            dict(
                solver="newton-cholesky",
                class_weight="balanced",
                use_legacy_attributes=False,
            ),
        ],
        "check_sample_weight_equivalence_on_sparse_data": [
            dict(solver="liblinear"),
        ],
    },
    MDS: {"check_dict_unchanged": dict(max_iter=5, n_components=1, n_init=2)},
    MLPClassifier: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="lbfgs"),
        ]
    },
    MLPRegressor: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="sgd", tol=1e-2, random_state=42),
        ]
    },
    MiniBatchDictionaryLearning: {
        "check_dict_unchanged": dict(batch_size=10, max_iter=5, n_components=1)
    },
    MiniBatchKMeans: {
        "check_dict_unchanged": dict(batch_size=10, max_iter=5, n_clusters=1, n_init=2)
    },
    MiniBatchNMF: {
        "check_dict_unchanged": dict(
            batch_size=10, fresh_restarts=True, max_iter=20, n_components=1
        )
    },
    MiniBatchSparsePCA: {
        "check_dict_unchanged": dict(batch_size=10, max_iter=5, n_components=1)
    },
    NMF: {"check_dict_unchanged": dict(max_iter=500, n_components=1)},
    NeighborhoodComponentsAnalysis: {
        "check_dict_unchanged": dict(max_iter=5, n_components=1)
    },
    Nystroem: {"check_dict_unchanged": dict(n_components=1)},
    PCA: {"check_dict_unchanged": dict(n_components=1)},
    PLSCanonical: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
    PLSRegression: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
    PLSSVD: {"check_dict_unchanged": dict(n_components=1)},
    PoissonRegressor: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="newton-cholesky"),
            dict(solver="lbfgs"),
        ],
    },
    PolynomialCountSketch: {"check_dict_unchanged": dict(n_components=1)},
    QuantileRegressor: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(quantile=0.5),
            dict(quantile=0.75),
            dict(solver="highs-ds"),
            dict(solver="highs-ipm"),
        ],
    },
    RBFSampler: {"check_dict_unchanged": dict(n_components=1)},
    Ridge: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="svd"),
            dict(solver="cholesky"),
            dict(solver="sparse_cg"),
            dict(solver="lsqr"),
            dict(solver="lbfgs", positive=True),
        ],
        "check_sample_weight_equivalence_on_sparse_data": [
            dict(solver="sparse_cg"),
            dict(solver="lsqr"),
        ],
    },
    RidgeClassifier: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="svd"),
            dict(solver="cholesky"),
            dict(solver="sparse_cg"),
            dict(solver="lsqr"),
        ],
        "check_sample_weight_equivalence_on_sparse_data": [
            dict(solver="sparse_cg"),
            dict(solver="lsqr"),
        ],
    },
    SkewedChi2Sampler: {"check_dict_unchanged": dict(n_components=1)},
    # SparseCoder needs an explicit `dictionary` sized to match each check's
    # data; `rng` is presumably a module-level RandomState defined earlier in
    # this file — confirm.
    SparseCoder: {
        "check_estimators_dtypes": dict(dictionary=rng.normal(size=(5, 5))),
        "check_dtype_object": dict(dictionary=rng.normal(size=(5, 10))),
        "check_transformers_unfitted_stateless": dict(
            dictionary=rng.normal(size=(5, 5))
        ),
        "check_fit_idempotent": dict(dictionary=rng.normal(size=(5, 2))),
        "check_transformer_preserve_dtypes": dict(
            dictionary=rng.normal(size=(5, 3)).astype(np.float32)
        ),
        "check_set_output_transform": dict(dictionary=rng.normal(size=(5, 5))),
        "check_global_output_transform_pandas": dict(
            dictionary=rng.normal(size=(5, 5))
        ),
        "check_set_output_transform_pandas": dict(dictionary=rng.normal(size=(5, 5))),
        "check_set_output_transform_polars": dict(dictionary=rng.normal(size=(5, 5))),
        "check_global_set_output_transform_polars": dict(
            dictionary=rng.normal(size=(5, 5))
        ),
        "check_dataframe_column_names_consistency": dict(
            dictionary=rng.normal(size=(5, 8))
        ),
        "check_estimators_overwrite_params": dict(dictionary=rng.normal(size=(5, 2))),
        "check_estimators_fit_returns_self": dict(dictionary=rng.normal(size=(5, 2))),
        "check_readonly_memmap_input": dict(dictionary=rng.normal(size=(5, 2))),
        "check_n_features_in_after_fitting": dict(dictionary=rng.normal(size=(5, 4))),
        "check_fit_check_is_fitted": dict(dictionary=rng.normal(size=(5, 2))),
        "check_n_features_in": dict(dictionary=rng.normal(size=(5, 2))),
        "check_positive_only_tag_during_fit": dict(dictionary=rng.normal(size=(5, 4))),
        "check_fit2d_1sample": dict(dictionary=rng.normal(size=(5, 10))),
        "check_fit2d_1feature": dict(dictionary=rng.normal(size=(5, 1))),
    },
    SparsePCA: {"check_dict_unchanged": dict(max_iter=5, n_components=1)},
    SparseRandomProjection: {"check_dict_unchanged": dict(n_components=1)},
    SpectralBiclustering: {
        "check_dict_unchanged": dict(n_best=1, n_clusters=1, n_components=1, n_init=2)
    },
    SpectralClustering: {
        "check_dict_unchanged": dict(n_clusters=1, n_components=1, n_init=2)
    },
    SpectralCoclustering: {"check_dict_unchanged": dict(n_clusters=1, n_init=2)},
    SpectralEmbedding: {"check_dict_unchanged": dict(eigen_tol=1e-05, n_components=1)},
    StandardScaler: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(with_mean=True),
            dict(with_mean=False),
        ],
        "check_sample_weight_equivalence_on_sparse_data": [
            dict(with_mean=False),
        ],
    },
    TSNE: {"check_dict_unchanged": dict(n_components=1, perplexity=2)},
    TruncatedSVD: {"check_dict_unchanged": dict(n_components=1)},
    TweedieRegressor: {
        "check_sample_weight_equivalence_on_dense_data": [
            dict(solver="newton-cholesky"),
            dict(solver="lbfgs"),
        ],
    },
}
def _tested_estimators(type_filter=None):
    """Yield an instance of every estimator selected by ``type_filter``.

    Estimators that cannot be instantiated (``_construct_instances`` raises
    ``SkipTest`` for them) are silently skipped.
    """
    for _, Estimator in all_estimators(type_filter=type_filter):
        with suppress(SkipTest):
            yield from _construct_instances(Estimator)
# Estimators for which `_construct_instances` raises SkipTest instead of
# building an instance.
SKIPPED_ESTIMATORS = [FrozenEstimator]
def _construct_instances(Estimator):
    """Yield instances of ``Estimator`` if possible.

    Estimators listed in ``SKIPPED_ESTIMATORS`` trigger a ``SkipTest``. When
    ``INIT_PARAMS`` provides parameter sets for the estimator, one instance is
    yielded per parameter set; otherwise a single default-constructed instance
    is yielded.
    """
    if Estimator in SKIPPED_ESTIMATORS:
        msg = f"Can't instantiate estimator {Estimator.__name__}"
        # raise additional warning to be shown by pytest
        warnings.warn(msg, SkipTestWarning)
        raise SkipTest(msg)

    if Estimator not in INIT_PARAMS:
        yield Estimator()
        return

    param_sets = INIT_PARAMS[Estimator]
    # A single parameter set may be given as a bare dict.
    if not isinstance(param_sets, list):
        param_sets = [param_sets]
    for params in param_sets:
        yield Estimator(**params)
def _get_check_estimator_ids(obj):
"""Create pytest ids for checks.
When `obj` is an estimator, this returns the pprint version of the
estimator (with `print_changed_only=True`). When `obj` is a function, the
name of the function is returned with its keyword arguments.
`_get_check_estimator_ids` is designed to be used as the `id` in
`pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`
is yielding estimators and checks.
Parameters
----------
obj : estimator or function
Items generated by `check_estimator`.
Returns
-------
id : str or None
See Also
--------
check_estimator
"""
if isfunction(obj):
return obj.__name__
if isinstance(obj, partial):
if not obj.keywords:
return obj.func.__name__
kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
if hasattr(obj, "get_params"):
with config_context(print_changed_only=True):
return re.sub(r"\s", "", str(obj))
def _yield_instances_for_check(check, estimator_orig):
"""Yield instances for a check.
For most estimators, this is a no-op.
For estimators which have an entry in PER_ESTIMATOR_CHECK_PARAMS, this will yield
an estimator for each parameter set in PER_ESTIMATOR_CHECK_PARAMS[estimator].
"""
# TODO(devtools): enable this behavior for third party estimators as well
if type(estimator_orig) not in PER_ESTIMATOR_CHECK_PARAMS:
yield estimator_orig
return
check_params = PER_ESTIMATOR_CHECK_PARAMS[type(estimator_orig)]
try:
check_name = check.__name__
except AttributeError:
# partial tests
check_name = check.func.__name__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_metaestimators.py | sklearn/utils/tests/test_metaestimators.py | import pickle
import pytest
from sklearn.utils.metaestimators import available_if
class AvailableParameterEstimator:
    """This estimator's `available` parameter toggles the presence of a method"""
    def __init__(self, available=True, return_value=1):
        # `available` controls whether `available_func` is exposed as an
        # attribute; `return_value` is what `available_func` returns.
        self.available = available
        self.return_value = return_value
    # The docstring below is asserted verbatim by test_available_if_docstring;
    # do not reword it.
    @available_if(lambda est: est.available)
    def available_func(self):
        """This is a mock available_if function"""
        return self.return_value
def test_available_if_docstring():
    # The docstring must survive all three ways of reaching the method: raw
    # descriptor, class attribute, and bound method on an instance.
    expected = "This is a mock available_if function"
    accessors = (
        AvailableParameterEstimator.__dict__["available_func"],
        AvailableParameterEstimator.available_func,
        AvailableParameterEstimator().available_func,
    )
    for accessor in accessors:
        assert expected in str(accessor.__doc__)
def test_available_if():
    # The method only exists when the `available` flag is set.
    est_with = AvailableParameterEstimator()
    est_without = AvailableParameterEstimator(available=False)
    assert hasattr(est_with, "available_func")
    assert not hasattr(est_without, "available_func")
def test_available_if_unbound_method():
    # Non-regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/20614: decorated
    # functions must remain usable as unbound methods, for instance when
    # monkeypatching.
    available = AvailableParameterEstimator()
    AvailableParameterEstimator.available_func(available)

    unavailable = AvailableParameterEstimator(available=False)
    expected_msg = (
        "This 'AvailableParameterEstimator' has no attribute 'available_func'"
    )
    with pytest.raises(AttributeError, match=expected_msg):
        AvailableParameterEstimator.available_func(unavailable)
def test_available_if_methods_can_be_pickled():
    """Check that available_if methods can be pickled.

    Non-regression test for #21344.
    """
    expected = 10
    est = AvailableParameterEstimator(available=True, return_value=expected)
    roundtripped = pickle.loads(pickle.dumps(est.available_func))
    assert roundtripped() == expected
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_arrayfuncs.py | sklearn/utils/tests/test_arrayfuncs.py | import numpy as np
import pytest
from sklearn.utils._testing import assert_allclose
from sklearn.utils.arrayfuncs import _all_with_any_reduction_axis_1, min_pos
def test_min_pos():
    # min_pos should give consistent results between float32 and float64
    # inputs and never return a negative value.
    X = np.random.RandomState(0).randn(100)
    result_double = min_pos(X)
    result_single = min_pos(X.astype(np.float32))
    assert_allclose(result_double, result_single)
    assert result_double >= 0
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_min_pos_no_positive(dtype):
    # Check that the return value of min_pos is the maximum representable
    # value of the input dtype when all input elements are <= 0 (#19328)
    # Build the all-negative array directly in the target dtype: the previous
    # `np.full(...).astype(dtype, copy=False)` allocated a float64 array first
    # and the `copy=False` flag was ineffective when dtypes differ.
    X = np.full(100, -1.0, dtype=dtype)
    assert min_pos(X) == np.finfo(dtype).max
@pytest.mark.parametrize(
    "dtype", [np.int16, np.int32, np.int64, np.float32, np.float64]
)
@pytest.mark.parametrize("value", [0, 1.5, -1])
def test_all_with_any_reduction_axis_1(dtype, value):
    """The reduction is True iff some row of X is constant-equal to `value`."""
    data = np.arange(12, dtype=dtype).reshape(3, 4)
    # No row equals `value` yet, so the reduction must be False.
    assert not _all_with_any_reduction_axis_1(data, value=value)
    # Fill the middle row with `value` to make the reduction True.
    data[1] = value
    assert _all_with_any_reduction_axis_1(data, value=value)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_unique.py | sklearn/utils/tests/test_unique.py | import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils._unique import attach_unique, cached_unique
from sklearn.utils.validation import check_array
def test_attach_unique_attaches_unique_to_array():
    values = np.array([1, 2, 2, 3, 4, 4, 5])
    with_unique = attach_unique(values)
    # The unique values are stored in the dtype metadata while the data itself
    # stays unchanged.
    assert_array_equal(with_unique.dtype.metadata["unique"], np.array([1, 2, 3, 4, 5]))
    assert_array_equal(with_unique, values)
def test_cached_unique_returns_cached_unique():
    # cached_unique must read the "unique" entry from the dtype metadata
    # instead of recomputing it from the data.
    dtype_with_metadata = np.dtype(np.float64, metadata={"unique": np.array([1, 2])})
    arr = np.array([1, 2, 2, 3, 4, 4, 5], dtype=dtype_with_metadata)
    assert_array_equal(cached_unique(arr), np.array([1, 2]))
def test_attach_unique_not_ndarray():
    """Non-ndarray inputs must be returned untouched (same object)."""
    values = [1, 2, 2, 3, 4, 4, 5]
    assert attach_unique(values) is values
def test_attach_unique_returns_view():
    """attach_unique must return a view over the input array, not a copy."""
    arr = np.array([1, 2, 2, 3, 4, 4, 5])
    assert attach_unique(arr).base is arr
def test_attach_unique_return_tuple():
    """Check both values of the `return_tuple` argument."""
    arr = np.array([1, 2, 2, 3, 4, 4, 5])

    # With return_tuple=True a 1-tuple wrapping the array is returned.
    as_tuple = attach_unique(arr, return_tuple=True)
    assert isinstance(as_tuple, tuple)
    assert len(as_tuple) == 1
    assert_array_equal(as_tuple[0], arr)

    # With return_tuple=False the bare array is returned.
    as_array = attach_unique(arr, return_tuple=False)
    assert isinstance(as_array, np.ndarray)
    assert_array_equal(as_array, arr)
def test_check_array_keeps_unique():
    """Test that check_array keeps the unique metadata."""
    arr = np.array([[1, 2, 2, 3, 4, 4, 5]])
    checked = check_array(attach_unique(arr))
    assert_array_equal(checked.dtype.metadata["unique"], np.array([1, 2, 3, 4, 5]))
    assert_array_equal(checked, arr)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_param_validation.py | sklearn/utils/tests/test_param_validation.py | from numbers import Integral, Real
import numpy as np
import pytest
from scipy.sparse import csr_matrix
from sklearn._config import config_context, get_config
from sklearn.base import BaseEstimator, _fit_context
from sklearn.model_selection import LeaveOneOut
from sklearn.utils import deprecated
from sklearn.utils._param_validation import (
HasMethods,
Hidden,
Interval,
InvalidParameterError,
MissingValues,
Options,
RealNotInt,
StrOptions,
_ArrayLikes,
_Booleans,
_Callables,
_CVObjects,
_InstancesOf,
_IterablesNotString,
_NanConstraint,
_NoneConstraint,
_PandasNAConstraint,
_RandomStates,
_SparseMatrices,
_VerboseHelper,
generate_invalid_param_val,
generate_valid_param,
make_constraint,
validate_params,
)
from sklearn.utils.fixes import CSR_CONTAINERS
# Some helpers for the tests
# `_func` mixes positional, keyword-only, *args and **kwargs parameters so the
# tests can exercise validation across every parameter kind.
@validate_params(
    {"a": [Real], "b": [Real], "c": [Real], "d": [Real]},
    prefer_skip_nested_validation=True,
)
def _func(a, b=0, *args, c, d=0, **kwargs):
    """A function to test the validation of functions."""
class _Class:
    """A class to test the _InstancesOf constraint and the validation of methods."""
    @validate_params({"a": [Real]}, prefer_skip_nested_validation=True)
    def _method(self, a):
        """A validated method"""
    # `deprecated` is stacked on top of `validate_params` to check that an
    # outer decorator does not break method validation.
    @deprecated()
    @validate_params({"a": [Real]}, prefer_skip_nested_validation=True)
    def _deprecated_method(self, a):
        """A deprecated validated method"""
class _Estimator(BaseEstimator):
    """An estimator to test the validation of estimator parameters."""
    _parameter_constraints: dict = {"a": [Real]}
    def __init__(self, a):
        self.a = a
    # Parameters are validated by `_fit_context` when `fit` is called, not in
    # `__init__`.
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X=None, y=None):
        pass
@pytest.mark.parametrize("interval_type", [Integral, Real])
def test_interval_range(interval_type):
    """Check which bounds are included depending on `closed`."""
    # closed -> (left bound included, right bound included)
    membership = {
        "left": (True, False),
        "right": (False, True),
        "both": (True, True),
        "neither": (False, False),
    }
    for closed, (left_in, right_in) in membership.items():
        interval = Interval(interval_type, -2, 2, closed=closed)
        assert (-2 in interval) is left_in
        assert (2 in interval) is right_in
@pytest.mark.parametrize("interval_type", [Integral, Real])
def test_interval_large_integers(interval_type):
    """Check that Interval constraint work with large integers.

    non-regression test for #26648.
    """
    small_interval = Interval(interval_type, 0, 2, closed="neither")
    # Values far outside the bounds are rejected, as int and as float.
    for big in (2**65, 2**128, float(2**65), float(2**128)):
        assert big not in small_interval

    huge_interval = Interval(interval_type, 0, 2**128, closed="neither")
    assert 2**65 in huge_interval
    assert float(2**65) in huge_interval
    assert 2**128 not in huge_interval
    assert float(2**128) not in huge_interval
    # Larger than the biggest representable float.
    assert 2**1024 not in huge_interval
def test_interval_inf_in_bounds():
    """Check that inf is included iff a bound is closed and set to None.

    Only valid for real intervals.
    """
    assert np.inf in Interval(Real, 0, None, closed="right")
    assert -np.inf in Interval(Real, None, 0, closed="left")

    fully_open = Interval(Real, None, None, closed="neither")
    assert np.inf not in fully_open
    assert -np.inf not in fully_open
# Covers both a bounded interval and a fully unbounded closed interval.
@pytest.mark.parametrize(
    "interval",
    [Interval(Real, 0, 1, closed="left"), Interval(Real, None, None, closed="both")],
)
def test_nan_not_in_interval(interval):
    """Check that np.nan is not in any interval."""
    assert np.nan not in interval
# Each case: Interval constructor kwargs, the expected exception type, and the
# expected error-message pattern.
@pytest.mark.parametrize(
    "params, error, match",
    [
        (
            {"type": Integral, "left": 1.0, "right": 2, "closed": "both"},
            TypeError,
            r"Expecting left to be an int for an interval over the integers",
        ),
        (
            {"type": Integral, "left": 1, "right": 2.0, "closed": "neither"},
            TypeError,
            "Expecting right to be an int for an interval over the integers",
        ),
        (
            {"type": Integral, "left": None, "right": 0, "closed": "left"},
            ValueError,
            r"left can't be None when closed == left",
        ),
        (
            {"type": Integral, "left": 0, "right": None, "closed": "right"},
            ValueError,
            r"right can't be None when closed == right",
        ),
        (
            {"type": Integral, "left": 1, "right": -1, "closed": "both"},
            ValueError,
            r"right can't be less than left",
        ),
    ],
)
def test_interval_errors(params, error, match):
    """Check that informative errors are raised for invalid combination of parameters"""
    with pytest.raises(error, match=match):
        Interval(**params)
def test_stroptions():
    """Sanity check for the StrOptions constraint"""
    options = StrOptions({"a", "b", "c"}, deprecated={"c"})
    # Deprecated options are still accepted.
    for valid in ("a", "c"):
        assert options.is_satisfied_by(valid)
    assert not options.is_satisfied_by("d")
    # Deprecated options are flagged in the string representation.
    assert "'c' (deprecated)" in str(options)
def test_options():
    """Sanity check for the Options constraint"""
    options = Options(Real, {-0.5, 0.5, np.inf}, deprecated={-0.5})
    # Deprecated options are still accepted.
    for valid in (-0.5, np.inf):
        assert options.is_satisfied_by(valid)
    assert not options.is_satisfied_by(1.23)
    # Deprecated options are flagged in the string representation.
    assert "-0.5 (deprecated)" in str(options)
@pytest.mark.parametrize(
    "type, expected_type_name",
    [
        (int, "int"),
        (Integral, "int"),
        (Real, "float"),
        (np.ndarray, "numpy.ndarray"),
    ],
)
def test_instances_of_type_human_readable(type, expected_type_name):
    """Check the string representation of the _InstancesOf constraint."""
    expected = f"an instance of '{expected_type_name}'"
    assert str(_InstancesOf(type)) == expected
def test_hasmethods():
    """Check the HasMethods constraint."""
    constraint = HasMethods(["a", "b"])

    class _WithBoth:
        def a(self):
            pass  # pragma: no cover

        def b(self):
            pass  # pragma: no cover

    class _MissingB:
        def a(self):
            pass  # pragma: no cover

    assert constraint.is_satisfied_by(_WithBoth())
    assert not constraint.is_satisfied_by(_MissingB())
    assert str(constraint) == "an object implementing 'a' and 'b'"
# Constraints for which an invalid value can be generated programmatically.
@pytest.mark.parametrize(
    "constraint",
    [
        Interval(Real, None, 0, closed="left"),
        Interval(Real, 0, None, closed="left"),
        Interval(Real, None, None, closed="neither"),
        StrOptions({"a", "b", "c"}),
        MissingValues(),
        MissingValues(numeric_only=True),
        _VerboseHelper(),
        HasMethods("fit"),
        _IterablesNotString(),
        _CVObjects(),
    ],
)
def test_generate_invalid_param_val(constraint):
    """Check that the value generated does not satisfy the constraint"""
    bad_value = generate_invalid_param_val(constraint)
    assert not constraint.is_satisfied_by(bad_value)
# Pairs of overlapping/disjoint integer and real intervals: the generated
# invalid value must fall outside BOTH intervals of the pair.
@pytest.mark.parametrize(
    "integer_interval, real_interval",
    [
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, -5, 5, closed="both"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, -5, 5, closed="neither"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, 4, 5, closed="both"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, 5, None, closed="left"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, 4, None, closed="neither"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, -5, 5, closed="both"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, -5, 5, closed="neither"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, 1, 2, closed="both"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, None, -5, closed="left"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, None, -4, closed="neither"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, None, 1, closed="right"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, 1, None, closed="left"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, -10, -4, closed="neither"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, -10, -4, closed="right"),
        ),
        (
            Interval(Integral, -5, 5, closed="neither"),
            Interval(RealNotInt, 6, 10, closed="neither"),
        ),
        (
            Interval(Integral, -5, 5, closed="neither"),
            Interval(RealNotInt, 6, 10, closed="left"),
        ),
        (
            Interval(Integral, 2, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="both"),
        ),
        (
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="both"),
        ),
    ],
)
def test_generate_invalid_param_val_2_intervals(integer_interval, real_interval):
    """Check that the value generated for an interval constraint does not satisfy any of
    the interval constraints.
    """
    bad_value = generate_invalid_param_val(constraint=real_interval)
    assert not real_interval.is_satisfied_by(bad_value)
    assert not integer_interval.is_satisfied_by(bad_value)
    bad_value = generate_invalid_param_val(constraint=integer_interval)
    assert not real_interval.is_satisfied_by(bad_value)
    assert not integer_interval.is_satisfied_by(bad_value)
# Constraints accepting every possible value of their kind: no invalid value
# can exist, so generation must fail explicitly.
@pytest.mark.parametrize(
    "constraint",
    [
        _ArrayLikes(),
        _InstancesOf(list),
        _Callables(),
        _NoneConstraint(),
        _RandomStates(),
        _SparseMatrices(),
        _Booleans(),
        Interval(Integral, None, None, closed="neither"),
    ],
)
def test_generate_invalid_param_val_all_valid(constraint):
    """Check that the function raises NotImplementedError when there's no invalid value
    for the constraint.
    """
    with pytest.raises(NotImplementedError):
        generate_invalid_param_val(constraint)
# One representative constraint of every supported kind.
@pytest.mark.parametrize(
    "constraint",
    [
        _ArrayLikes(),
        _Callables(),
        _InstancesOf(list),
        _NoneConstraint(),
        _RandomStates(),
        _SparseMatrices(),
        _Booleans(),
        _VerboseHelper(),
        MissingValues(),
        MissingValues(numeric_only=True),
        StrOptions({"a", "b", "c"}),
        Options(Integral, {1, 2, 3}),
        Interval(Integral, None, None, closed="neither"),
        Interval(Integral, 0, 10, closed="neither"),
        Interval(Integral, 0, None, closed="neither"),
        Interval(Integral, None, 0, closed="neither"),
        Interval(Real, 0, 1, closed="neither"),
        Interval(Real, 0, None, closed="both"),
        Interval(Real, None, 0, closed="right"),
        HasMethods("fit"),
        _IterablesNotString(),
        _CVObjects(),
    ],
)
def test_generate_valid_param(constraint):
    """Check that the value generated does satisfy the constraint."""
    value = generate_valid_param(constraint)
    assert constraint.is_satisfied_by(value)
# Pairs of (constraint declaration, a value that must satisfy it). The
# star-unpacked comprehension expands to one case per CSR container type.
@pytest.mark.parametrize(
    "constraint_declaration, value",
    [
        (Interval(Real, 0, 1, closed="both"), 0.42),
        (Interval(Integral, 0, None, closed="neither"), 42),
        (StrOptions({"a", "b", "c"}), "b"),
        (Options(type, {np.float32, np.float64}), np.float64),
        (callable, lambda x: x + 1),
        (None, None),
        ("array-like", [[1, 2], [3, 4]]),
        ("array-like", np.array([[1, 2], [3, 4]])),
        ("sparse matrix", csr_matrix([[1, 2], [3, 4]])),
        *[
            ("sparse matrix", container([[1, 2], [3, 4]]))
            for container in CSR_CONTAINERS
        ],
        ("random_state", 0),
        ("random_state", np.random.RandomState(0)),
        ("random_state", None),
        (_Class, _Class()),
        (int, 1),
        (Real, 0.5),
        ("boolean", False),
        ("verbose", 1),
        ("nan", np.nan),
        (MissingValues(), -1),
        (MissingValues(), -1.0),
        (MissingValues(), 2**1028),
        (MissingValues(), None),
        (MissingValues(), float("nan")),
        (MissingValues(), np.nan),
        (MissingValues(), "missing"),
        (HasMethods("fit"), _Estimator(a=0)),
        ("cv_object", 5),
    ],
)
def test_is_satisfied_by(constraint_declaration, value):
    """Sanity check for the is_satisfied_by method"""
    constraint = make_constraint(constraint_declaration)
    assert constraint.is_satisfied_by(value)
# Pairs of (constraint declaration, constraint class it must dispatch to).
@pytest.mark.parametrize(
    "constraint_declaration, expected_constraint_class",
    [
        (Interval(Real, 0, 1, closed="both"), Interval),
        (StrOptions({"option1", "option2"}), StrOptions),
        (Options(Real, {0.42, 1.23}), Options),
        ("array-like", _ArrayLikes),
        ("sparse matrix", _SparseMatrices),
        ("random_state", _RandomStates),
        (None, _NoneConstraint),
        (callable, _Callables),
        (int, _InstancesOf),
        ("boolean", _Booleans),
        ("verbose", _VerboseHelper),
        (MissingValues(numeric_only=True), MissingValues),
        (HasMethods("fit"), HasMethods),
        ("cv_object", _CVObjects),
        ("nan", _NanConstraint),
        (np.nan, _NanConstraint),
    ],
)
def test_make_constraint(constraint_declaration, expected_constraint_class):
    """Check that make_constraint dispatches to the appropriate constraint class"""
    constraint = make_constraint(constraint_declaration)
    assert constraint.__class__ is expected_constraint_class
def test_make_constraint_unknown():
    """Check that an informative error is raised when an unknown constraint is passed"""
    expected_msg = "Unknown constraint"
    with pytest.raises(ValueError, match=expected_msg):
        make_constraint("not a valid constraint")
def test_validate_params():
    """Check that validate_params works no matter how the arguments are passed"""
    # Each case: the name of the offending parameter and a call that passes an
    # invalid value for it through a different argument-passing mechanism.
    bad_calls = [
        ("a", lambda: _func("wrong", c=1)),
        ("b", lambda: _func(*[1, "wrong"], c=1)),
        ("c", lambda: _func(1, **{"c": "wrong"})),
        ("d", lambda: _func(1, c=1, d="wrong")),
        # in the presence of extra positional and keyword args
        ("b", lambda: _func(0, *["wrong", 2, 3], c=4, **{"e": 5})),
        ("c", lambda: _func(0, *[1, 2, 3], c="four", **{"e": 5})),
    ]
    for param_name, bad_call in bad_calls:
        with pytest.raises(
            InvalidParameterError,
            match=f"The '{param_name}' parameter of _func must be",
        ):
            bad_call()
def test_validate_params_missing_params():
    """Check that no error is raised when there are parameters without
    constraints
    """

    @validate_params({"a": [int]}, prefer_skip_nested_validation=True)
    def partially_constrained(a, b):
        pass

    # `b` has no declared constraint: the call must succeed.
    partially_constrained(1, 2)
def test_decorate_validated_function():
    """Check that validate_params functions can be decorated"""
    wrapped = deprecated()(_func)

    deprecation_match = "Function _func is deprecated"
    with pytest.warns(FutureWarning, match=deprecation_match):
        wrapped(1, 2, c=3)

    # The outer decorator does not interfere with validation.
    with pytest.warns(FutureWarning, match=deprecation_match):
        with pytest.raises(
            InvalidParameterError, match=r"The 'c' parameter of _func must be"
        ):
            wrapped(1, 2, c="wrong")
def test_validate_params_method():
    """Check that validate_params works with methods"""
    # The error message names the method qualified by its class.
    with pytest.raises(
        InvalidParameterError, match="The 'a' parameter of _Class._method must be"
    ):
        _Class()._method("wrong")
    # validated method can be decorated
    # Deprecation warning and validation error are both emitted.
    with pytest.warns(FutureWarning, match="Function _deprecated_method is deprecated"):
        with pytest.raises(
            InvalidParameterError,
            match="The 'a' parameter of _Class._deprecated_method must be",
        ):
            _Class()._deprecated_method("wrong")
def test_validate_params_estimator():
    """Estimator parameters are validated at fit time, never in __init__."""
    estimator = _Estimator("wrong")  # constructing with a bad value is fine
    with pytest.raises(
        InvalidParameterError, match="The 'a' parameter of _Estimator must be"
    ):
        estimator.fit()
def test_stroptions_deprecated_subset():
    """Check that the deprecated parameter must be a subset of options."""
    # "d" is deprecated but not among the declared options -> error.
    with pytest.raises(ValueError, match="deprecated options must be a subset"):
        StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
def test_hidden_constraint():
    """Hidden constraints accept values but are omitted from error messages."""

    @validate_params(
        {"param": [Hidden(list), dict]}, prefer_skip_nested_validation=True
    )
    def f(param):
        pass

    # Both the hidden (list) and the visible (dict) constraint accept values.
    f([1, 2, 3])
    f({"a": 1, "b": 2, "c": 3})

    with pytest.raises(
        InvalidParameterError, match="The 'param' parameter"
    ) as exc_info:
        f(param="bad")

    # Only the visible dict option is mentioned in the message.
    message = str(exc_info.value)
    assert "an instance of 'dict'" in message
    assert "an instance of 'list'" not in message
def test_hidden_stroptions():
    """Two StrOptions constraints can coexist with one of them hidden."""

    @validate_params(
        {"param": [StrOptions({"auto"}), Hidden(StrOptions({"warn"}))]},
        prefer_skip_nested_validation=True,
    )
    def f(param):
        pass

    # Both the visible and the hidden option are accepted.
    f("warn")
    f("auto")

    with pytest.raises(
        InvalidParameterError, match="The 'param' parameter"
    ) as exc_info:
        f(param="bad")

    # The hidden "warn" option must not leak into the error message.
    message = str(exc_info.value)
    assert "auto" in message
    assert "warn" not in message
def test_validate_params_set_param_constraints_attribute():
    """The decorator stores the constraints dict on the decorated callable."""
    for validated_callable in (_func, _Class()._method):
        assert hasattr(validated_callable, "_skl_parameter_constraints")
def test_boolean_constraint_deprecated_int():
    """Python bools and NumPy bools both satisfy the "boolean" constraint."""

    @validate_params({"param": ["boolean"]}, prefer_skip_nested_validation=True)
    def takes_bool(param):
        pass

    # Neither call should raise.
    takes_bool(np.bool_(False))
    takes_bool(True)
def test_no_validation():
    """The "no_validation" marker exempts a parameter from any check."""

    @validate_params(
        {"param1": [int, None], "param2": "no_validation"},
        prefer_skip_nested_validation=True,
    )
    def f(param1=None, param2=None):
        pass

    # param1 has constraints and rejects a bad value.
    with pytest.raises(InvalidParameterError, match="The 'param1' parameter"):
        f(param1="wrong")

    # param2 is exempt: an arbitrary class and an instance of it both pass.
    class ArbitraryType:
        pass

    f(param2=ArbitraryType)
    f(param2=ArbitraryType())
def test_pandas_na_constraint_with_pd_na():
    """The pandas.NA singleton satisfies _PandasNAConstraint; other values do not."""
    pd = pytest.importorskip("pandas")
    constraint = _PandasNAConstraint()
    assert constraint.is_satisfied_by(pd.NA)
    assert not constraint.is_satisfied_by(np.array([1, 2, 3]))
def test_iterable_not_string():
    """Strings, although iterable, do not satisfy _IterablesNotString."""
    not_string = _IterablesNotString()
    # Genuine iterables pass...
    assert not_string.is_satisfied_by(range(10))
    assert not_string.is_satisfied_by([1, 2, 3])
    # ...but a str is explicitly rejected.
    assert not not_string.is_satisfied_by("some string")
def test_cv_objects():
    """_CVObjects accepts every supported way of specifying a CV splitter."""
    constraint = _CVObjects()
    valid_cv_specs = (
        5,  # number of folds
        LeaveOneOut(),  # splitter instance
        [([1, 2], [3, 4]), ([3, 4], [1, 2])],  # explicit train/test splits
        None,  # default CV
    )
    for cv in valid_cv_specs:
        assert constraint.is_satisfied_by(cv)
    assert not constraint.is_satisfied_by("not a CV object")
def test_third_party_estimator():
    """Check that the validation from a scikit-learn estimator inherited by a third
    party estimator does not impose a match between the dict of constraints and the
    parameters of the estimator.
    """
    # Minimal third-party subclass: its own parameter "b" is not in the
    # inherited constraints dict, and the inherited "a" is not one of its
    # constructor parameters.
    class ThirdPartyEstimator(_Estimator):
        def __init__(self, b):
            self.b = b
            super().__init__(a=0)
        def fit(self, X=None, y=None):
            super().fit(X, y)
    # does not raise, even though "b" is not in the constraints dict and "a" is not
    # a parameter of the estimator.
    ThirdPartyEstimator(b=0).fit()
def test_interval_real_not_int():
    """Interval with RealNotInt accepts floats but rejects equal-valued ints."""
    interval = Interval(RealNotInt, 0, 1, closed="both")
    assert interval.is_satisfied_by(1.0)
    assert not interval.is_satisfied_by(1)
def test_real_not_int():
    """RealNotInt matches real floating values and excludes integral types."""
    for real_value in (1.0, np.float64(1)):
        assert isinstance(real_value, RealNotInt)
    for integral_value in (1, np.int64(1)):
        assert not isinstance(integral_value, RealNotInt)
def test_skip_param_validation():
    """config_context(skip_parameter_validation=True) disables validation."""

    @validate_params({"a": [int]}, prefer_skip_nested_validation=True)
    def g(a):
        pass

    with pytest.raises(InvalidParameterError, match="The 'a' parameter"):
        g(a="1")

    # Inside the context manager the invalid value is silently accepted.
    with config_context(skip_parameter_validation=True):
        g(a="1")
@pytest.mark.parametrize("prefer_skip_nested_validation", [True, False])
def test_skip_nested_validation(prefer_skip_nested_validation):
    """Check that nested validation can be skipped."""

    @validate_params({"a": [int]}, prefer_skip_nested_validation=True)
    def inner(a):
        pass

    @validate_params(
        {"b": [int]},
        prefer_skip_nested_validation=prefer_skip_nested_validation,
    )
    def outer(b):
        # forwards an invalid value to the nested validated function
        return inner(a="invalid_param_value")

    # The outer function itself is always validated.
    with pytest.raises(InvalidParameterError, match="The 'b' parameter"):
        outer(b="invalid_param_value")

    if prefer_skip_nested_validation:
        # nested validation of `inner` is skipped -> no error
        outer(b=1)
    else:
        with pytest.raises(InvalidParameterError, match="The 'a' parameter"):
            outer(b=1)
@pytest.mark.parametrize(
    "skip_parameter_validation, prefer_skip_nested_validation, expected_skipped",
    [
        (True, True, True),
        (True, False, True),
        (False, True, True),
        (False, False, False),
    ],
)
def test_skip_nested_validation_and_config_context(
    skip_parameter_validation, prefer_skip_nested_validation, expected_skipped
):
    """Check interaction between global skip and local skip."""

    @validate_params(
        {"a": [int]}, prefer_skip_nested_validation=prefer_skip_nested_validation
    )
    def report_skip_state(a):
        # Expose the effective config as seen from inside the validated call.
        return get_config()["skip_parameter_validation"]

    with config_context(skip_parameter_validation=skip_parameter_validation):
        assert report_skip_state(1) == expected_skipped
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_multiclass.py | sklearn/utils/tests/test_multiclass.py | import warnings
from itertools import product
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn import config_context, datasets
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn.utils._array_api import (
_get_namespace_device_dtype_ids,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._testing import (
_array_api_for_tests,
_convert_container,
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.estimator_checks import _NotAnArray
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
)
from sklearn.utils.metaestimators import _safe_split
from sklearn.utils.multiclass import (
_ovr_decision_function,
check_classification_targets,
class_distribution,
is_multilabel,
type_of_target,
unique_labels,
)
# 2x2 indicator matrix whose first column is overwritten with zeros; used below
# to build sparse examples that store explicit zero entries.
multilabel_explicit_zero = np.array([[0, 1], [1, 0]])
multilabel_explicit_zero[:, 0] = 0
def _generate_sparse(
    data,
    sparse_containers=tuple(
        COO_CONTAINERS
        + CSC_CONTAINERS
        + CSR_CONTAINERS
        + DOK_CONTAINERS
        + LIL_CONTAINERS
    ),
    dtypes=(bool, int, np.int8, np.uint8, float, np.float32),
):
    """Materialize `data` as every (sparse container, dtype) combination."""
    variants = []
    for make_sparse in sparse_containers:
        for dtype in dtypes:
            variants.append(make_sparse(data, dtype=dtype))
    return variants
# Canonical examples for each target kind recognized by type_of_target; used
# throughout this module to drive type_of_target / is_multilabel /
# unique_labels / check_classification_targets tests.
EXAMPLES = {
    "multilabel-indicator": [
        # valid when the data is formatted as sparse or dense, identified
        # by CSR format when the testing takes place
        *_generate_sparse(
            np.random.RandomState(42).randint(2, size=(10, 10)),
            sparse_containers=CSR_CONTAINERS,
            dtypes=(int,),
        ),
        [[0, 1], [1, 0]],
        [[0, 1]],
        # sparse matrices that store explicit zeros
        *_generate_sparse(
            multilabel_explicit_zero, sparse_containers=CSC_CONTAINERS, dtypes=(int,)
        ),
        *_generate_sparse([[0, 1], [1, 0]]),
        *_generate_sparse([[0, 0], [0, 0]]),
        *_generate_sparse([[0, 1]]),
        # Only valid when data is dense
        [[-1, 1], [1, -1]],
        np.array([[-1, 1], [1, -1]]),
        np.array([[-3, 3], [3, -3]]),
        _NotAnArray(np.array([[-3, 3], [3, -3]])),
    ],
    "multiclass": [
        [1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
        np.array([1, 0, 2]),
        np.array([1, 0, 2], dtype=np.int8),
        np.array([1, 0, 2], dtype=np.uint8),
        np.array([1, 0, 2], dtype=float),
        np.array([1, 0, 2], dtype=np.float32),
        np.array([[1], [0], [2]]),
        _NotAnArray(np.array([1, 0, 2])),
        [0, 1, 2],
        ["a", "b", "c"],
        np.array(["a", "b", "c"]),
        # NOTE(review): the next entry is duplicated — presumably intentional
        # (repeated inputs must behave identically); confirm.
        np.array(["a", "b", "c"], dtype=object),
        np.array(["a", "b", "c"], dtype=object),
    ],
    "multiclass-multioutput": [
        [[1, 0, 2, 2], [1, 4, 2, 4]],
        [["a", "b"], ["c", "d"]],
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=float),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
        *_generate_sparse(
            [[1, 0, 2, 2], [1, 4, 2, 4]],
            sparse_containers=CSC_CONTAINERS + CSR_CONTAINERS,
            dtypes=(int, np.int8, np.uint8, float, np.float32),
        ),
        np.array([["a", "b"], ["c", "d"]]),
        np.array([["a", "b"], ["c", "d"]]),
        np.array([["a", "b"], ["c", "d"]], dtype=object),
        np.array([[1, 0, 2]]),
        _NotAnArray(np.array([[1, 0, 2]])),
    ],
    "binary": [
        [0, 1],
        [1, 1],
        [],
        [0],
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=bool),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=float),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
        np.array([[0], [1]]),
        _NotAnArray(np.array([[0], [1]])),
        [1, -1],
        [3, 5],
        ["a"],
        ["a", "b"],
        ["abc", "def"],
        np.array(["abc", "def"]),
        ["a", "b"],
        np.array(["abc", "def"], dtype=object),
    ],
    "continuous": [
        [1e-5],
        [0, 0.5],
        np.array([[0], [0.5]]),
        np.array([[0], [0.5]], dtype=np.float32),
    ],
    "continuous-multioutput": [
        np.array([[0, 0.5], [0.5, 0]]),
        np.array([[0, 0.5], [0.5, 0]], dtype=np.float32),
        np.array([[0, 0.5]]),
        *_generate_sparse(
            [[0, 0.5], [0.5, 0]],
            sparse_containers=CSC_CONTAINERS + CSR_CONTAINERS,
            dtypes=(float, np.float32),
        ),
        *_generate_sparse(
            [[0, 0.5]],
            sparse_containers=CSC_CONTAINERS + CSR_CONTAINERS,
            dtypes=(float, np.float32),
        ),
    ],
    "unknown": [
        [[]],
        np.array([[]], dtype=object),
        [()],
        # sequence of sequences that weren't supported even before deprecation
        np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
        [np.array([]), np.array([1, 2, 3])],
        [{1, 2, 3}, {1, 2}],
        [frozenset([1, 2, 3]), frozenset([1, 2])],
        # and also confusable as sequences of sequences
        [{0: "a", 1: "b"}, {0: "a"}],
        # ndim 0
        np.array(0),
        # empty second dimension
        np.array([[], []]),
        # 3d
        np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
    ],
}
# Subset of EXAMPLES restricted to inputs convertible to array-API namespaces
# (no scipy sparse matrices, no string/object dtypes).
ARRAY_API_EXAMPLES = {
    "multilabel-indicator": [
        np.random.RandomState(42).randint(2, size=(10, 10)),
        [[0, 1], [1, 0]],
        [[0, 1]],
        multilabel_explicit_zero,
        [[0, 0], [0, 0]],
        [[-1, 1], [1, -1]],
        np.array([[-1, 1], [1, -1]]),
        np.array([[-3, 3], [3, -3]]),
        _NotAnArray(np.array([[-3, 3], [3, -3]])),
    ],
    "multiclass": [
        [1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
        np.array([1, 0, 2]),
        np.array([1, 0, 2], dtype=np.int8),
        np.array([1, 0, 2], dtype=np.uint8),
        np.array([1, 0, 2], dtype=float),
        np.array([1, 0, 2], dtype=np.float32),
        np.array([[1], [0], [2]]),
        _NotAnArray(np.array([1, 0, 2])),
        [0, 1, 2],
    ],
    "multiclass-multioutput": [
        [[1, 0, 2, 2], [1, 4, 2, 4]],
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=float),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
        np.array([[1, 0, 2]]),
        _NotAnArray(np.array([[1, 0, 2]])),
    ],
    "binary": [
        [0, 1],
        [1, 1],
        [],
        [0],
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=bool),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=float),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
        np.array([[0], [1]]),
        _NotAnArray(np.array([[0], [1]])),
        [1, -1],
        [3, 5],
    ],
    "continuous": [
        [1e-5],
        [0, 0.5],
        np.array([[0], [0.5]]),
        np.array([[0], [0.5]], dtype=np.float32),
    ],
    "continuous-multioutput": [
        np.array([[0, 0.5], [0.5, 0]]),
        np.array([[0, 0.5], [0.5, 0]], dtype=np.float32),
        np.array([[0, 0.5]]),
    ],
    "unknown": [
        [[]],
        [()],
        np.array(0),
        np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
    ],
}
# Inputs that are not array-like at all; type_of_target / unique_labels must
# reject them with a ValueError.
NON_ARRAY_LIKE_EXAMPLES = [
    {1, 2, 3},
    {0: "a", 1: "b"},
    {0: [5], 1: [5]},
    "abc",
    frozenset([1, 2, 3]),
    None,
]
# Legacy "sequence of sequences" multilabel representations; these must raise
# the dedicated deprecation-style error in type_of_target.
MULTILABEL_SEQUENCES = [
    [[1], [2], [0, 1]],
    # NOTE(review): (2) is an int, not a 1-tuple — presumably intentional as a
    # mixed legacy-sequence example; confirm.
    [(), (2), (0, 1)],
    np.array([[], [1, 2]], dtype="object"),
    _NotAnArray(np.array([[], [1, 2]], dtype="object")),
]
def test_unique_labels():
    """unique_labels on multiclass and indicator inputs, plus error cases."""
    # Empty iterable
    with pytest.raises(ValueError):
        unique_labels()

    # Multiclass inputs yield the sorted unique values.
    assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
    assert_array_equal(unique_labels(range(10)), np.arange(10))
    assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))

    # Multilabel indicator matrices: the labels are the column indices.
    assert_array_equal(
        unique_labels(np.array([[0, 0, 1], [1, 0, 1], [0, 0, 0]])), np.arange(3)
    )
    assert_array_equal(unique_labels(np.array([[0, 0, 1], [0, 0, 0]])), np.arange(3))

    # Several arrays at once are merged into one label set.
    assert_array_equal(unique_labels([4, 0, 2], range(5)), np.arange(5))
    assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)), np.arange(3))

    # Mixing indicator matrices with incompatible targets is an error.
    with pytest.raises(ValueError):
        unique_labels([4, 0, 2], np.ones((5, 5)))
    with pytest.raises(ValueError):
        unique_labels(np.ones((5, 4)), np.ones((5, 5)))
    assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))), np.arange(5))
def test_check_classification_targets_too_many_unique_classes():
    """A warning is raised when unique classes exceed 50% of the sample count.

    The heuristic is disabled for fewer than 20 samples.
    """
    msg = r"The number of unique classes is greater than 50% of the number of samples."

    # 25 samples, 25 unique labels -> warning expected.
    with pytest.warns(UserWarning, match=msg):
        check_classification_targets(np.arange(25))

    # Fewer than 20 samples: no warning even though all labels are unique.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        check_classification_targets(np.arange(10))
def test_unique_labels_non_specific():
    """Smoke test unique_labels against the collected examples."""
    # All supported target kinds are accepted without error.
    for supported_kind in ("binary", "multiclass", "multilabel-indicator"):
        for target in EXAMPLES[supported_kind]:
            unique_labels(target)

    # Non array-like inputs are rejected.
    for bad_input in NON_ARRAY_LIKE_EXAMPLES:
        with pytest.raises(ValueError):
            unique_labels(bad_input)

    # Unsupported target kinds are rejected as well.
    for unsupported_kind in (
        "unknown",
        "continuous",
        "continuous-multioutput",
        "multiclass-multioutput",
    ):
        for target in EXAMPLES[unsupported_kind]:
            with pytest.raises(ValueError):
                unique_labels(target)
def test_unique_labels_mixed_types():
    """Mixing incompatible target kinds or label types must raise."""
    # Indicator matrices cannot be combined with binary/multiclass targets,
    # in either argument order.
    flat_targets = EXAMPLES["multiclass"] + EXAMPLES["binary"]
    for y_multilabel, y_flat in product(
        EXAMPLES["multilabel-indicator"], flat_targets
    ):
        with pytest.raises(ValueError):
            unique_labels(y_flat, y_multilabel)
        with pytest.raises(ValueError):
            unique_labels(y_multilabel, y_flat)

    # Mixing string and numeric labels is also rejected.
    for mixed_args in (
        ([[1, 2]], [["a", "d"]]),
        (["1", 2],),
        ([["1", 2], [1, 3]],),
        ([["1", "2"], [2, 3]],),
    ):
        with pytest.raises(ValueError):
            unique_labels(*mixed_args)
def test_is_multilabel():
    # For every target kind, is_multilabel must be True exactly for the
    # "multilabel-indicator" group, on both dense and sparse representations.
    for group, group_examples in EXAMPLES.items():
        dense_exp = group == "multilabel-indicator"
        for example in group_examples:
            # Only mark explicitly defined sparse examples as valid sparse
            # multilabel-indicators
            sparse_exp = dense_exp and issparse(example)
            # Convert 2d numeric examples to every sparse format and check the
            # sparse expectation on each.
            if issparse(example) or (
                hasattr(example, "__array__")
                and np.asarray(example).ndim == 2
                and np.asarray(example).dtype.kind in "biuf"
                and np.asarray(example).shape[1] > 0
            ):
                examples_sparse = [
                    sparse_container(example)
                    for sparse_container in (
                        COO_CONTAINERS
                        + CSC_CONTAINERS
                        + CSR_CONTAINERS
                        + DOK_CONTAINERS
                        + LIL_CONTAINERS
                    )
                ]
                for exmpl_sparse in examples_sparse:
                    assert sparse_exp == is_multilabel(exmpl_sparse), (
                        f"is_multilabel({exmpl_sparse!r}) should be {sparse_exp}"
                    )
            # Densify sparse examples before testing
            if issparse(example):
                example = example.toarray()
            assert dense_exp == is_multilabel(example), (
                f"is_multilabel({example!r}) should be {dense_exp}"
            )
@pytest.mark.parametrize(
    "array_namespace, device, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_is_multilabel_array_api_compliance(array_namespace, device, dtype_name):
    # Same expectations as test_is_multilabel, but with inputs converted to
    # each supported array-API namespace/device/dtype combination.
    xp = _array_api_for_tests(array_namespace, device)
    for group, group_examples in ARRAY_API_EXAMPLES.items():
        dense_exp = group == "multilabel-indicator"
        for example in group_examples:
            # Cast floating examples to the parametrized dtype; keep integer
            # examples as-is.
            if np.asarray(example).dtype.kind == "f":
                example = np.asarray(example, dtype=dtype_name)
            else:
                example = np.asarray(example)
            example = xp.asarray(example, device=device)
            with config_context(array_api_dispatch=True):
                assert dense_exp == is_multilabel(example), (
                    f"is_multilabel({example!r}) should be {dense_exp}"
                )
def test_check_classification_targets():
    """Classification targets pass; continuous/unknown targets raise."""
    non_classification = ("unknown", "continuous", "continuous-multioutput")
    for y_type, examples in EXAMPLES.items():
        if y_type in non_classification:
            for example in examples:
                with pytest.raises(ValueError, match="Unknown label type: "):
                    check_classification_targets(example)
        else:
            for example in examples:
                check_classification_targets(example)
def test_type_of_target():
    """type_of_target classifies every collected example correctly."""
    for expected_type, examples in EXAMPLES.items():
        for example in examples:
            actual_type = type_of_target(example)
            assert actual_type == expected_type, (
                "type_of_target(%r) should be %r, got %r"
                % (example, expected_type, actual_type)
            )

    # Non array-like inputs raise with a dedicated message.
    msg_regex = r"Expected array-like \(array or non-string sequence\).*"
    for example in NON_ARRAY_LIKE_EXAMPLES:
        with pytest.raises(ValueError, match=msg_regex):
            type_of_target(example)

    # Legacy sequence-of-sequences multilabel data is rejected.
    msg = (
        "You appear to be using a legacy multi-label data "
        "representation. Sequence of sequences are no longer supported;"
        " use a binary array or sparse matrix instead."
    )
    for example in MULTILABEL_SEQUENCES:
        with pytest.raises(ValueError, match=msg):
            type_of_target(example)
def test_type_of_target_pandas_sparse():
    """Pandas sparse arrays are rejected with an explicit error."""
    pd = pytest.importorskip("pandas")
    sparse_target = pd.arrays.SparseArray([1, np.nan, np.nan, 1, np.nan])
    expected_msg = "y cannot be class 'SparseSeries' or 'SparseArray'"
    with pytest.raises(ValueError, match=expected_msg):
        type_of_target(sparse_target)
def test_type_of_target_pandas_nullable():
    """Check that type_of_target works with pandas nullable dtypes."""
    pd = pytest.importorskip("pandas")

    # 1d Series: nullable ints/floats behave like their numpy counterparts.
    for dtype in ["Int32", "Float32"]:
        assert type_of_target(pd.Series([1, 0, 2, 3, 4], dtype=dtype)) == "multiclass"
        assert type_of_target(pd.Series([1, 0, 1, 0], dtype=dtype)) == "binary"

    # 2d DataFrames map onto the multioutput / indicator types.
    frame_expectations = [
        (pd.DataFrame([[1.4, 3.1], [3.1, 1.4]], dtype="Float32"), "continuous-multioutput"),
        (pd.DataFrame([[0, 1], [1, 1]], dtype="Int32"), "multilabel-indicator"),
        (pd.DataFrame([[1, 2], [3, 1]], dtype="Int32"), "multiclass-multioutput"),
    ]
    for frame, expected in frame_expectations:
        assert type_of_target(frame) == expected
@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
def test_unique_labels_pandas_nullable(dtype):
    """Checks that unique_labels work with pandas nullable dtypes.

    Non-regression test for gh-25634.
    """
    pd = pytest.importorskip("pandas")
    y_nullable = pd.Series([1, 0, 0, 1, 0, 1, 1, 0, 1], dtype=dtype)
    y_plain = pd.Series([0, 0, 1, 1, 0, 1, 1, 1, 1], dtype="int64")
    assert_array_equal(unique_labels(y_nullable, y_plain), [0, 1])
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_class_distribution(csc_container):
    """Check class_distribution on dense and sparse targets, with and without
    sample weights.

    The sparse matrix mixes implicit and explicit zeros to exercise the
    sparse-specific zero handling.
    """
    y = np.array(
        [
            [1, 0, 0, 1],
            [2, 2, 0, 1],
            [1, 3, 0, 1],
            [4, 2, 0, 1],
            [2, 0, 0, 1],
            [1, 3, 0, 1],
        ]
    )
    # Define the sparse matrix with a mix of implicit and explicit zeros
    data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
    indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
    indptr = np.array([0, 6, 11, 11, 17])
    y_sp = csc_container((data, indices, indptr), shape=(6, 4))

    classes, n_classes, class_prior = class_distribution(y)
    classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
    classes_expected = [[1, 2, 4], [0, 2, 3], [0], [1]]
    n_classes_expected = [3, 3, 1, 1]
    class_prior_expected = [[3 / 6, 2 / 6, 1 / 6], [1 / 3, 1 / 3, 1 / 3], [1.0], [1.0]]
    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])
        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])

    # Test again with explicit sample weights
    sample_weight = [1.0, 2.0, 1.0, 2.0, 1.0, 2.0]
    (classes, n_classes, class_prior) = class_distribution(y, sample_weight)
    # Bug fix: this call previously passed the dense `y`, so the weighted
    # sparse code path was never exercised.
    (classes_sp, n_classes_sp, class_prior_sp) = class_distribution(
        y_sp, sample_weight
    )
    class_prior_expected = [[4 / 9, 3 / 9, 2 / 9], [2 / 9, 4 / 9, 3 / 9], [1.0], [1.0]]
    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])
        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
def test_safe_split_with_precomputed_kernel():
    """_safe_split must index a precomputed kernel on both axes."""
    clf = SVC()
    clf_precomputed = SVC(kernel="precomputed")
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)  # linear kernel Gram matrix

    cv = ShuffleSplit(test_size=0.25, random_state=0)
    train, test = next(iter(cv.split(X)))

    # The kernel split must equal the Gram matrix of the feature split.
    X_train, y_train = _safe_split(clf, X, y, train)
    K_train, y_train2 = _safe_split(clf_precomputed, K, y, train)
    assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
    assert_array_almost_equal(y_train, y_train2)

    # Test rows are indexed against the train columns for a precomputed kernel.
    X_test, y_test = _safe_split(clf, X, y, test, train)
    K_test, y_test2 = _safe_split(clf_precomputed, K, y, test, train)
    assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
    assert_array_almost_equal(y_test, y_test2)
def test_ovr_decision_function():
    # test properties for ovr decision function
    # Pairwise predictions/confidences for 4 samples and 3 classes.
    predictions = np.array([[0, 1, 1], [0, 1, 0], [0, 1, 1], [0, 1, 1]])
    confidences = np.array(
        [[-1e16, 0, -1e16], [1.0, 2.0, -3.0], [-5.0, 2.0, 5.0], [-0.5, 0.2, 0.5]]
    )
    n_classes = 3
    dec_values = _ovr_decision_function(predictions, confidences, n_classes)
    # check that the decision values are within 0.5 range of the votes
    votes = np.array([[1, 0, 2], [1, 1, 1], [1, 0, 2], [1, 0, 2]])
    assert_allclose(votes, dec_values, atol=0.5)
    # check that the prediction are what we expect
    # highest vote or highest confidence if there is a tie.
    # for the second sample we have a tie (should be won by 1)
    expected_prediction = np.array([2, 1, 2, 2])
    assert_array_equal(np.argmax(dec_values, axis=1), expected_prediction)
    # third and fourth sample have the same vote but third sample
    # has higher confidence, this should reflect on the decision values
    assert dec_values[2, 2] > dec_values[3, 2]
    # assert subset invariance.
    # Feeding one sample at a time must give the same per-sample values as
    # feeding the whole batch.
    dec_values_one = [
        _ovr_decision_function(
            np.array([predictions[i]]), np.array([confidences[i]]), n_classes
        )[0]
        for i in range(4)
    ]
    assert_allclose(dec_values, dec_values_one, atol=1e-6)
@pytest.mark.parametrize("input_type", ["list", "array"])
def test_labels_in_bytes_format_error(input_type):
    """Bytes-encoded labels must raise a TypeError.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/16980
    """
    bytes_target = _convert_container([b"a", b"b"], input_type)
    with pytest.raises(
        TypeError, match="Support for labels represented as bytes is not supported"
    ):
        type_of_target(bytes_target)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_chunking.py | sklearn/utils/tests/test_chunking.py | import warnings
from itertools import chain
import pytest
from sklearn import config_context
from sklearn.utils._chunking import gen_even_slices, get_chunk_n_rows
from sklearn.utils._testing import assert_array_equal
def test_gen_even_slices():
    """gen_even_slices must cover the full range with no gaps or overlaps.

    Concatenating the three generated slices of range(10) yields all ten
    elements in order.
    """
    some_range = range(10)
    # Loop variable renamed from `slice`, which shadowed the builtin.
    joined_range = list(
        chain(*[some_range[sl] for sl in gen_even_slices(10, 3)])
    )
    assert_array_equal(some_range, joined_range)
@pytest.mark.parametrize(
    ("row_bytes", "max_n_rows", "working_memory", "expected"),
    [
        (1024, None, 1, 1024),
        (1024, None, 0.99999999, 1023),
        (1023, None, 1, 1025),
        (1025, None, 1, 1023),
        (1024, None, 2, 2048),
        (1024, 7, 1, 7),
        (1024 * 1024, None, 1, 1),
    ],
)
def test_get_chunk_n_rows(row_bytes, max_n_rows, working_memory, expected):
    """get_chunk_n_rows honors explicit and config-provided working_memory."""
    # Explicit working_memory argument; any UserWarning is turned into an error.
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        n_rows = get_chunk_n_rows(
            row_bytes=row_bytes,
            max_n_rows=max_n_rows,
            working_memory=working_memory,
        )
    assert n_rows == expected
    assert type(n_rows) is type(expected)

    # The same result is expected when working_memory comes from the config.
    with config_context(working_memory=working_memory):
        with warnings.catch_warnings():
            warnings.simplefilter("error", UserWarning)
            n_rows = get_chunk_n_rows(row_bytes=row_bytes, max_n_rows=max_n_rows)
        assert n_rows == expected
        assert type(n_rows) is type(expected)
def test_get_chunk_n_rows_warns():
    """Check that warning is raised when working_memory is too low."""
    # A single row needs more than the 1MiB budget -> warn and return 1 row.
    row_bytes = 1024 * 1024 + 1
    max_n_rows = None
    working_memory = 1
    expected = 1
    warn_msg = (
        "Could not adhere to working_memory config. Currently 1MiB, 2MiB required."
    )

    # Explicit working_memory argument.
    with pytest.warns(UserWarning, match=warn_msg):
        n_rows = get_chunk_n_rows(
            row_bytes=row_bytes,
            max_n_rows=max_n_rows,
            working_memory=working_memory,
        )
    assert n_rows == expected
    assert type(n_rows) is type(expected)

    # working_memory taken from the global config.
    with config_context(working_memory=working_memory):
        with pytest.warns(UserWarning, match=warn_msg):
            n_rows = get_chunk_n_rows(row_bytes=row_bytes, max_n_rows=max_n_rows)
        assert n_rows == expected
        assert type(n_rows) is type(expected)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_deprecation.py | sklearn/utils/tests/test_deprecation.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pickle
from inspect import signature
import pytest
from sklearn.utils.deprecation import _is_deprecated, deprecated
# Fixture: class deprecated as a whole; instantiating it warns with "qwerty".
@deprecated("qwerty")
class MockClass1:
    pass
class MockClass2:
    # Fixture: only individual members are deprecated, not the class itself.
    @deprecated("mockclass2_method")
    def method(self):
        pass
    # Deprecated property: accessing it warns but still returns the value.
    @deprecated("n_features_ is deprecated")  # type: ignore[prop-decorator]
    @property
    def n_features_(self):
        """Number of input features."""
        return 10
class MockClass3:
    # Fixture: deprecated constructor with the default (empty) message.
    @deprecated()
    def __init__(self):
        pass
# Fixture: control class with no deprecation at all.
class MockClass4:
    pass
class MockClass5(MockClass1):
    """Inherit from deprecated class but does not call super().__init__."""
    def __init__(self, a):
        self.a = a
@deprecated("a message")
class MockClass6:
    """A deprecated class that overrides __new__."""
    def __new__(cls, *args, **kwargs):
        # The deprecation wrapper must forward positional args to __new__.
        assert len(args) > 0
        return super().__new__(cls)
# Fixture: deprecated free function; the wrapper must preserve the return value.
@deprecated()
def mock_function():
    return 10
def test_deprecated():
    """Each deprecated object warns with its message when used."""
    warning_cases = [
        (lambda: MockClass1(), "qwerty"),
        (lambda: MockClass2().method(), "mockclass2_method"),
        (lambda: MockClass3(), "deprecated"),
        (lambda: MockClass5(42), "qwerty"),
        (lambda: MockClass6(42), "a message"),
    ]
    for trigger, message in warning_cases:
        with pytest.warns(FutureWarning, match=message):
            trigger()

    # The wrapper must still return the original function's value.
    with pytest.warns(FutureWarning, match="deprecated"):
        assert mock_function() == 10
def test_is_deprecated():
    # _is_deprecated detects the wrapper installed by `deprecated`;
    # NOTE it works only for class methods and functions.
    deprecated_objects = (
        MockClass1.__new__,
        MockClass2().method,
        MockClass3.__init__,
        MockClass5.__new__,
        mock_function,
    )
    for obj in deprecated_objects:
        assert _is_deprecated(obj)
    assert not _is_deprecated(MockClass4.__init__)
def test_pickle():
    # A deprecated function must survive a pickle round-trip.
    pickle.loads(pickle.dumps(mock_function))
def test_deprecated_class_signature():
    """Deprecation must preserve the wrapped class's constructor signature."""

    @deprecated()
    class WrappedClass:
        def __init__(self, a, b=1, c=2):
            pass

    assert list(signature(WrappedClass).parameters.keys()) == ["a", "b", "c"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_stats.py | sklearn/utils/tests/test_stats.py | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from pytest import approx
from sklearn._config import config_context
from sklearn.utils._array_api import (
_convert_to_numpy,
get_namespace,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._array_api import device as array_device
from sklearn.utils.estimator_checks import _array_api_for_tests
from sklearn.utils.fixes import np_version, parse_version
from sklearn.utils.stats import _weighted_percentile
@pytest.mark.parametrize("average", [True, False])
@pytest.mark.parametrize("size", [10, 15])
def test_weighted_percentile_matches_median(size, average):
    """Ensure `_weighted_percentile` matches `median` when expected.

    With unit `sample_weight`, `_weighted_percentile` should match the median
    except when `average=False` and the number of samples is even, where
    `percentile_rank=50` gives the lower of the two 'middle' values that the
    median averages.
    """
    values = np.arange(size)
    unit_weights = np.ones_like(values)
    result = _weighted_percentile(values, unit_weights, 50, average=average)

    expect_mismatch = size % 2 == 0 and not average
    if expect_mismatch:
        assert result != np.median(values)
    else:
        assert approx(result) == np.median(values)
@pytest.mark.parametrize("average", [True, False])
@pytest.mark.parametrize("percentile_rank", [20, 35, 61, [5, 47]])
@pytest.mark.parametrize("size", [10, 15])
def test_weighted_percentile_matches_numpy(
    global_random_seed, size, percentile_rank, average
):
    """Check `_weighted_percentile` with unit weights against np.percentile.

    `average=True` should match np.percentile's 'averaged_inverted_cdf' and
    `average=False` its 'inverted_cdf'. Different `percentile_rank` and `size`
    values cover both the `g=0` and `g>0` cases of Hyndman and Fan (1996).
    """
    rng = np.random.RandomState(global_random_seed)
    values = rng.randint(20, size=size)
    unit_weights = np.ones_like(values)

    result = _weighted_percentile(values, unit_weights, percentile_rank, average=average)
    numpy_method = "averaged_inverted_cdf" if average else "inverted_cdf"
    assert approx(result) == np.percentile(values, percentile_rank, method=numpy_method)
@pytest.mark.parametrize("percentile_rank", [50, 100])
def test_weighted_percentile_plus_one_clip_max(percentile_rank):
"""Check `j+1` index is clipped to max, when `average=True`.
`percentile_plus_one_indices` can exceed max index when `percentile_indices`
is already at max index.
Note that when `g` (Hyndman and Fan) / `fraction_above` is greater than 0,
`j+1` (Hyndman and Fan) / `percentile_plus_one_indices` is calculated but
never used, so it does not matter what this value is.
When percentile of percentile rank 100 falls exactly on the last value in the
`weighted_cdf`, `g=0` and `percentile_indices` is at max index. In this case
we set `percentile_plus_one_indices` to be max index as well, so the result is
the average of 2x the max index (i.e. last value of `weighted_cdf`).
"""
# Note for both `percentile_rank`s 50 and 100,`percentile_indices` is already at
# max index
y = np.array([[0, 0], [1, 1]])
sw = np.array([[0.1, 0.2], [2, 3]])
score = _weighted_percentile(y, sw, percentile_rank, average=True)
for idx in range(2):
assert score[idx] == approx(1.0)
def test_weighted_percentile_equal():
    """Check `weighted_percentile` with unit weights and all 0 values in `array`."""
    values = np.zeros(102, dtype=np.float64)
    unit_weights = np.ones(102, dtype=np.float64)
    result = _weighted_percentile(values, unit_weights, 50)
    assert approx(result) == 0
# XXX: is this really what we want? Shouldn't we raise instead?
# https://github.com/scikit-learn/scikit-learn/issues/31032
def test_weighted_percentile_all_zero_weights():
    """Check `weighted_percentile` with all weights equal to 0 returns last index."""
    values = np.arange(10)
    zero_weights = np.zeros(10)
    result = _weighted_percentile(values, zero_weights, 50)
    # With no effective mass in the weighted CDF, the last value is returned.
    assert approx(result) == 9.0
@pytest.mark.parametrize("average", [True, False])
@pytest.mark.parametrize("percentile_rank, expected_value", [(0, 2), (50, 3), (100, 5)])
def test_weighted_percentile_ignores_zero_weight(
average, percentile_rank, expected_value
):
"""Check leading, trailing and middle 0 weights behave correctly.
Check that leading zero-weight observations are ignored when `percentile_rank=0`.
See #20528 for details.
Check that when `average=True` and the `j+1` ('plus one') index has sample weight
of 0, it is ignored. Also check that trailing zero weight observations are ignored
(e.g., when `percentile_rank=100`).
"""
y = np.array([0, 1, 2, 3, 4, 5, 6])
sw = np.array([0, 0, 1, 1, 0, 1, 0])
value = _weighted_percentile(
np.vstack((y, y)).T, np.vstack((sw, sw)).T, percentile_rank, average=average
)
for idx in range(2):
assert approx(value[idx]) == expected_value
@pytest.mark.parametrize("average", [True, False])
@pytest.mark.parametrize("percentile_rank", [20, 35, 50, 61])
def test_weighted_percentile_frequency_weight_semantics(
global_random_seed, percentile_rank, average
):
"""Check integer weights give the same result as repeating values."""
rng = np.random.RandomState(global_random_seed)
x = rng.randint(20, size=10)
weights = rng.choice(5, size=10)
x_repeated = np.repeat(x, weights)
percentile_weights = _weighted_percentile(
x, weights, percentile_rank, average=average
)
percentile_repeated = _weighted_percentile(
x_repeated, np.ones_like(x_repeated), percentile_rank, average=average
)
assert percentile_weights == approx(percentile_repeated)
# Also check `percentile_rank=50` matches `median`
if percentile_rank == 50 and average:
assert percentile_weights == approx(np.median(x_repeated))
@pytest.mark.parametrize("constant", [5, 8])
@pytest.mark.parametrize("average", [True, False])
@pytest.mark.parametrize("percentile_rank", [20, 35, 50, 61, [20, 35, 50, 61]])
def test_weighted_percentile_constant_multiplier(
global_random_seed, percentile_rank, average, constant
):
"""Check multiplying weights by a constant does not change the result.
Note scale invariance does not always hold when multiplying by a
float due to cumulative sum numerical error (which grows proportional to n).
"""
rng = np.random.RandomState(global_random_seed)
x = rng.randint(20, size=20)
weights = rng.choice(5, size=20)
weights_multiplied = weights * constant
percentile = _weighted_percentile(x, weights, percentile_rank, average=average)
percentile_multiplier = _weighted_percentile(
x, weights_multiplied, percentile_rank, average=average
)
assert percentile == approx(percentile_multiplier)
@pytest.mark.parametrize("percentile_rank", [50, [20, 35, 50]])
@pytest.mark.parametrize("average", [True, False])
def test_weighted_percentile_2d(global_random_seed, percentile_rank, average):
"""Check `_weighted_percentile` behaviour is correct when `array` is 2D."""
# Check for when array 2D and sample_weight 1D
rng = np.random.RandomState(global_random_seed)
x1 = rng.randint(10, size=10)
w1 = rng.choice(5, size=10)
x2 = rng.randint(20, size=10)
x_2d = np.vstack((x1, x2)).T
wp = _weighted_percentile(
x_2d, w1, percentile_rank=percentile_rank, average=average
)
if isinstance(percentile_rank, list):
p_list = []
for pr in percentile_rank:
p_list.append(
[
_weighted_percentile(
x_2d[:, i], w1, percentile_rank=pr, average=average
)
for i in range(x_2d.shape[1])
]
)
p_axis_0 = np.stack(p_list, axis=-1)
assert wp.shape == (x_2d.shape[1], len(percentile_rank))
else:
# percentile_rank is scalar
p_axis_0 = [
_weighted_percentile(
x_2d[:, i], w1, percentile_rank=percentile_rank, average=average
)
for i in range(x_2d.shape[1])
]
assert wp.shape == (x_2d.shape[1],)
assert_allclose(wp, p_axis_0)
# Check when array and sample_weight both 2D
w2 = rng.choice(5, size=10)
w_2d = np.vstack((w1, w2)).T
wp = _weighted_percentile(
x_2d, w_2d, percentile_rank=percentile_rank, average=average
)
if isinstance(percentile_rank, list):
p_list = []
for pr in percentile_rank:
p_list.append(
[
_weighted_percentile(
x_2d[:, i], w_2d[:, i], percentile_rank=pr, average=average
)
for i in range(x_2d.shape[1])
]
)
p_axis_0 = np.stack(p_list, axis=-1)
assert wp.shape == (x_2d.shape[1], len(percentile_rank))
else:
# percentile_rank is scalar
p_axis_0 = [
_weighted_percentile(
x_2d[:, i], w_2d[:, i], percentile_rank=percentile_rank, average=average
)
for i in range(x_2d.shape[1])
]
assert wp.shape == (x_2d.shape[1],)
assert_allclose(wp, p_axis_0)
@pytest.mark.parametrize(
    "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
)
@pytest.mark.parametrize(
    "data, weights, percentile",
    [
        # NumPy scalars input (handled as 0D arrays on array API)
        (np.float32(42), np.int32(1), 50),
        # Random 1D array, constant weights
        (lambda rng: rng.rand(50), np.ones(50).astype(np.int32), 50),
        # Random 2D array and random 1D weights
        (lambda rng: rng.rand(50, 3), lambda rng: rng.rand(50).astype(np.float32), 75),
        # Random 2D array and random 2D weights
        (
            lambda rng: rng.rand(20, 3),
            lambda rng: rng.rand(20, 3).astype(np.float32),
            [25, 75],
        ),
        # zero-weights and `rank_percentile=0` (#20528) (`sample_weight` dtype: int64)
        (np.array([0, 1, 2, 3, 4, 5]), np.array([0, 0, 1, 1, 1, 0]), 0),
        # np.nan's in data and some zero-weights (`sample_weight` dtype: int64)
        (np.array([np.nan, np.nan, 0, 3, 4, 5]), np.array([0, 1, 1, 1, 1, 0]), 0),
        # `sample_weight` dtype: int32
        (
            np.array([0, 1, 2, 3, 4, 5]),
            np.array([0, 1, 1, 1, 1, 0], dtype=np.int32),
            [25, 75],
        ),
    ],
)
def test_weighted_percentile_array_api_consistency(
    global_random_seed, array_namespace, device, dtype_name, data, weights, percentile
):
    """Check `_weighted_percentile` gives consistent results with array API.

    The plain-NumPy result is the reference; the array API result must match
    it in values, dtype and shape, and must stay on the input's
    namespace/device.
    """
    xp = _array_api_for_tests(array_namespace, device)
    # Skip test for percentile=0 edge case (#20528) on namespace/device where
    # xp.nextafter is broken. This is the case for torch with MPS device:
    # https://github.com/pytorch/pytorch/issues/150027
    zero = xp.zeros(1, device=device)
    one = xp.ones(1, device=device)
    if percentile == 0 and xp.all(xp.nextafter(zero, one) == zero):
        pytest.xfail(f"xp.nextafter is broken on {device}")
    rng = np.random.RandomState(global_random_seed)
    # Callables in the parametrization lazily build random inputs from `rng`.
    X_np = data(rng) if callable(data) else data
    weights_np = weights(rng) if callable(weights) else weights
    # Ensure `data` of correct dtype
    X_np = X_np.astype(dtype_name)
    # Reference result computed on plain NumPy inputs.
    result_np = _weighted_percentile(X_np, weights_np, percentile)
    # Convert to Array API arrays
    X_xp = xp.asarray(X_np, device=device)
    weights_xp = xp.asarray(weights_np, device=device)
    with config_context(array_api_dispatch=True):
        result_xp = _weighted_percentile(X_xp, weights_xp, percentile)
        # The result must live in the caller's namespace and on its device.
        assert array_device(result_xp) == array_device(X_xp)
        assert get_namespace(result_xp)[0] == get_namespace(X_xp)[0]
        result_xp_np = _convert_to_numpy(result_xp, xp=xp)
    assert result_xp_np.dtype == result_np.dtype
    assert result_xp_np.shape == result_np.shape
    assert_allclose(result_np, result_xp_np)
    # Check dtype correct (`sample_weight` should follow `array`)
    if dtype_name == "float32":
        assert result_xp_np.dtype == result_np.dtype == np.float32
    else:
        assert result_xp_np.dtype == np.float64
@pytest.mark.parametrize("average", [True, False])
@pytest.mark.parametrize("sample_weight_ndim", [1, 2])
def test_weighted_percentile_nan_filtered(
global_random_seed, sample_weight_ndim, average
):
"""Test `_weighted_percentile` ignores NaNs.
Calling `_weighted_percentile` on an array with nan values returns the same
results as calling `_weighted_percentile` on a filtered version of the data.
We test both with sample_weight of the same shape as the data and with
one-dimensional sample_weight.
"""
rng = np.random.RandomState(global_random_seed)
array_with_nans = rng.rand(100, 10)
array_with_nans[rng.rand(*array_with_nans.shape) < 0.5] = np.nan
nan_mask = np.isnan(array_with_nans)
if sample_weight_ndim == 2:
sample_weight = rng.randint(1, 6, size=(100, 10))
else:
sample_weight = rng.randint(1, 6, size=(100,))
# Find the weighted percentile on the array with nans:
results = _weighted_percentile(array_with_nans, sample_weight, 30, average=average)
# Find the weighted percentile on the filtered array:
filtered_array = [
array_with_nans[~nan_mask[:, col], col]
for col in range(array_with_nans.shape[1])
]
if sample_weight.ndim == 1:
sample_weight = np.repeat(sample_weight, array_with_nans.shape[1]).reshape(
array_with_nans.shape[0], array_with_nans.shape[1]
)
filtered_weights = [
sample_weight[~nan_mask[:, col], col] for col in range(array_with_nans.shape[1])
]
expected_results = np.array(
[
_weighted_percentile(
filtered_array[col], filtered_weights[col], 30, average=average
)
for col in range(array_with_nans.shape[1])
]
)
assert_array_equal(expected_results, results)
@pytest.mark.parametrize(
    "percentile_rank, expected",
    [
        (90, [np.nan, 5]),
        ([50, 90], [[np.nan, np.nan], [2.0, 5.0]]),
    ],
)
def test_weighted_percentile_all_nan_column(percentile_rank, expected):
    """Check that nans are ignored in general, except for all NaN columns."""
    nan = np.nan
    data = np.array(
        [
            [nan, 5],
            [nan, 1],
            [nan, nan],
            [nan, nan],
            [nan, 2],
            [nan, nan],
        ]
    )
    unit_weights = np.ones_like(data)
    result = _weighted_percentile(data, unit_weights, percentile_rank)
    # The second column has enough finite values to yield a finite percentile
    # despite the nans; the first column is entirely nan, so the only possible
    # result for it is nan.
    assert np.array_equal(result, expected, equal_nan=True)
@pytest.mark.skipif(
    np_version < parse_version("2.0"),
    reason="np.quantile only accepts weights since version 2.0",
)
@pytest.mark.parametrize("percentile", [66, 10, 50])
@pytest.mark.parametrize("average", [False, True])
@pytest.mark.parametrize("uniform_weight", [False, True])
def test_weighted_percentile_like_numpy_quantile(
    percentile, average, uniform_weight, global_random_seed
):
    """Check `_weighted_percentile` is equivalent to `np.quantile` with weights."""
    # TODO: remove the following skip once no longer applicable.
    if average and not uniform_weight:
        pytest.skip(
            "np.quantile does not support weights with method='averaged_inverted_cdf'"
        )
    rng = np.random.RandomState(global_random_seed)
    data = rng.rand(10, 100)
    if uniform_weight:
        sample_weight = np.ones_like(data) * rng.randint(1, 6, size=1)
    else:
        sample_weight = rng.randint(1, 6, size=(10, 100))
    numpy_method = "averaged_inverted_cdf" if average else "inverted_cdf"
    # With uniform weights, the unweighted NumPy call is the reference.
    numpy_weights = None if uniform_weight else sample_weight
    result = _weighted_percentile(data, sample_weight, percentile, average=average)
    reference = np.quantile(
        data, percentile / 100, weights=numpy_weights, method=numpy_method, axis=0
    )
    assert_array_equal(result, reference)
@pytest.mark.skipif(
    np_version < parse_version("2.0"),
    reason="np.nanquantile only accepts weights since version 2.0",
)
@pytest.mark.parametrize("percentile", [66, 10, 50])
@pytest.mark.parametrize("average", [False, True])
@pytest.mark.parametrize("uniform_weight", [False, True])
def test_weighted_percentile_like_numpy_nanquantile(
    percentile, average, uniform_weight, global_random_seed
):
    """Check `_weighted_percentile` equivalent to `np.nanquantile` with weights."""
    # TODO: remove the following skip once no longer applicable.
    if average and not uniform_weight:
        pytest.skip(
            "np.nanquantile does not support weights with "
            "method='averaged_inverted_cdf'"
        )
    rng = np.random.RandomState(global_random_seed)
    data_with_nans = rng.rand(10, 100)
    # Knock out roughly half of the entries at random positions.
    data_with_nans[rng.rand(*data_with_nans.shape) < 0.5] = np.nan
    if uniform_weight:
        sample_weight = np.ones_like(data_with_nans) * rng.randint(
            1,
            6,
            size=1,
        )
    else:
        sample_weight = rng.randint(1, 6, size=(10, 100))
    numpy_method = "averaged_inverted_cdf" if average else "inverted_cdf"
    # With uniform weights, the unweighted NumPy call is the reference.
    numpy_weights = None if uniform_weight else sample_weight
    result = _weighted_percentile(
        data_with_nans, sample_weight, percentile, average=average
    )
    reference = np.nanquantile(
        data_with_nans,
        percentile / 100,
        weights=numpy_weights,
        method=numpy_method,
        axis=0,
    )
    assert_array_equal(result, reference)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_indexing.py | sklearn/utils/tests/test_indexing.py | import warnings
from copy import copy
from unittest import SkipTest
import numpy as np
import pytest
from scipy.stats import kstest
import sklearn
from sklearn.externals._packaging.version import parse as parse_version
from sklearn.utils import _safe_indexing, resample, shuffle
from sklearn.utils._array_api import (
_convert_to_numpy,
_get_namespace_device_dtype_ids,
device,
move_to,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._indexing import (
_determine_key_type,
_get_column_indices,
_safe_assign,
)
from sklearn.utils._mocking import MockDataFrame
from sklearn.utils._testing import (
_array_api_for_tests,
_convert_container,
assert_allclose,
assert_allclose_dense_sparse,
assert_array_equal,
skip_if_array_api_compat_not_configured,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
# Toy 3x3 array shared by the tests below: rows [0, 1, 2], [3, 4, 5], [6, 7, 8].
X_toy = np.arange(9).reshape((3, 3))
def test_polars_indexing():
    """Check _safe_indexing for polars as expected."""
    pl = pytest.importorskip("polars", minversion="0.18.2")
    df = pl.DataFrame(
        {"a": [1, 2, 3, 4], "b": [4, 5, 6, 8], "c": [1, 4, 1, 10]}, orient="row"
    )
    from polars.testing import assert_frame_equal

    # Column selection by lists of column names.
    for name_key in [["b"], ["a", "b"], ["b", "a", "c"], ["c"], ["a"]]:
        assert_frame_equal(df[name_key], _safe_indexing(df, name_key, axis=1))

    # Column selection by boolean mask, compared against name-based selection.
    for mask, names in [
        ([True, False, True], ["a", "c"]),
        ([False, False, True], ["c"]),
    ]:
        assert_frame_equal(df[:, names], _safe_indexing(df, mask, axis=1))

    # Column selection by positional index.
    for positions, names in [([0, 1], ["a", "b"]), ([2], ["c"])]:
        assert_frame_equal(df[:, names], _safe_indexing(df, positions, axis=1))

    # Row selection by positional index.
    for row_key in [[0, 1], [1, 3], [3, 2]]:
        assert_frame_equal(df[row_key], _safe_indexing(df, row_key, axis=0))
@pytest.mark.parametrize(
    "key, dtype",
    [
        # scalar keys
        (0, "int"),
        ("0", "str"),
        (True, "bool"),
        (np.bool_(True), "bool"),
        # sequence keys (list / tuple)
        ([0, 1, 2], "int"),
        (["0", "1", "2"], "str"),
        ((0, 1, 2), "int"),
        (("0", "1", "2"), "str"),
        # slice keys; a full slice has no key type
        (slice(None, None), None),
        (slice(0, 2), "int"),
        # ndarray keys with assorted dtypes
        (np.array([0, 1, 2], dtype=np.int32), "int"),
        (np.array([0, 1, 2], dtype=np.int64), "int"),
        (np.array([0, 1, 2], dtype=np.uint8), "int"),
        ([True, False], "bool"),
        ((True, False), "bool"),
        (np.array([True, False]), "bool"),
        # column-name keys
        ("col_0", "str"),
        (["col_0", "col_1", "col_2"], "str"),
        (("col_0", "col_1", "col_2"), "str"),
        (slice("begin", "end"), "str"),
        (np.array(["col_0", "col_1", "col_2"]), "str"),
        (np.array(["col_0", "col_1", "col_2"], dtype=object), "str"),
    ],
)
def test_determine_key_type(key, dtype):
    """`_determine_key_type` infers "int", "str", "bool" or None (full slice)
    from scalar, sequence, ndarray and slice keys alike."""
    assert _determine_key_type(key) == dtype
def test_determine_key_type_error():
    """A float key has no valid key type and must raise a ValueError."""
    with pytest.raises(ValueError, match="No valid specification of the"):
        _determine_key_type(1.0)
def test_determine_key_type_slice_error():
    """Slice keys are rejected with a TypeError when `accept_slice=False`."""
    with pytest.raises(TypeError, match="Only array-like or scalar are"):
        _determine_key_type(slice(0, 2, 1), accept_slice=False)
@skip_if_array_api_compat_not_configured
@pytest.mark.parametrize(
    "array_namespace, device_, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_determine_key_type_array_api(array_namespace, device_, dtype_name):
    """`_determine_key_type` should classify array API integer and boolean
    keys, and reject complex-valued keys."""
    xp = _array_api_for_tests(array_namespace, device_)

    with sklearn.config_context(array_api_dispatch=True):
        int_array_key = xp.asarray([1, 2, 3], device=device_)
        assert _determine_key_type(int_array_key) == "int"
        bool_array_key = xp.asarray([True, False, True], device=device_)
        assert _determine_key_type(bool_array_key) == "bool"

        try:
            complex_array_key = xp.asarray([1 + 1j, 2 + 2j, 3 + 3j], device=device_)
        except TypeError:
            # Complex numbers are not supported by all Array API libraries.
            complex_array_key = None

        # Complex keys, where constructible, must be rejected.
        if complex_array_key is not None:
            with pytest.raises(ValueError, match="No valid specification of the"):
                _determine_key_type(complex_array_key)
@skip_if_array_api_compat_not_configured
@pytest.mark.parametrize(
    "array_namespace, device_, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize(
    "indexing_key",
    (
        0,
        -1,
        [1, 3],
        np.array([1, 3]),
        slice(1, 2),
        [True, False, True, True],
        np.asarray([False, False, False, False]),
    ),
)
@pytest.mark.parametrize("axis", [0, 1])
def test_safe_indexing_array_api_support(
    array_namespace, device_, dtype_name, indexing_key, axis
):
    """`_safe_indexing` on array API inputs should match the NumPy result and
    preserve the input's device and dtype."""
    xp = _array_api_for_tests(array_namespace, device_)

    array_to_index_np = np.arange(16).reshape(4, 4)
    # NumPy result serves as the reference for the array API computation.
    expected_result = _safe_indexing(array_to_index_np, indexing_key, axis=axis)

    array_to_index_xp = move_to(array_to_index_np, xp=xp, device=device_)
    with sklearn.config_context(array_api_dispatch=True):
        indexed_array_xp = _safe_indexing(array_to_index_xp, indexing_key, axis=axis)
        assert device(indexed_array_xp) == device(array_to_index_xp)
        assert indexed_array_xp.dtype == array_to_index_xp.dtype
        assert_allclose(_convert_to_numpy(indexed_array_xp, xp=xp), expected_result)
@pytest.mark.parametrize(
    "array_type", ["list", "array", "sparse", "dataframe", "polars", "pyarrow"]
)
@pytest.mark.parametrize("indices_type", ["list", "tuple", "array", "series", "slice"])
def test_safe_indexing_2d_container_axis_0(array_type, indices_type):
    """Row selection on 2D containers with assorted key types."""
    key = [1, 2]
    if indices_type == "slice" and isinstance(key[1], int):
        # A slice's stop bound is exclusive; widen it so the same rows are taken.
        key[1] += 1
    container = _convert_container([[1, 2, 3], [4, 5, 6], [7, 8, 9]], array_type)
    key = _convert_container(key, indices_type)
    selected = _safe_indexing(container, key, axis=0)
    expected = _convert_container([[4, 5, 6], [7, 8, 9]], array_type)
    assert_allclose_dense_sparse(selected, expected)
@pytest.mark.parametrize(
    "array_type", ["list", "array", "series", "polars_series", "pyarrow_array"]
)
@pytest.mark.parametrize("indices_type", ["list", "tuple", "array", "series", "slice"])
def test_safe_indexing_1d_container(array_type, indices_type):
    """Element selection on 1D containers with assorted key types."""
    key = [1, 2]
    if indices_type == "slice" and isinstance(key[1], int):
        # A slice's stop bound is exclusive; widen it to select the same items.
        key[1] += 1
    container = _convert_container([1, 2, 3, 4, 5, 6, 7, 8, 9], array_type)
    key = _convert_container(key, indices_type)
    selected = _safe_indexing(container, key, axis=0)
    assert_allclose_dense_sparse(selected, _convert_container([2, 3], array_type))
@pytest.mark.parametrize(
    "array_type", ["array", "sparse", "dataframe", "polars", "pyarrow"]
)
@pytest.mark.parametrize("indices_type", ["list", "tuple", "array", "series", "slice"])
@pytest.mark.parametrize("indices", [[1, 2], ["col_1", "col_2"]])
def test_safe_indexing_2d_container_axis_1(array_type, indices_type, indices):
    """Column selection by position or by name on 2D containers."""
    # `indices` is mutable and shared across parametrized runs: work on a copy.
    key = copy(indices)
    if indices_type == "slice" and isinstance(indices[1], int):
        # A slice's stop bound is exclusive; widen it to keep the same columns.
        key[1] += 1
    columns_name = ["col_0", "col_1", "col_2"]
    container = _convert_container(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], array_type, columns_name
    )
    key = _convert_container(key, indices_type)
    selecting_by_name = isinstance(indices[0], str)
    if selecting_by_name and array_type in ("array", "sparse"):
        # Name-based keys only make sense for containers with column labels.
        err_msg = (
            "Specifying the columns using strings is only supported for dataframes"
        )
        with pytest.raises(ValueError, match=err_msg):
            _safe_indexing(container, key, axis=1)
    else:
        selected = _safe_indexing(container, key, axis=1)
        expected = _convert_container([[2, 3], [5, 6], [8, 9]], array_type)
        assert_allclose_dense_sparse(selected, expected)
@pytest.mark.parametrize("array_read_only", [True, False])
@pytest.mark.parametrize("indices_read_only", [True, False])
@pytest.mark.parametrize(
"array_type", ["array", "sparse", "dataframe", "polars", "pyarrow"]
)
@pytest.mark.parametrize("indices_type", ["array", "series"])
@pytest.mark.parametrize(
"axis, expected_array", [(0, [[4, 5, 6], [7, 8, 9]]), (1, [[2, 3], [5, 6], [8, 9]])]
)
def test_safe_indexing_2d_read_only_axis_1(
array_read_only, indices_read_only, array_type, indices_type, axis, expected_array
):
array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
if array_read_only:
array.setflags(write=False)
array = _convert_container(array, array_type)
indices = np.array([1, 2])
if indices_read_only:
indices.setflags(write=False)
indices = _convert_container(indices, indices_type)
subset = _safe_indexing(array, indices, axis=axis)
assert_allclose_dense_sparse(subset, _convert_container(expected_array, array_type))
@pytest.mark.parametrize(
    "array_type", ["list", "array", "series", "polars_series", "pyarrow_array"]
)
@pytest.mark.parametrize("indices_type", ["list", "tuple", "array", "series"])
def test_safe_indexing_1d_container_mask(array_type, indices_type):
    """Boolean-mask selection on 1D containers keeps only the True positions."""
    mask = [False, True, True, False, False, False, False, False, False]
    container = _convert_container([1, 2, 3, 4, 5, 6, 7, 8, 9], array_type)
    mask = _convert_container(mask, indices_type)
    selected = _safe_indexing(container, mask, axis=0)
    assert_allclose_dense_sparse(selected, _convert_container([2, 3], array_type))
@pytest.mark.parametrize(
    "array_type", ["array", "sparse", "dataframe", "polars", "pyarrow"]
)
@pytest.mark.parametrize("indices_type", ["list", "tuple", "array", "series"])
@pytest.mark.parametrize(
    "axis, expected_subset",
    [(0, [[4, 5, 6], [7, 8, 9]]), (1, [[2, 3], [5, 6], [8, 9]])],
)
def test_safe_indexing_2d_mask(array_type, indices_type, axis, expected_subset):
    """Boolean-mask selection on 2D containers, along both axes."""
    column_names = ["col_0", "col_1", "col_2"]
    container = _convert_container(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], array_type, column_names
    )
    mask = _convert_container([False, True, True], indices_type)
    selected = _safe_indexing(container, mask, axis=axis)
    expected = _convert_container(expected_subset, array_type)
    assert_allclose_dense_sparse(selected, expected)
@pytest.mark.parametrize(
    "array_type, expected_output_type",
    [
        ("list", "list"),
        ("array", "array"),
        ("sparse", "sparse"),
        ("dataframe", "series"),
        ("polars", "polars_series"),
        ("pyarrow", "pyarrow_array"),
    ],
)
def test_safe_indexing_2d_scalar_axis_0(array_type, expected_output_type):
    """A scalar key on axis 0 returns one row, in the container's 1D type."""
    container = _convert_container([[1, 2, 3], [4, 5, 6], [7, 8, 9]], array_type)
    selected_row = _safe_indexing(container, 2, axis=0)
    expected_row = _convert_container([7, 8, 9], expected_output_type)
    assert_allclose_dense_sparse(selected_row, expected_row)
@pytest.mark.parametrize(
    "array_type", ["list", "array", "series", "polars_series", "pyarrow_array"]
)
def test_safe_indexing_1d_scalar(array_type):
    """A scalar key on a 1D container returns the bare element."""
    container = _convert_container([1, 2, 3, 4, 5, 6, 7, 8, 9], array_type)
    assert _safe_indexing(container, 2, axis=0) == 3
@pytest.mark.parametrize(
    "array_type, expected_output_type",
    [
        ("array", "array"),
        ("sparse", "sparse"),
        ("dataframe", "series"),
        ("polars", "polars_series"),
        ("pyarrow", "pyarrow_array"),
    ],
)
@pytest.mark.parametrize("indices", [2, "col_2"])
def test_safe_indexing_2d_scalar_axis_1(array_type, expected_output_type, indices):
    """A scalar key on axis 1 returns one column; string keys only work for
    labeled (dataframe-like) containers."""
    column_names = ["col_0", "col_1", "col_2"]
    container = _convert_container(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], array_type, column_names
    )
    if isinstance(indices, str) and array_type in ("array", "sparse"):
        err_msg = (
            "Specifying the columns using strings is only supported for dataframes"
        )
        with pytest.raises(ValueError, match=err_msg):
            _safe_indexing(container, indices, axis=1)
    else:
        selected = _safe_indexing(container, indices, axis=1)
        if expected_output_type == "sparse":
            # sparse matrix are keeping the 2D shape
            expected_values = [[3], [6], [9]]
        else:
            expected_values = [3, 6, 9]
        expected = _convert_container(expected_values, expected_output_type)
        assert_allclose_dense_sparse(selected, expected)
@pytest.mark.parametrize("array_type", ["list", "array", "sparse"])
def test_safe_indexing_None_axis_0(array_type):
X = _convert_container([[1, 2, 3], [4, 5, 6], [7, 8, 9]], array_type)
X_subset = _safe_indexing(X, None, axis=0)
assert_allclose_dense_sparse(X_subset, X)
def test_safe_indexing_pandas_no_matching_cols_error():
    """A float column key on a dataframe raises the 'no valid columns' error."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(X_toy)
    with pytest.raises(ValueError, match="No valid specification of the columns."):
        _safe_indexing(df, [1.0], axis=1)
@pytest.mark.parametrize("axis", [None, 3])
def test_safe_indexing_error_axis(axis):
with pytest.raises(ValueError, match="'axis' should be either 0"):
_safe_indexing(X_toy, [0, 1], axis=axis)
@pytest.mark.parametrize(
    "X_constructor", ["array", "series", "polars_series", "pyarrow_array"]
)
def test_safe_indexing_1d_array_error(X_constructor):
    """Indexing the 2nd dimension of a 1D array-like must raise a ValueError."""
    values = list(range(5))
    # Build the 1D container lazily so missing optional libraries skip the run.
    if X_constructor == "array":
        container = np.asarray(values)
    elif X_constructor == "series":
        container = pytest.importorskip("pandas").Series(values)
    elif X_constructor == "polars_series":
        container = pytest.importorskip("polars").Series(values=values)
    elif X_constructor == "pyarrow_array":
        container = pytest.importorskip("pyarrow").array(values)
    err_msg = "'X' should be a 2D NumPy array, 2D sparse matrix or dataframe"
    with pytest.raises(ValueError, match=err_msg):
        _safe_indexing(container, [0, 1], axis=1)
def test_safe_indexing_container_axis_0_unsupported_type():
    """String keys are rejected when indexing rows (axis=0)."""
    data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    err_msg = r"String indexing.*is not supported with 'axis=0'"
    with pytest.raises(ValueError, match=err_msg):
        _safe_indexing(data, ["col_1", "col_2"], axis=0)
def test_safe_indexing_pandas_no_settingwithcopy_warning():
    """Row-indexing a DataFrame must return an independent copy."""
    # Using safe_indexing with an array-like indexer gives a copy of the
    # DataFrame -> ensure it doesn't raise a warning if modified
    pd = pytest.importorskip("pandas")
    pd_version = parse_version(pd.__version__)
    pd_base_version = parse_version(pd_version.base_version)
    if pd_base_version >= parse_version("3"):
        raise SkipTest("SettingWithCopyWarning has been removed in pandas 3.0.0.dev")

    X = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
    subset = _safe_indexing(X, [0, 1], axis=0)
    if hasattr(pd.errors, "SettingWithCopyWarning"):
        SettingWithCopyWarning = pd.errors.SettingWithCopyWarning
    else:
        # backward compatibility for pandas < 1.5
        SettingWithCopyWarning = pd.core.common.SettingWithCopyWarning
    with warnings.catch_warnings():
        # Escalate the warning to an error so that emitting it fails the test.
        warnings.simplefilter("error", SettingWithCopyWarning)
        subset.iloc[0, 0] = 10
    # The original dataframe is unaffected by the assignment on the subset:
    assert X.iloc[0, 0] == 1
@pytest.mark.parametrize("indices", [0, [0, 1], slice(0, 2), np.array([0, 1])])
def test_safe_indexing_list_axis_1_unsupported(indices):
"""Check that we raise a ValueError when axis=1 with input as list."""
X = [[1, 2], [4, 5], [7, 8]]
err_msg = "axis=1 is not supported for lists"
with pytest.raises(ValueError, match=err_msg):
_safe_indexing(X, indices, axis=1)
@pytest.mark.parametrize("array_type", ["array", "sparse", "dataframe"])
def test_safe_assign(array_type):
"""Check that `_safe_assign` works as expected."""
rng = np.random.RandomState(0)
X_array = rng.randn(10, 5)
row_indexer = [1, 2]
values = rng.randn(len(row_indexer), X_array.shape[1])
X = _convert_container(X_array, array_type)
_safe_assign(X, values, row_indexer=row_indexer)
assigned_portion = _safe_indexing(X, row_indexer, axis=0)
assert_allclose_dense_sparse(
assigned_portion, _convert_container(values, array_type)
)
column_indexer = [1, 2]
values = rng.randn(X_array.shape[0], len(column_indexer))
X = _convert_container(X_array, array_type)
_safe_assign(X, values, column_indexer=column_indexer)
assigned_portion = _safe_indexing(X, column_indexer, axis=1)
assert_allclose_dense_sparse(
assigned_portion, _convert_container(values, array_type)
)
row_indexer, column_indexer = None, None
values = rng.randn(*X.shape)
X = _convert_container(X_array, array_type)
_safe_assign(X, values, column_indexer=column_indexer)
assert_allclose_dense_sparse(X, _convert_container(values, array_type))
@pytest.mark.parametrize(
    "key, err_msg",
    [
        (10, r"all features must be in \[0, 2\]"),
        ("whatever", "A given column is not a column of the dataframe"),
        (object(), "No valid specification of the columns"),
    ],
)
def test_get_column_indices_error(key, err_msg):
    """Invalid column keys raise a ValueError with an informative message."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(X_toy, columns=["col_0", "col_1", "col_2"])
    with pytest.raises(ValueError, match=err_msg):
        _get_column_indices(df, key)
@pytest.mark.parametrize(
    "key", [["col1"], ["col2"], ["col1", "col2"], ["col1", "col3"], ["col2", "col3"]]
)
def test_get_column_indices_pandas_nonunique_columns_error(key):
    """Selecting duplicated dataframe column names raises an exact error."""
    pd = pytest.importorskip("pandas")
    duplicated_columns = ["col1", "col1", "col2", "col3", "col2"]
    df = pd.DataFrame(np.zeros((1, 5), dtype=int), columns=duplicated_columns)
    err_msg = "Selected columns, {}, are not unique in dataframe".format(key)
    with pytest.raises(ValueError) as exc_info:
        _get_column_indices(df, key)
    # The message is matched exactly, not as a regex.
    assert str(exc_info.value) == err_msg
def test_get_column_indices_interchange():
    """Check _get_column_indices for edge cases with the interchange"""
    pl = pytest.importorskip("polars")
    # Polars dataframes go down the interchange path.
    df = pl.DataFrame([[1, 2, 3], [4, 5, 6]], schema=["a", "b", "c"])
    # Positional slices, name lists and name slices all resolve to positions.
    cases = [
        (slice(1, None), [1, 2]),
        (slice(None, 2), [0, 1]),
        (slice(1, 2), [1]),
        (["b", "c"], [1, 2]),
        (slice("a", "b"), [0, 1]),
        (slice("a", None), [0, 1, 2]),
        (slice(None, "a"), [0]),
        (["c", "a"], [2, 0]),
        ([], []),
    ]
    for key, expected_indices in cases:
        assert _get_column_indices(df, key) == expected_indices
    # Unknown column names are rejected.
    with pytest.raises(
        ValueError, match="A given column is not a column of the dataframe"
    ):
        _get_column_indices(df, ["not_a_column"])
    # Only unit steps are supported for name-based slices.
    with pytest.raises(NotImplementedError, match="key.step must be 1 or None"):
        _get_column_indices(df, slice("a", None, 2))
def test_resample():
    """Exercise `resample` corner cases not covered by the doctests."""
    # No arrays to resample: return None.
    assert resample() is None
    # Inconsistent first-dimension lengths are rejected.
    with pytest.raises(ValueError):
        resample([0], [0, 1])
    # Without replacement, n_samples cannot exceed the population size.
    with pytest.raises(ValueError):
        resample([0, 1], [0, 1], replace=False, n_samples=3)
    # Issue:6581, n_samples can be more when replace is True (default).
    assert len(resample([1, 2], n_samples=5)) == 5
def test_resample_weighted():
    # Check that sampling with replacement with integer weights yields the
    # samples from the same distribution as sampling uniformly with
    # repeated data points.
    data = np.array([-1, 0, 1])
    sample_weight = np.asarray([0, 100, 1])
    n_draws = data.shape[0]
    mean_repeated = np.asarray(
        [
            resample(
                data.repeat(sample_weight),
                replace=True,
                random_state=seed,
                n_samples=n_draws,
            ).mean()
            for seed in range(100)
        ]
    )
    mean_reweighted = np.asarray(
        [
            resample(
                data,
                sample_weight=sample_weight,
                replace=True,
                random_state=seed,
                n_samples=n_draws,
            ).mean()
            for seed in range(100)
        ]
    )
    # Should never be negative because -1 has a 0 weight.
    assert np.all(mean_reweighted >= 0)
    # The null-hypothesis (the computed means are identically distributed)
    # cannot be rejected.
    assert kstest(mean_repeated, mean_reweighted).pvalue > 0.05
def test_resample_stratified():
    """resample(stratify=y) must preserve the class balance of y."""
    rng = np.random.RandomState(0)
    n_samples = 100
    proba_positive = 0.9
    X = rng.normal(size=(n_samples, 1))
    y = rng.binomial(1, proba_positive, size=n_samples)
    # Without stratification a small draw can miss the minority class.
    _, y_not_stratified = resample(X, y, n_samples=10, random_state=0, stratify=None)
    assert np.all(y_not_stratified == 1)
    _, y_stratified = resample(X, y, n_samples=10, random_state=0, stratify=y)
    assert not np.all(y_stratified == 1)
    assert np.sum(y_stratified) == 9  # nine 1s, one 0
def test_resample_stratified_replace():
    """Stratified resampling honours the ``replace`` flag."""
    rng = np.random.RandomState(0)
    n_samples = 100
    X = rng.normal(size=(n_samples, 1))
    y = rng.randint(0, 2, size=n_samples)
    X_with, _ = resample(
        X, y, replace=True, n_samples=50, random_state=rng, stratify=y
    )
    X_without, _ = resample(
        X, y, replace=False, n_samples=50, random_state=rng, stratify=y
    )
    # Drawing with replacement produces duplicates; without, it cannot.
    assert np.unique(X_with).shape[0] < 50
    assert np.unique(X_without).shape[0] == 50
    # make sure n_samples can be greater than X.shape[0] if we sample with
    # replacement
    X_with, _ = resample(
        X, y, replace=True, n_samples=1000, random_state=rng, stratify=y
    )
    assert X_with.shape[0] == 1000
    assert np.unique(X_with).shape[0] == 100
def test_resample_stratify_2dy():
    """A two-dimensional y is accepted as the ``stratify`` target."""
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 1))
    y = rng.randint(0, 2, size=(100, 2))
    X, y = resample(X, y, n_samples=50, random_state=rng, stratify=y)
    assert y.ndim == 2
def test_notimplementederror():
    """sample_weight is only supported with replace=True and stratify=None."""
    replace_msg = "Resampling with sample_weight is only implemented for replace=True."
    with pytest.raises(NotImplementedError, match=replace_msg):
        resample([0, 1], [0, 1], sample_weight=[1, 1], replace=False)
    stratify_msg = "Resampling with sample_weight is only implemented for stratify=None"
    with pytest.raises(NotImplementedError, match=stratify_msg):
        resample([0, 1], [0, 1], sample_weight=[1, 1], stratify=[0, 1])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_resample_stratify_sparse_error(csr_container):
    """A sparse ``stratify`` argument must be rejected with a TypeError."""
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 2))
    y = rng.randint(0, 2, size=100)
    sparse_stratify = csr_container(y.reshape(-1, 1))
    with pytest.raises(TypeError, match="Sparse data was passed"):
        X, y = resample(X, y, n_samples=50, random_state=rng, stratify=sparse_stratify)
def test_shuffle_on_ndim_equals_three():
    """shuffle must accept 3-dimensional arrays without raising."""

    def freeze(arr):  # nested tuples make the inner arrays hashable
        return tuple(tuple(tuple(row) for row in plane) for plane in arr)

    A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])  # A.shape = (2,2,2)
    before = set(freeze(A))
    shuffle(A)  # shouldn't raise a ValueError for dim = 3
    assert set(freeze(A)) == before
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_shuffle_dont_convert_to_array(csc_container):
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes can let any indexable datastructure pass-through.
    letters = ["a", "b", "c"]
    letters_arr = np.array(["a", "b", "c"], dtype=object)
    numbers = [1, 2, 3]
    frame = MockDataFrame(np.array([["a", 0], ["b", 1], ["c", 2]], dtype=object))
    sp_mat = csc_container(np.arange(6).reshape(3, 2))
    letters_s, letters_arr_s, numbers_s, frame_s, sp_mat_s = shuffle(
        letters, letters_arr, numbers, frame, sp_mat, random_state=0
    )
    # Each container keeps its original type and dtype after shuffling.
    assert letters_s == ["c", "b", "a"]
    assert type(letters_s) == list
    assert_array_equal(letters_arr_s, ["c", "b", "a"])
    assert letters_arr_s.dtype == object
    assert numbers_s == [3, 2, 1]
    assert type(numbers_s) == list
    assert_array_equal(frame_s, np.array([["c", 2], ["b", 1], ["a", 0]], dtype=object))
    assert type(frame_s) == MockDataFrame
    assert_array_equal(sp_mat_s.toarray(), np.array([[4, 5], [2, 3], [0, 1]]))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_tags.py | sklearn/utils/tests/test_tags.py | from dataclasses import dataclass, fields
import numpy as np
import pytest
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
)
from sklearn.pipeline import Pipeline
from sklearn.utils import (
Tags,
get_tags,
)
from sklearn.utils.estimator_checks import (
check_estimator_tags_renamed,
check_valid_tag_types,
)
class EmptyClassifier(ClassifierMixin, BaseEstimator):
    """Minimal classifier used only to probe the default mixin tags."""
class EmptyTransformer(TransformerMixin, BaseEstimator):
    """Minimal transformer used only to probe the default mixin tags."""
class EmptyRegressor(RegressorMixin, BaseEstimator):
    """Minimal regressor used only to probe the default mixin tags."""
@pytest.mark.parametrize(
    "estimator, value",
    [
        [EmptyClassifier(), True],
        [EmptyTransformer(), False],
        [EmptyRegressor(), True],
        [BaseEstimator(), False],
    ],
)
def test_requires_y(estimator, value):
    """Classifiers and regressors require y; transformers and bare estimators don't."""
    tags = get_tags(estimator)
    assert tags.target_tags.required == value
def test_no___sklearn_tags__with_more_tags():
    """Test that calling `get_tags` on a class that defines `_more_tags` but not
    `__sklearn_tags__` raises an error.
    """

    class MoreTagsEstimator(BaseEstimator):
        def _more_tags(self):
            return {"requires_y": True}  # pragma: no cover

    expected_msg = "has defined either `_more_tags` or `_get_tags`"
    with pytest.raises(TypeError, match=expected_msg):
        check_estimator_tags_renamed("MoreTagsEstimator", MoreTagsEstimator())
def test_tag_test_passes_with_inheritance():
    """A Tags subclass carrying an extra tag must pass the tag-type check."""

    @dataclass
    class MyTags(Tags):
        my_tag: bool = True  # type: ignore[annotation-unchecked]

    class MyEstimator(BaseEstimator):
        def __sklearn_tags__(self):
            base_tags = super().__sklearn_tags__()
            # Re-pack the inherited tag values into the extended dataclass.
            inherited = {f.name: getattr(base_tags, f.name) for f in fields(base_tags)}
            tags = MyTags(**inherited)
            tags.my_tag = True
            return tags

    check_valid_tag_types("MyEstimator", MyEstimator())
def test_tags_no_sklearn_tags_concrete_implementation():
    """Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/30479
    Either the estimator doesn't implement `__sklearn_tags` or there is no class
    implementing `__sklearn_tags__` without calling `super().__sklearn_tags__()` in
    its mro. Thus, we raise an error and request to inherit from
    `BaseEstimator` that implements `__sklearn_tags__`.
    """
    X = np.array([[1, 2], [2, 3], [3, 4]])
    y = np.array([1, 0, 1])
    # 1st case, the estimator inherits from a class that only implements
    # `__sklearn_tags__` by calling `super().__sklearn_tags__()`.
    class MyEstimator(ClassifierMixin):
        def __init__(self, *, param=1):
            self.param = param
        def fit(self, X, y=None):
            self.is_fitted_ = True
            return self
        def predict(self, X):
            return np.full(shape=X.shape[0], fill_value=self.param)
    # Pipeline queries the estimator's tags, which triggers the failure.
    my_pipeline = Pipeline([("estimator", MyEstimator(param=1))])
    with pytest.raises(AttributeError, match="The following error was raised"):
        my_pipeline.fit(X, y).predict(X)
    # 2nd case, the estimator doesn't implement `__sklearn_tags__` at all.
    class MyEstimator2:
        def __init__(self, *, param=1):
            self.param = param
        def fit(self, X, y=None):
            self.is_fitted_ = True
            return self
        def predict(self, X):
            return np.full(shape=X.shape[0], fill_value=self.param)
    my_pipeline = Pipeline([("estimator", MyEstimator2(param=1))])
    with pytest.raises(AttributeError, match="The following error was raised"):
        my_pipeline.fit(X, y).predict(X)
    # check that we still raise an error if it is not an AttributeError or related to
    # __sklearn_tags__
    class MyEstimator3(MyEstimator, BaseEstimator):
        def __init__(self, *, param=1, error_type=AttributeError):
            self.param = param
            self.error_type = error_type
        def __sklearn_tags__(self):
            super().__sklearn_tags__()
            # Simulate an arbitrary failure inside __sklearn_tags__; it must
            # propagate unchanged out of get_tags.
            raise self.error_type("test")
    for error_type in (AttributeError, TypeError, ValueError):
        estimator = MyEstimator3(param=1, error_type=error_type)
        with pytest.raises(error_type):
            get_tags(estimator)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_random.py | sklearn/utils/tests/test_random.py | import numpy as np
import pytest
import scipy.sparse as sp
from numpy.testing import assert_array_almost_equal
from scipy.special import comb
from sklearn.utils._random import _our_rand_r_py
from sklearn.utils.random import _random_choice_csc, sample_without_replacement
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    """An unknown sampling method name must raise a ValueError."""
    with pytest.raises(ValueError):
        sample_without_replacement(5, 4, "unknown")
def test_sample_without_replacement_algorithms():
    """Run the shared sampling checks against every available method."""
    for method in ("auto", "tracking_selection", "reservoir_sampling", "pool"):

        def sampler(n_population, n_samples, random_state=None, _method=method):
            # Bind the current method as a default argument so each closure
            # keeps its own method.
            return sample_without_replacement(
                n_population, n_samples, method=_method, random_state=random_state
            )

        check_edge_case_of_sample_int(sampler)
        check_sample_int(sampler)
        check_sample_int_distribution(sampler)
def check_edge_case_of_sample_int(sample_without_replacement):
    """Boundary behaviour shared by all sampling methods."""
    # Requesting more samples than the population size is invalid.
    for n_population, n_samples in [(0, 1), (1, 2)]:
        with pytest.raises(ValueError):
            sample_without_replacement(n_population, n_samples)
    # n_population == n_samples is allowed, including the empty case.
    assert sample_without_replacement(0, 0).shape == (0,)
    assert sample_without_replacement(1, 1).shape == (1,)
    # n_population >= n_samples is the common case.
    assert sample_without_replacement(5, 0).shape == (0,)
    assert sample_without_replacement(5, 1).shape == (1,)
    # Negative sizes are rejected on either side.
    for n_population, n_samples in [(-1, 5), (5, -1)]:
        with pytest.raises(ValueError):
            sample_without_replacement(n_population, n_samples)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # the sample is of the correct length and contains only unique items
    n_population = 100
    for n_samples in range(n_population + 1):
        drawn = sample_without_replacement(n_population, n_samples)
        distinct = np.unique(drawn)
        assert len(drawn) == n_samples
        assert np.size(distinct) == n_samples
        assert np.all(distinct < n_population)
    # test edge case n_population == n_samples == 0
    assert np.size(sample_without_replacement(0, 0)) == 0
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # sample generates all possible permutations
    n_population = 10
    # a large number of trials prevents false negatives without slowing normal
    # case
    n_trials = 10000
    for n_samples in range(n_population):
        # Counting combinations (rather than permutations) still works for
        # sampling algorithms that do not return a random permutation of the
        # selected subset.
        n_expected = comb(n_population, n_samples, exact=True)
        seen = set()
        for _ in range(n_trials):
            seen.add(frozenset(sample_without_replacement(n_population, n_samples)))
            if len(seen) == n_expected:
                break
        else:
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)"
                % (len(seen), n_expected)
            )
def test_random_choice_csc(n_samples=10000, random_state=24):
    """Sampled sparse columns should follow the requested class probabilities.

    For each column the empirical class frequencies of the drawn samples are
    compared (to one decimal) to the requested probabilities.
    """
    # Explicit class probabilities
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    got = _random_choice_csc(n_samples, classes, class_probabilities, random_state)
    assert sp.issparse(got)
    for k in range(len(classes)):
        # Empirical frequency of each class in column k.
        p = np.bincount(got[:, [k]].toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilities[k], p, decimal=1)
    # Implicit class probabilities
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilities = [np.array([0.5, 0.5]), np.array([0, 1 / 2, 1 / 2])]
    got = _random_choice_csc(
        n_samples=n_samples, classes=classes, random_state=random_state
    )
    assert sp.issparse(got)
    for k in range(len(classes)):
        p = np.bincount(got[:, [k]].toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilities[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilities = [np.array([0.0, 1.0]), np.array([0.0, 1.0, 0.0])]
    got = _random_choice_csc(n_samples, classes, class_probabilities, random_state)
    assert sp.issparse(got)
    for k in range(len(classes)):
        # minlength keeps the bincount aligned with classes of probability 0.
        p = (
            np.bincount(
                got[:, [k]].toarray().ravel(), minlength=len(class_probabilities[k])
            )
            / n_samples
        )
        assert_array_almost_equal(class_probabilities[k], p, decimal=1)
    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilities = [np.array([0.0, 1.0]), np.array([1.0])]
    got = _random_choice_csc(
        n_samples=n_samples, classes=classes, random_state=random_state
    )
    assert sp.issparse(got)
    for k in range(len(classes)):
        p = np.bincount(got[:, [k]].toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilities[k], p, decimal=1)
def test_random_choice_csc_errors():
    """Invalid classes/probabilities combinations must raise ValueError."""
    bad_inputs = [
        # the length of an array in classes and class_probabilities is mismatched
        (
            [np.array([0, 1]), np.array([0, 1, 2, 3])],
            [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])],
        ),
        # string class labels are an unsupported dtype
        (
            [np.array(["a", "1"]), np.array(["z", "1", "2"])],
            [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])],
        ),
        # floating point class labels are an unsupported dtype
        (
            [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])],
            [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])],
        ),
        # given probabilities don't sum to 1
        (
            [np.array([0, 1]), np.array([0, 1, 2])],
            [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])],
        ),
    ]
    for classes, class_probabilities in bad_inputs:
        with pytest.raises(ValueError):
            _random_choice_csc(4, classes, class_probabilities, 1)
def test_our_rand_r():
    """Pin the output of the C-level rand_r re-implementation."""
    assert _our_rand_r_py(1273642419) == 131541053
    assert _our_rand_r_py(0) == 270369
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_murmurhash.py | sklearn/utils/tests/test_murmurhash.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.murmurhash import murmurhash3_32
def test_mmhash3_int():
    """Hashing an int must be stable and honour the seed/positive flags."""
    # ``positive`` defaults to False, so both spellings must agree.
    for kwargs in ({}, {"positive": False}):
        assert murmurhash3_32(3, **kwargs) == 847579505
        assert murmurhash3_32(3, seed=0, **kwargs) == 847579505
        assert murmurhash3_32(3, seed=42, **kwargs) == -1823081949
    # With positive=True the negative seed-42 hash becomes its unsigned value.
    assert murmurhash3_32(3, positive=True) == 847579505
    assert murmurhash3_32(3, seed=0, positive=True) == 847579505
    assert murmurhash3_32(3, seed=42, positive=True) == 2471885347
def test_mmhash3_int_array():
    """Hashing an int array must match element-wise scalar hashing."""
    rng = np.random.RandomState(42)
    keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
    keys = keys.reshape((3, 2, 1))
    for seed in [0, 42]:
        per_element = [murmurhash3_32(int(k), seed) for k in keys.flat]
        expected = np.array(per_element).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed), expected)
    for seed in [0, 42]:
        per_element = [murmurhash3_32(k, seed, positive=True) for k in keys.flat]
        expected = np.array(per_element).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed, positive=True), expected)
def test_mmhash3_bytes():
    """Pin known hash values for a bytes key under both sign conventions."""
    expected = {
        (0, False): -156908512,
        (42, False): -1322301282,
        (0, True): 4138058784,
        (42, True): 2972666014,
    }
    for (seed, positive), value in expected.items():
        assert murmurhash3_32(b"foo", seed, positive=positive) == value
def test_mmhash3_unicode():
    """A unicode key must hash exactly like its UTF-8 bytes counterpart."""
    expected = {
        (0, False): -156908512,
        (42, False): -1322301282,
        (0, True): 4138058784,
        (42, True): 2972666014,
    }
    for (seed, positive), value in expected.items():
        assert murmurhash3_32("foo", seed, positive=positive) == value
def test_no_collision_on_byte_range():
    """Hashes of ``" " * i`` for i in range(100) must be pairwise distinct.

    Bug fix: the original loop never added the computed hash to
    ``previous_hashes``, so the membership assertion was vacuously true and
    the test could not detect any collision.
    """
    previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(" " * i, 0)
        assert h not in previous_hashes, "Found collision on growing empty string"
        # Record the hash so later iterations are actually compared against it.
        previous_hashes.add(h)
def test_uniform_distribution():
    """Hash values modulo n_bins should be close to uniformly distributed."""
    n_bins, n_samples = 10, 100000
    counts = np.zeros(n_bins, dtype=np.float64)
    for i in range(n_samples):
        counts[murmurhash3_32(i, positive=True) % n_bins] += 1
    observed = counts / n_samples
    expected = np.full(n_bins, 1.0 / n_bins)
    # Each bin frequency should match 1/n_bins to two decimals.
    assert_array_almost_equal(observed / expected, np.ones(n_bins), 2)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_pprint.py | sklearn/utils/tests/test_pprint.py | import re
from pprint import PrettyPrinter
import numpy as np
import pytest
from sklearn import config_context
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegressionCV
from sklearn.pipeline import make_pipeline
from sklearn.utils._pprint import _EstimatorPrettyPrinter
# Constructors excerpted to test pprinting
class LogisticRegression(BaseEstimator):
    """Stub mirroring LogisticRegression's constructor for the repr tests.

    The parameter names and default values are excerpted verbatim; the
    expected repr strings below depend on them exactly.
    """
    def __init__(
        self,
        C=1.0,
        l1_ratio=0,
        dual=False,
        tol=1e-4,
        fit_intercept=True,
        intercept_scaling=1,
        class_weight=None,
        random_state=None,
        solver="warn",
        max_iter=100,
        multi_class="warn",
        verbose=0,
        warm_start=False,
        n_jobs=None,
    ):
        self.C = C
        self.l1_ratio = l1_ratio
        self.dual = dual
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.random_state = random_state
        self.solver = solver
        self.max_iter = max_iter
        self.multi_class = multi_class
        self.verbose = verbose
        self.warm_start = warm_start
        self.n_jobs = n_jobs
    def fit(self, X, y):
        # No-op: only the constructor parameters matter for pretty-printing.
        return self
class StandardScaler(TransformerMixin, BaseEstimator):
    """Stub mirroring StandardScaler's constructor for the repr tests."""
    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy
    def transform(self, X, copy=None):
        # No-op: returning self is sufficient for these pprint tests.
        return self
class RFE(BaseEstimator):
    """Stub mirroring RFE's constructor, used to build nested estimators."""
    def __init__(self, estimator, n_features_to_select=None, step=1, verbose=0):
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        self.verbose = verbose
class GridSearchCV(BaseEstimator):
    """Stub mirroring GridSearchCV's constructor for the repr tests."""
    def __init__(
        self,
        estimator,
        param_grid,
        scoring=None,
        n_jobs=None,
        iid="warn",
        refit=True,
        cv="warn",
        verbose=0,
        pre_dispatch="2*n_jobs",
        error_score="raise-deprecating",
        return_train_score=False,
    ):
        self.estimator = estimator
        self.param_grid = param_grid
        self.scoring = scoring
        self.n_jobs = n_jobs
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score
        self.return_train_score = return_train_score
class CountVectorizer(BaseEstimator):
    """Stub mirroring CountVectorizer's constructor for the repr tests.

    Used mainly to exercise pretty-printing of a long dict parameter
    (``vocabulary``).
    """
    def __init__(
        self,
        input="content",
        encoding="utf-8",
        decode_error="strict",
        strip_accents=None,
        lowercase=True,
        preprocessor=None,
        tokenizer=None,
        stop_words=None,
        token_pattern=r"(?u)\b\w\w+\b",
        ngram_range=(1, 1),
        analyzer="word",
        max_df=1.0,
        min_df=1,
        max_features=None,
        vocabulary=None,
        binary=False,
        dtype=np.int64,
    ):
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        self.max_features = max_features
        self.ngram_range = ngram_range
        self.vocabulary = vocabulary
        self.binary = binary
        self.dtype = dtype
class Pipeline(BaseEstimator):
    """Stub mirroring Pipeline's constructor for the repr tests."""
    def __init__(self, steps, memory=None):
        self.steps = steps
        self.memory = memory
class SVC(BaseEstimator):
    """Stub mirroring SVC's constructor for the repr tests."""
    def __init__(
        self,
        C=1.0,
        kernel="rbf",
        degree=3,
        gamma="auto_deprecated",
        coef0=0.0,
        shrinking=True,
        probability=False,
        tol=1e-3,
        cache_size=200,
        class_weight=None,
        verbose=False,
        max_iter=-1,
        decision_function_shape="ovr",
        random_state=None,
    ):
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.tol = tol
        self.C = C
        self.shrinking = shrinking
        self.probability = probability
        self.cache_size = cache_size
        self.class_weight = class_weight
        self.verbose = verbose
        self.max_iter = max_iter
        self.decision_function_shape = decision_function_shape
        self.random_state = random_state
class PCA(BaseEstimator):
    """Stub mirroring PCA's constructor for the repr tests."""
    def __init__(
        self,
        n_components=None,
        copy=True,
        whiten=False,
        svd_solver="auto",
        tol=0.0,
        iterated_power="auto",
        random_state=None,
    ):
        self.n_components = n_components
        self.copy = copy
        self.whiten = whiten
        self.svd_solver = svd_solver
        self.tol = tol
        self.iterated_power = iterated_power
        self.random_state = random_state
class NMF(BaseEstimator):
    """Stub mirroring NMF's constructor for the repr tests."""
    def __init__(
        self,
        n_components=None,
        init=None,
        solver="cd",
        beta_loss="frobenius",
        tol=1e-4,
        max_iter=200,
        random_state=None,
        alpha=0.0,
        l1_ratio=0.0,
        verbose=0,
        shuffle=False,
    ):
        self.n_components = n_components
        self.init = init
        self.solver = solver
        self.beta_loss = beta_loss
        self.tol = tol
        self.max_iter = max_iter
        self.random_state = random_state
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.verbose = verbose
        self.shuffle = shuffle
class SimpleImputer(BaseEstimator):
    """Stub mirroring SimpleImputer's constructor for the repr tests.

    The np.nan default is exercised by ``test_changed_only`` (NaN equality is
    special-cased by the repr machinery).
    """
    def __init__(
        self,
        missing_values=np.nan,
        strategy="mean",
        fill_value=None,
        verbose=0,
        copy=True,
    ):
        self.missing_values = missing_values
        self.strategy = strategy
        self.fill_value = fill_value
        self.verbose = verbose
        self.copy = copy
@config_context(print_changed_only=False)
def test_basic():
    """All parameters are printed when print_changed_only is False."""
    # Basic pprint test
    lr = LogisticRegression()
    expected = """
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
                   intercept_scaling=1, l1_ratio=0, max_iter=100,
                   multi_class='warn', n_jobs=None, random_state=None,
                   solver='warn', tol=0.0001, verbose=0, warm_start=False)"""
    expected = expected[1:]  # remove first \n
    assert lr.__repr__() == expected
def test_changed_only():
    """Only non-default parameters are printed in changed-only mode."""
    # Make sure the changed_only param is correctly used when True (default)
    lr = LogisticRegression(C=99)
    expected = """LogisticRegression(C=99)"""
    assert lr.__repr__() == expected
    # Check with a repr that doesn't fit on a single line
    lr = LogisticRegression(
        C=99, class_weight=0.4, fit_intercept=False, tol=1234, verbose=True
    )
    expected = """
LogisticRegression(C=99, class_weight=0.4, fit_intercept=False, tol=1234,
                   verbose=True)"""
    expected = expected[1:]  # remove first \n
    assert lr.__repr__() == expected
    imputer = SimpleImputer(missing_values=0)
    expected = """SimpleImputer(missing_values=0)"""
    assert imputer.__repr__() == expected
    # Defaults to np.nan, trying with float('NaN')
    imputer = SimpleImputer(missing_values=float("NaN"))
    expected = """SimpleImputer()"""
    assert imputer.__repr__() == expected
    # make sure array parameters don't throw error (see #13583)
    repr(LogisticRegressionCV(Cs=np.array([0.1, 1]), use_legacy_attributes=False))
@config_context(print_changed_only=False)
def test_pipeline():
    """Nested estimators inside a pipeline are rendered with alignment."""
    # Render a pipeline object
    pipeline = make_pipeline(StandardScaler(), LogisticRegression(C=999))
    expected = """
Pipeline(memory=None,
         steps=[('standardscaler',
                 StandardScaler(copy=True, with_mean=True, with_std=True)),
                ('logisticregression',
                 LogisticRegression(C=999, class_weight=None, dual=False,
                                    fit_intercept=True, intercept_scaling=1,
                                    l1_ratio=0, max_iter=100,
                                    multi_class='warn', n_jobs=None,
                                    random_state=None, solver='warn',
                                    tol=0.0001, verbose=0, warm_start=False))],
         transform_input=None, verbose=False)"""
    expected = expected[1:]  # remove first \n
    assert pipeline.__repr__() == expected
@config_context(print_changed_only=False)
def test_deeply_nested():
    """Deep nesting produces ever-increasing (but valid) indentation."""
    # Render a deeply nested estimator
    rfe = RFE(RFE(RFE(RFE(RFE(RFE(RFE(LogisticRegression())))))))
    expected = """
RFE(estimator=RFE(estimator=RFE(estimator=RFE(estimator=RFE(estimator=RFE(estimator=RFE(estimator=LogisticRegression(C=1.0,
                                                                                                                      class_weight=None,
                                                                                                                      dual=False,
                                                                                                                      fit_intercept=True,
                                                                                                                      intercept_scaling=1,
                                                                                                                      l1_ratio=0,
                                                                                                                      max_iter=100,
                                                                                                                      multi_class='warn',
                                                                                                                      n_jobs=None,
                                                                                                                      random_state=None,
                                                                                                                      solver='warn',
                                                                                                                      tol=0.0001,
                                                                                                                      verbose=0,
                                                                                                                      warm_start=False),
                                                                                        n_features_to_select=None,
                                                                                        step=1,
                                                                                        verbose=0),
                                                                          n_features_to_select=None,
                                                                          step=1,
                                                                          verbose=0),
                                                            n_features_to_select=None,
                                                            step=1, verbose=0),
                                              n_features_to_select=None, step=1,
                                              verbose=0),
                                n_features_to_select=None, step=1, verbose=0),
                  n_features_to_select=None, step=1, verbose=0),
    n_features_to_select=None, step=1, verbose=0)"""
    expected = expected[1:]  # remove first \n
    assert rfe.__repr__() == expected
@pytest.mark.parametrize(
    ("print_changed_only", "expected"),
    [
        (True, "RFE(estimator=RFE(...))"),
        (
            False,
            "RFE(estimator=RFE(...), n_features_to_select=None, step=1, verbose=0)",
        ),
    ],
)
def test_print_estimator_max_depth(print_changed_only, expected):
    """With depth=1 the printer elides nested estimators as ``...``."""
    # Five levels of RFE wrapped around a LogisticRegression.
    estimator = LogisticRegression()
    for _ in range(5):
        estimator = RFE(estimator)
    with config_context(print_changed_only=print_changed_only):
        printer = _EstimatorPrettyPrinter(depth=1)
        assert printer.pformat(estimator) == expected
@config_context(print_changed_only=False)
def test_gridsearch():
    """A grid-search with a nested estimator and a param grid renders fully."""
    # render a gridsearch
    param_grid = [
        {"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
        {"kernel": ["linear"], "C": [1, 10, 100, 1000]},
    ]
    gs = GridSearchCV(SVC(), param_grid, cv=5)
    expected = """
GridSearchCV(cv=5, error_score='raise-deprecating',
             estimator=SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
                           decision_function_shape='ovr', degree=3,
                           gamma='auto_deprecated', kernel='rbf', max_iter=-1,
                           probability=False, random_state=None, shrinking=True,
                           tol=0.001, verbose=False),
             iid='warn', n_jobs=None,
             param_grid=[{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001],
                          'kernel': ['rbf']},
                         {'C': [1, 10, 100, 1000], 'kernel': ['linear']}],
             pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
             scoring=None, verbose=0)"""
    expected = expected[1:]  # remove first \n
    assert gs.__repr__() == expected
@config_context(print_changed_only=False)
def test_gridsearch_pipeline():
    """A pipeline nested inside a grid-search renders with stable output."""
    # render a pipeline inside a gridsearch
    pp = _EstimatorPrettyPrinter(compact=True, indent=1, indent_at_name=True)
    pipeline = Pipeline([("reduce_dim", PCA()), ("classify", SVC())])
    N_FEATURES_OPTIONS = [2, 4, 8]
    C_OPTIONS = [1, 10, 100, 1000]
    param_grid = [
        {
            "reduce_dim": [PCA(iterated_power=7), NMF()],
            "reduce_dim__n_components": N_FEATURES_OPTIONS,
            "classify__C": C_OPTIONS,
        },
        {
            "reduce_dim": [SelectKBest(chi2)],
            "reduce_dim__k": N_FEATURES_OPTIONS,
            "classify__C": C_OPTIONS,
        },
    ]
    gspipeline = GridSearchCV(pipeline, cv=3, n_jobs=1, param_grid=param_grid)
    expected = """
GridSearchCV(cv=3, error_score='raise-deprecating',
             estimator=Pipeline(memory=None,
                                steps=[('reduce_dim',
                                        PCA(copy=True, iterated_power='auto',
                                            n_components=None,
                                            random_state=None,
                                            svd_solver='auto', tol=0.0,
                                            whiten=False)),
                                       ('classify',
                                        SVC(C=1.0, cache_size=200,
                                            class_weight=None, coef0=0.0,
                                            decision_function_shape='ovr',
                                            degree=3, gamma='auto_deprecated',
                                            kernel='rbf', max_iter=-1,
                                            probability=False,
                                            random_state=None, shrinking=True,
                                            tol=0.001, verbose=False))]),
             iid='warn', n_jobs=1,
             param_grid=[{'classify__C': [1, 10, 100, 1000],
                          'reduce_dim': [PCA(copy=True, iterated_power=7,
                                             n_components=None,
                                             random_state=None,
                                             svd_solver='auto', tol=0.0,
                                             whiten=False),
                                         NMF(alpha=0.0, beta_loss='frobenius',
                                             init=None, l1_ratio=0.0,
                                             max_iter=200, n_components=None,
                                             random_state=None, shuffle=False,
                                             solver='cd', tol=0.0001,
                                             verbose=0)],
                          'reduce_dim__n_components': [2, 4, 8]},
                         {'classify__C': [1, 10, 100, 1000],
                          'reduce_dim': [SelectKBest(k=10,
                                                     score_func=<function chi2 at some_address>)],
                          'reduce_dim__k': [2, 4, 8]}],
             pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
             scoring=None, verbose=0)"""  # noqa: E501
    expected = expected[1:]  # remove first \n
    repr_ = pp.pformat(gspipeline)
    # Remove address of '<function chi2 at 0x.....>' for reproducibility
    repr_ = re.sub("function chi2 at 0x.*>", "function chi2 at some_address>", repr_)
    assert repr_ == expected
@config_context(print_changed_only=False)
def test_n_max_elements_to_show():
    """Dicts/lists longer than n_max_elements_to_show get an ellipsis."""
    n_max_elements_to_show = 30
    pp = _EstimatorPrettyPrinter(
        compact=True,
        indent=1,
        indent_at_name=True,
        n_max_elements_to_show=n_max_elements_to_show,
    )
    # No ellipsis
    vocabulary = {i: i for i in range(n_max_elements_to_show)}
    vectorizer = CountVectorizer(vocabulary=vocabulary)
    expected = r"""
CountVectorizer(analyzer='word', binary=False, decode_error='strict',
                dtype=<class 'numpy.int64'>, encoding='utf-8', input='content',
                lowercase=True, max_df=1.0, max_features=None, min_df=1,
                ngram_range=(1, 1), preprocessor=None, stop_words=None,
                strip_accents=None, token_pattern='(?u)\\b\\w\\w+\\b',
                tokenizer=None,
                vocabulary={0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7,
                            8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14,
                            15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20,
                            21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26,
                            27: 27, 28: 28, 29: 29})"""
    expected = expected[1:]  # remove first \n
    assert pp.pformat(vectorizer) == expected
    # Now with ellipsis
    vocabulary = {i: i for i in range(n_max_elements_to_show + 1)}
    vectorizer = CountVectorizer(vocabulary=vocabulary)
    expected = r"""
CountVectorizer(analyzer='word', binary=False, decode_error='strict',
                dtype=<class 'numpy.int64'>, encoding='utf-8', input='content',
                lowercase=True, max_df=1.0, max_features=None, min_df=1,
                ngram_range=(1, 1), preprocessor=None, stop_words=None,
                strip_accents=None, token_pattern='(?u)\\b\\w\\w+\\b',
                tokenizer=None,
                vocabulary={0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7,
                            8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14,
                            15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20,
                            21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26,
                            27: 27, 28: 28, 29: 29, ...})"""
    expected = expected[1:]  # remove first \n
    assert pp.pformat(vectorizer) == expected
    # Also test with lists
    param_grid = {"C": list(range(n_max_elements_to_show))}
    gs = GridSearchCV(SVC(), param_grid)
    expected = """
GridSearchCV(cv='warn', error_score='raise-deprecating',
             estimator=SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
                           decision_function_shape='ovr', degree=3,
                           gamma='auto_deprecated', kernel='rbf', max_iter=-1,
                           probability=False, random_state=None, shrinking=True,
                           tol=0.001, verbose=False),
             iid='warn', n_jobs=None,
             param_grid={'C': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                               27, 28, 29]},
             pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
             scoring=None, verbose=0)"""
    expected = expected[1:]  # remove first \n
    assert pp.pformat(gs) == expected
    # Now with ellipsis
    param_grid = {"C": list(range(n_max_elements_to_show + 1))}
    gs = GridSearchCV(SVC(), param_grid)
    expected = """
GridSearchCV(cv='warn', error_score='raise-deprecating',
             estimator=SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
                           decision_function_shape='ovr', degree=3,
                           gamma='auto_deprecated', kernel='rbf', max_iter=-1,
                           probability=False, random_state=None, shrinking=True,
                           tol=0.001, verbose=False),
             iid='warn', n_jobs=None,
             param_grid={'C': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                               27, 28, 29, ...]},
             pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
             scoring=None, verbose=0)"""
    expected = expected[1:]  # remove first \n
    assert pp.pformat(gs) == expected
@config_context(print_changed_only=False)
def test_bruteforce_ellipsis():
    """Check rendering of the brute-force ellipsis in estimator reprs."""
    # Check that the bruteforce ellipsis (used when the number of non-blank
    # characters exceeds N_CHAR_MAX) renders correctly.
    lr = LogisticRegression()

    # test when the left and right side of the ellipsis aren't on the same
    # line.
    expected = """
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
                   in...
                   multi_class='warn', n_jobs=None, random_state=None,
                   solver='warn', tol=0.0001, verbose=0, warm_start=False)"""
    expected = expected[1:]  # remove first \n
    assert lr.__repr__(N_CHAR_MAX=150) == expected

    # test with very small N_CHAR_MAX
    # Note that N_CHAR_MAX is not strictly enforced, but it's normal: to avoid
    # weird reprs we still keep the whole line of the right part (after the
    # ellipsis).
    expected = """
Lo...
                   solver='warn', tol=0.0001, verbose=0, warm_start=False)"""
    expected = expected[1:]  # remove first \n
    assert lr.__repr__(N_CHAR_MAX=4) == expected

    # test with N_CHAR_MAX == number of non-blank characters: In this case we
    # don't want ellipsis
    full_repr = lr.__repr__(N_CHAR_MAX=float("inf"))
    n_nonblank = len("".join(full_repr.split()))
    assert lr.__repr__(N_CHAR_MAX=n_nonblank) == full_repr
    assert "..." not in full_repr

    # test with N_CHAR_MAX == number of non-blank characters - 10: the left and
    # right side of the ellispsis are on different lines. In this case we
    # want to expend the whole line of the right side
    expected = """
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
                   intercept_scaling=1, l1_ratio=0,...00,
                   multi_class='warn', n_jobs=None, random_state=None,
                   solver='warn', tol=0.0001, verbose=0, warm_start=False)"""
    expected = expected[1:]  # remove first \n
    assert lr.__repr__(N_CHAR_MAX=n_nonblank - 10) == expected

    # test with N_CHAR_MAX == number of non-blank characters - 10: the left and
    # right side of the ellispsis are on the same line. In this case we don't
    # want to expend the whole line of the right side, just add the ellispsis
    # between the 2 sides.
    expected = """
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
                   intercept_scaling=1, l1_ratio=0, max...r=100,
                   multi_class='warn', n_jobs=None, random_state=None,
                   solver='warn', tol=0.0001, verbose=0, warm_start=False)"""
    expected = expected[1:]  # remove first \n
    assert lr.__repr__(N_CHAR_MAX=n_nonblank - 4) == expected

    # test with N_CHAR_MAX == number of non-blank characters - 2: the left and
    # right side of the ellispsis are on the same line, but adding the ellipsis
    # would actually make the repr longer. So we don't add the ellipsis.
    expected = """
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
                   intercept_scaling=1, l1_ratio=0, max_iter=100,
                   multi_class='warn', n_jobs=None, random_state=None,
                   solver='warn', tol=0.0001, verbose=0, warm_start=False)"""
    expected = expected[1:]  # remove first \n
    assert lr.__repr__(N_CHAR_MAX=n_nonblank - 2) == expected
def test_builtin_prettyprinter():
    """Smoke test: the stdlib PrettyPrinter can pprint an estimator."""
    # non regression test than ensures we can still use the builtin
    # PrettyPrinter class for estimators (as done e.g. by joblib).
    # Used to be a bug
    PrettyPrinter().pprint(LogisticRegression())
def test_kwargs_in_init():
    """print_changed_only=True works for estimators taking **kwargs in init."""
    # Make sure the changed_only=True mode is OK when an argument is passed as
    # kwargs.
    # Non-regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/17206

    class WithKWargs(BaseEstimator):
        # Estimator with a kwargs argument. These need to hack around
        # set_params and get_params. Here we mimic what LightGBM does.
        def __init__(self, a="willchange", b="unchanged", **kwargs):
            self.a = a
            self.b = b
            self._other_params = {}
            self.set_params(**kwargs)

        def get_params(self, deep=True):
            params = super().get_params(deep=deep)
            params.update(self._other_params)
            return params

        def set_params(self, **params):
            for key, value in params.items():
                setattr(self, key, value)
                self._other_params[key] = value
            return self

    est = WithKWargs(a="something", c="abcd", d=None)

    expected = "WithKWargs(a='something', c='abcd', d=None)"
    assert est.__repr__() == expected

    with config_context(print_changed_only=False):
        expected = "WithKWargs(a='something', b='unchanged', c='abcd', d=None)"
        assert est.__repr__() == expected
def test_complexity_print_changed_only():
    """__repr__ call count is independent of the print_changed_only setting."""
    # Make sure `__repr__` is called the same amount of times
    # whether `print_changed_only` is True or False
    # Non-regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/18490

    class DummyEstimator(TransformerMixin, BaseEstimator):
        nb_times_repr_called = 0

        def __init__(self, estimator=None):
            self.estimator = estimator

        def __repr__(self):
            DummyEstimator.nb_times_repr_called += 1
            return super().__repr__()

        def transform(self, X, copy=None):  # pragma: no cover
            return X

    estimator = DummyEstimator(
        make_pipeline(DummyEstimator(DummyEstimator()), DummyEstimator(), "passthrough")
    )
    with config_context(print_changed_only=False):
        repr(estimator)
        nb_repr_print_changed_only_false = DummyEstimator.nb_times_repr_called

    DummyEstimator.nb_times_repr_called = 0
    with config_context(print_changed_only=True):
        repr(estimator)
        nb_repr_print_changed_only_true = DummyEstimator.nb_times_repr_called

    assert nb_repr_print_changed_only_false == nb_repr_print_changed_only_true
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_estimator_checks.py | sklearn/utils/tests/test_estimator_checks.py | # We can not use pytest here, because we run
# build_tools/azure/test_pytest_soft_dependency.sh on these
# tests to make sure estimator_checks works without pytest.
import importlib
import re
import sys
import unittest
import warnings
from inspect import isgenerator
from numbers import Integral, Real
import joblib
import numpy as np
import scipy.sparse as sp
from sklearn import config_context, get_config
from sklearn.base import BaseEstimator, ClassifierMixin, OutlierMixin, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.datasets import (
load_iris,
make_multilabel_classification,
)
from sklearn.decomposition import PCA
from sklearn.exceptions import (
ConvergenceWarning,
EstimatorCheckFailedWarning,
SkipTestWarning,
)
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
MultiTaskElasticNet,
SGDClassifier,
)
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, NuSVC
from sklearn.utils import _array_api, all_estimators, deprecated
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils._test_common.instance_generator import (
_construct_instances,
_get_expected_failed_checks,
)
from sklearn.utils._testing import (
MinimalClassifier,
MinimalRegressor,
MinimalTransformer,
SkipTest,
ignore_warnings,
raises,
)
from sklearn.utils.estimator_checks import (
_check_name,
_NotAnArray,
_yield_all_checks,
check_array_api_input,
check_class_weight_balanced_linear_classifier,
check_classifier_data_not_an_array,
check_classifier_not_supporting_multiclass,
check_classifiers_multilabel_output_format_decision_function,
check_classifiers_multilabel_output_format_predict,
check_classifiers_multilabel_output_format_predict_proba,
check_classifiers_one_label_sample_weights,
check_dataframe_column_names_consistency,
check_decision_proba_consistency,
check_dict_unchanged,
check_dont_overwrite_parameters,
check_estimator,
check_estimator_cloneable,
check_estimator_repr,
check_estimator_sparse_array,
check_estimator_sparse_matrix,
check_estimator_sparse_tag,
check_estimator_tags_renamed,
check_estimators_nan_inf,
check_estimators_overwrite_params,
check_estimators_unfitted,
check_fit_check_is_fitted,
check_fit_score_takes_y,
check_methods_sample_order_invariance,
check_methods_subset_invariance,
check_mixin_order,
check_no_attributes_set_in_init,
check_outlier_contamination,
check_outlier_corruption,
check_parameters_default_constructible,
check_positive_only_tag_during_fit,
check_regressor_data_not_an_array,
check_requires_y_none,
check_sample_weights_pandas_series,
check_set_params,
estimator_checks_generator,
set_random_state,
)
from sklearn.utils.fixes import CSR_CONTAINERS, SPARRAY_PRESENT
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import (
check_array,
check_is_fitted,
check_X_y,
validate_data,
)
def _mark_thread_unsafe_if_pytest_imported(f):
pytest = sys.modules.get("pytest")
if pytest is not None:
return pytest.mark.thread_unsafe(f)
else:
return f
class CorrectNotFittedError(ValueError):
    """Exception class to raise if estimator is used before fitting.

    Like NotFittedError, it inherits from ValueError, but not from
    AttributeError. Used for testing only.
    """
class BaseBadClassifier(ClassifierMixin, BaseEstimator):
    """Minimal broken classifier: no validation in fit, constant predictions."""

    def fit(self, X, y):
        return self

    def predict(self, X):
        # Always predicts ones, with no input validation.
        return np.ones(X.shape[0])
class ChangesDict(BaseEstimator):
    """Estimator that mutates its __dict__ during predict (a violation)."""

    def __init__(self, key=0):
        self.key = key

    def fit(self, X, y=None):
        X, y = validate_data(self, X, y)
        return self

    def predict(self, X):
        X = check_array(X)
        # Mutating a parameter at predict time is the violation this class
        # exists to trigger in check_dict_unchanged.
        self.key = 1000
        return np.ones(X.shape[0])
class SetsWrongAttribute(BaseEstimator):
    """Estimator that adds a new public attribute during fit (a violation)."""

    def __init__(self, acceptable_key=0):
        self.acceptable_key = acceptable_key

    def fit(self, X, y=None):
        # Public (non-underscore) attributes must not be added during fit.
        self.wrong_attribute = 0
        X, y = validate_data(self, X, y)
        return self
class ChangesWrongAttribute(BaseEstimator):
    """Estimator that mutates an __init__ parameter during fit (a violation)."""

    def __init__(self, wrong_attribute=0):
        self.wrong_attribute = wrong_attribute

    def fit(self, X, y=None):
        self.wrong_attribute = 1
        X, y = validate_data(self, X, y)
        return self
class ChangesUnderscoreAttribute(BaseEstimator):
    """Estimator that only sets a private attribute in fit (allowed)."""

    def fit(self, X, y=None):
        self._good_attribute = 1
        X, y = validate_data(self, X, y)
        return self
class RaisesErrorInSetParams(BaseEstimator):
    """Estimator whose set_params raises for invalid values of ``p``."""

    def __init__(self, p=0):
        self.p = p

    def set_params(self, **kwargs):
        # Validate and consume "p" ourselves; delegate the rest to the base
        # class implementation.
        if "p" in kwargs:
            p = kwargs.pop("p")
            if p < 0:
                raise ValueError("p can't be less than 0")
            self.p = p
        return super().set_params(**kwargs)

    def fit(self, X, y=None):
        X, y = validate_data(self, X, y)
        return self
class HasMutableParameters(BaseEstimator):
    """Estimator with a mutable default parameter (a violation)."""

    def __init__(self, p=object()):
        self.p = p

    def fit(self, X, y=None):
        X, y = validate_data(self, X, y)
        return self
class HasImmutableParameters(BaseEstimator):
    """Estimator whose default parameters are all immutable (allowed)."""

    # Note that object is an uninitialized class, thus immutable.
    def __init__(self, p=42, q=np.int32(42), r=object):
        self.p = p
        self.q = q
        self.r = r

    def fit(self, X, y=None):
        X, y = validate_data(self, X, y)
        return self
class ModifiesValueInsteadOfRaisingError(BaseEstimator):
    """Estimator whose set_params silently clamps ``p`` instead of raising."""

    def __init__(self, p=0):
        self.p = p

    def set_params(self, **kwargs):
        if "p" in kwargs:
            p = kwargs.pop("p")
            # Silently coercing the value makes get_params disagree with what
            # was passed to set_params — the violation this class triggers.
            if p < 0:
                p = 0
            self.p = p
        return super().set_params(**kwargs)

    def fit(self, X, y=None):
        X, y = validate_data(self, X, y)
        return self
class ModifiesAnotherValue(BaseEstimator):
    """Estimator whose set_params changes ``b`` as a side effect of ``a``."""

    def __init__(self, a=0, b="method1"):
        self.a = a
        self.b = b

    def set_params(self, **kwargs):
        if "a" in kwargs:
            a = kwargs.pop("a")
            self.a = a
            # Coupling parameters inside set_params is the violation here.
            if a is None:
                kwargs.pop("b")
                self.b = "method2"
        return super().set_params(**kwargs)

    def fit(self, X, y=None):
        X, y = validate_data(self, X, y)
        return self
class NoCheckinPredict(BaseBadClassifier):
    """Classifier whose inherited predict performs no input validation."""

    def fit(self, X, y):
        X, y = validate_data(self, X, y)
        return self
class NoSparseClassifier(BaseBadClassifier):
    """Classifier that fails with a nonsensical error on sparse input.

    Parameters
    ----------
    raise_for_type : str, default=None
        Expects "sparse_array" or "sparse_matrix": the kind of sparse input
        that triggers the error.
    """

    def __init__(self, raise_for_type=None):
        # raise_for_type : str, expects "sparse_array" or "sparse_matrix"
        self.raise_for_type = raise_for_type

    def fit(self, X, y):
        X, y = validate_data(self, X, y, accept_sparse=["csr", "csc"])
        # Initialize to False so fitting with raise_for_type=None (or any
        # unrecognized value) does not hit a NameError below.
        correct_type = False
        if self.raise_for_type == "sparse_array":
            correct_type = isinstance(X, sp.sparray)
        elif self.raise_for_type == "sparse_matrix":
            correct_type = isinstance(X, sp.spmatrix)
        if correct_type:
            # Deliberately uninformative message: the check must flag it.
            raise ValueError("Nonsensical Error")
        return self

    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
    """Classifier that properly raises NotFittedError from predict."""

    def fit(self, X, y):
        X, y = validate_data(self, X, y)
        self.coef_ = np.ones(X.shape[1])
        return self

    def predict(self, X):
        # check_is_fitted raises NotFittedError before coef_ is set.
        check_is_fitted(self)
        X = check_array(X)
        return np.ones(X.shape[0])
class NoSampleWeightPandasSeriesType(BaseEstimator):
    """Estimator that rejects a pandas.Series passed as ``sample_weight``.

    Used to verify that check_sample_weights_pandas_series flags estimators
    that do not accept pandas.Series sample weights.
    """

    def fit(self, X, y, sample_weight=None):
        # Convert data
        X, y = validate_data(
            self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
        )
        # Function is only called after we verify that pandas is installed
        from pandas import Series

        if isinstance(sample_weight, Series):
            # Fixed typo: was "'sample_weight'of" (missing space). The message
            # is not pattern-matched by the corresponding check, so this is
            # safe.
            raise ValueError(
                "Estimator does not accept 'sample_weight' of type pandas.Series"
            )
        return self

    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
class BadBalancedWeightsClassifier(BaseBadClassifier):
    """Classifier that corrupts the computed balanced class weights."""

    def __init__(self, class_weight=None):
        self.class_weight = class_weight

    def fit(self, X, y):
        from sklearn.preprocessing import LabelEncoder
        from sklearn.utils import compute_class_weight

        label_encoder = LabelEncoder().fit(y)
        classes = label_encoder.classes_
        class_weight = compute_class_weight(self.class_weight, classes=classes, y=y)

        # Intentionally modify the balanced class_weight
        # to simulate a bug and raise an exception
        if self.class_weight == "balanced":
            class_weight += 1.0

        # Simply assigning coef_ to the class_weight
        self.coef_ = class_weight
        return self
class BadTransformerWithoutMixin(BaseEstimator):
    """Transformer-like estimator that does not inherit TransformerMixin."""

    def fit(self, X, y=None):
        X = validate_data(self, X)
        return self

    def transform(self, X):
        check_is_fitted(self)
        X = validate_data(self, X, reset=False)
        return X
class NotInvariantPredict(BaseEstimator):
    """Estimator whose predictions depend on the size of the input batch."""

    def fit(self, X, y):
        # Convert data
        X, y = validate_data(
            self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
        )
        return self

    def predict(self, X):
        # return 1 if X has more than one element else return 0
        X = check_array(X)
        n_samples = X.shape[0]
        # Deliberately violates subset invariance: predictions depend on the
        # number of samples passed in.
        return np.ones(n_samples) if n_samples > 1 else np.zeros(n_samples)
class NotInvariantSampleOrder(BaseEstimator):
    """Estimator whose predictions depend on the ordering of the samples."""

    def fit(self, X, y):
        X, y = validate_data(
            self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
        )
        # store the original X to check for sample order later
        self._X = X
        return self

    def predict(self, X):
        X = check_array(X)
        # if the input contains the same elements but different sample order,
        # then just return zeros.
        if (
            np.array_equiv(np.sort(X, axis=0), np.sort(self._X, axis=0))
            and (X != self._X).any()
        ):
            return np.zeros(X.shape[0])
        return X[:, 0]
class OneClassSampleErrorClassifier(BaseBadClassifier):
    """Classifier allowing to trigger different behaviors when `sample_weight` reduces
    the number of classes to 1."""

    def __init__(self, raise_when_single_class=False):
        self.raise_when_single_class = raise_when_single_class

    def fit(self, X, y, sample_weight=None):
        X, y = check_X_y(
            X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
        )

        self.has_single_class_ = False
        self.classes_, y = np.unique(y, return_inverse=True)
        n_classes_ = self.classes_.shape[0]
        if n_classes_ < 2 and self.raise_when_single_class:
            self.has_single_class_ = True
            raise ValueError("normal class error")

        # find the number of class after trimming
        if sample_weight is not None:
            if isinstance(sample_weight, np.ndarray) and len(sample_weight) > 0:
                # Classes whose total weight is zero are considered trimmed.
                n_classes_ = np.count_nonzero(np.bincount(y, sample_weight))
            if n_classes_ < 2:
                self.has_single_class_ = True
                # Deliberately does not mention "class": the check must flag
                # this uninformative message.
                raise ValueError("Nonsensical Error")

        return self

    def predict(self, X):
        check_is_fitted(self)
        X = check_array(X)
        if self.has_single_class_:
            return np.zeros(X.shape[0])
        return np.ones(X.shape[0])
class LargeSparseNotSupportedClassifier(BaseEstimator):
    """Estimator that claims to support large sparse data
    (accept_large_sparse=True), but doesn't.

    Parameters
    ----------
    raise_for_type : str, default=None
        Expects "sparse_array" or "sparse_matrix": the kind of sparse input
        whose 64-bit indices trigger the failure.
    """

    def __init__(self, raise_for_type=None):
        # raise_for_type : str, expects "sparse_array" or "sparse_matrix"
        self.raise_for_type = raise_for_type

    def fit(self, X, y):
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=("csr", "csc", "coo"),
            accept_large_sparse=True,
            multi_output=True,
            y_numeric=True,
        )
        # Initialize to False so fitting with raise_for_type=None (or any
        # unrecognized value) does not hit a NameError below.
        correct_type = False
        if self.raise_for_type == "sparse_array":
            correct_type = isinstance(X, sp.sparray)
        elif self.raise_for_type == "sparse_matrix":
            correct_type = isinstance(X, sp.spmatrix)
        if correct_type:
            # Reject 64-bit indices to simulate the unsupported case.
            if X.format == "coo":
                if X.row.dtype == "int64" or X.col.dtype == "int64":
                    raise ValueError("Estimator doesn't support 64-bit indices")
            elif X.format in ["csc", "csr"]:
                assert "int64" not in (
                    X.indices.dtype,
                    X.indptr.dtype,
                ), "Estimator doesn't support 64-bit indices"

        return self
class SparseTransformer(TransformerMixin, BaseEstimator):
    """Transformer that converts its (dense or sparse) input to sparse output."""

    def __init__(self, sparse_container=None):
        self.sparse_container = sparse_container

    def fit(self, X, y=None):
        validate_data(self, X)
        return self

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)

    def transform(self, X):
        check_is_fitted(self)
        X = validate_data(self, X, accept_sparse=True, reset=False)
        return self.sparse_container(X)
class EstimatorInconsistentForPandas(BaseEstimator):
    """Estimator that fits a different value for DataFrame vs array input."""

    def fit(self, X, y):
        try:
            from pandas import DataFrame

            if isinstance(X, DataFrame):
                # Inconsistent on purpose: first row for DataFrames...
                self.value_ = X.iloc[0, 0]
            else:
                # ...second row for plain arrays.
                X = check_array(X)
                self.value_ = X[1, 0]
            return self

        except ImportError:
            X = check_array(X)
            self.value_ = X[1, 0]
            return self

    def predict(self, X):
        X = check_array(X)
        return np.array([self.value_] * X.shape[0])
class UntaggedBinaryClassifier(SGDClassifier):
    # Toy classifier that only supports binary classification, will fail tests.
    def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
        super().fit(X, y, coef_init, intercept_init, sample_weight)
        # Fails here because the multi_class tag was not set to False.
        if len(self.classes_) > 2:
            raise ValueError("Only 2 classes are supported")
        return self

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        super().partial_fit(X=X, y=y, classes=classes, sample_weight=sample_weight)
        if len(self.classes_) > 2:
            raise ValueError("Only 2 classes are supported")
        return self
class TaggedBinaryClassifier(UntaggedBinaryClassifier):
    """Binary-only classifier that correctly advertises multi_class=False."""

    def fit(self, X, y):
        y_type = type_of_target(y, input_name="y", raise_unknown=True)
        if y_type != "binary":
            raise ValueError(
                "Only binary classification is supported. The type of the target "
                f"is {y_type}."
            )
        return super().fit(X, y)

    # Toy classifier that only supports binary classification.
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.classifier_tags.multi_class = False
        return tags
class RequiresPositiveXRegressor(LinearRegression):
    """Regressor that only accepts non-negative X, with the matching tag."""

    def fit(self, X, y):
        # reject sparse X to be able to call (X < 0).any()
        X, y = validate_data(self, X, y, accept_sparse=False, multi_output=True)
        if (X < 0).any():
            raise ValueError("Negative values in data passed to X.")
        return super().fit(X, y)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.positive_only = True
        # reject sparse X to be able to call (X < 0).any()
        tags.input_tags.sparse = False
        return tags
class RequiresPositiveYRegressor(LinearRegression):
    """Regressor that only accepts strictly positive y, with the matching tag."""

    def fit(self, X, y):
        X, y = validate_data(self, X, y, accept_sparse=True, multi_output=True)
        if (y <= 0).any():
            raise ValueError("negative y values not supported!")
        return super().fit(X, y)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.target_tags.positive_only = True
        return tags
class PoorScoreLogisticRegression(LogisticRegression):
    """LogisticRegression with a degraded decision function and poor_score tag."""

    def decision_function(self, X):
        # Shift the decision function so predictions score poorly.
        return super().decision_function(X) + 1

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.classifier_tags.poor_score = True
        return tags
class PartialFitChecksName(BaseEstimator):
    """Estimator with a partial_fit that resets validation only on first call."""

    def fit(self, X, y):
        validate_data(self, X, y)
        return self

    def partial_fit(self, X, y):
        # Only reset feature bookkeeping on the very first partial_fit call.
        reset = not hasattr(self, "_fitted")
        validate_data(self, X, y, reset=reset)
        self._fitted = True
        return self
class BrokenArrayAPI(BaseEstimator):
    """Make different predictions when using Numpy and the Array API"""

    def fit(self, X, y):
        return self

    def predict(self, X):
        enabled = get_config()["array_api_dispatch"]
        xp, _ = _array_api.get_namespace(X)
        # Deliberately inconsistent outputs depending on dispatch state.
        if enabled:
            return xp.asarray([1, 2, 3])
        else:
            return np.array([3, 2, 1])
def test_check_array_api_input():
    """check_array_api_input flags numpy/array-API output mismatches."""
    try:
        importlib.import_module("array_api_strict")
    except ModuleNotFoundError:  # pragma: nocover
        raise SkipTest("array-api-strict is required to run this test")

    with raises(AssertionError, match="Not equal to tolerance"):
        check_array_api_input(
            "BrokenArrayAPI",
            BrokenArrayAPI(),
            array_namespace="array_api_strict",
            check_values=True,
        )
def test_not_an_array_array_function():
    """_NotAnArray rejects __array_function__ dispatch except may_share_memory."""
    not_array = _NotAnArray(np.ones(10))
    msg = "Don't want to call array_function sum!"
    with raises(TypeError, match=msg):
        np.sum(not_array)
    # always returns True
    assert np.may_share_memory(not_array, None)
def test_check_fit_score_takes_y_works_on_deprecated_fit():
    # Tests that check_fit_score_takes_y works on a class with
    # a deprecated fit method

    class TestEstimatorWithDeprecatedFitMethod(BaseEstimator):
        @deprecated("Deprecated for the purpose of testing check_fit_score_takes_y")
        def fit(self, X, y):
            return self

    check_fit_score_takes_y("test", TestEstimatorWithDeprecatedFitMethod())
def test_check_estimator_with_class_removed():
    """Test that passing a class instead of an instance fails."""
    msg = "Passing a class was deprecated"
    with raises(TypeError, match=msg):
        check_estimator(LogisticRegression)
def test_mutable_default_params():
    """Test that constructor cannot have mutable default parameters."""
    msg = (
        "Parameter 'p' of estimator 'HasMutableParameters' is of type "
        "object which is not allowed"
    )
    # check that the "default_constructible" test checks for mutable parameters
    check_parameters_default_constructible(
        "Immutable", HasImmutableParameters()
    )  # should pass
    with raises(AssertionError, match=msg):
        check_parameters_default_constructible("Mutable", HasMutableParameters())
@_mark_thread_unsafe_if_pytest_imported
def test_check_set_params():
    """Check set_params doesn't fail and sets the right values."""
    # check that values returned by get_params match set_params
    msg = "get_params result does not match what was passed to set_params"
    with raises(AssertionError, match=msg):
        check_set_params("test", ModifiesValueInsteadOfRaisingError())

    # A set_params that raises should only produce a warning, not a failure.
    with warnings.catch_warnings(record=True) as records:
        check_set_params("test", RaisesErrorInSetParams())
    assert UserWarning in [rec.category for rec in records]

    with raises(AssertionError, match=msg):
        check_set_params("test", ModifiesAnotherValue())
def test_check_estimators_nan_inf():
    # check that predict does input validation (doesn't accept dicts in input)
    msg = "Estimator NoCheckinPredict doesn't check for NaN and inf in predict"
    with raises(AssertionError, match=msg):
        check_estimators_nan_inf("NoCheckinPredict", NoCheckinPredict())
def test_check_dict_unchanged():
    # check that estimator state does not change
    # at transform/predict/predict_proba time
    msg = "Estimator changes __dict__ during predict"
    with raises(AssertionError, match=msg):
        check_dict_unchanged("test", ChangesDict())
def test_check_sample_weights_pandas_series():
    # check that sample_weights in fit accepts pandas.Series type
    try:
        from pandas import Series  # noqa: F401

        msg = (
            "Estimator NoSampleWeightPandasSeriesType raises error if "
            "'sample_weight' parameter is of type pandas.Series"
        )
        with raises(ValueError, match=msg):
            check_sample_weights_pandas_series(
                "NoSampleWeightPandasSeriesType", NoSampleWeightPandasSeriesType()
            )
    except ImportError:
        # Silently skip when pandas is not installed.
        pass
def test_check_estimators_overwrite_params():
    # check that `fit` only changes attributes that
    # are private (start with an _ or end with a _).
    msg = (
        "Estimator ChangesWrongAttribute should not change or mutate "
        "the parameter wrong_attribute from 0 to 1 during fit."
    )
    with raises(AssertionError, match=msg):
        check_estimators_overwrite_params(
            "ChangesWrongAttribute", ChangesWrongAttribute()
        )
    check_estimators_overwrite_params("test", ChangesUnderscoreAttribute())
def test_check_dont_overwrite_parameters():
    # check that `fit` doesn't add any public attribute
    msg = (
        r"Estimator adds public attribute\(s\) during the fit method."
        " Estimators are only allowed to add private attributes"
        " either started with _ or ended"
        " with _ but wrong_attribute added"
    )
    with raises(AssertionError, match=msg):
        check_dont_overwrite_parameters("test", SetsWrongAttribute())
def test_check_methods_sample_order_invariance():
    # check for sample order invariance
    name = NotInvariantSampleOrder.__name__
    method = "predict"
    msg = (
        "{method} of {name} is not invariant when applied to a dataset"
        "with different sample order."
    ).format(method=method, name=name)
    with raises(AssertionError, match=msg):
        check_methods_sample_order_invariance(
            "NotInvariantSampleOrder", NotInvariantSampleOrder()
        )
def test_check_methods_subset_invariance():
    # check for invariant method
    name = NotInvariantPredict.__name__
    method = "predict"
    msg = ("{method} of {name} is not invariant when applied to a subset.").format(
        method=method, name=name
    )
    with raises(AssertionError, match=msg):
        check_methods_subset_invariance("NotInvariantPredict", NotInvariantPredict())
def test_check_estimator_sparse_data():
    # check for sparse data input handling
    name = NoSparseClassifier.__name__
    msg = "Estimator %s doesn't seem to fail gracefully on sparse data" % name
    with raises(AssertionError, match=msg):
        check_estimator_sparse_matrix(name, NoSparseClassifier("sparse_matrix"))

    if SPARRAY_PRESENT:
        with raises(AssertionError, match=msg):
            check_estimator_sparse_array(name, NoSparseClassifier("sparse_array"))

    # Large indices test on bad estimator
    msg = (
        "Estimator LargeSparseNotSupportedClassifier doesn't seem to "
        r"support \S{3}_64 matrix, and is not failing gracefully.*"
    )
    with raises(AssertionError, match=msg):
        check_estimator_sparse_matrix(
            "LargeSparseNotSupportedClassifier",
            LargeSparseNotSupportedClassifier("sparse_matrix"),
        )

    if SPARRAY_PRESENT:
        with raises(AssertionError, match=msg):
            check_estimator_sparse_array(
                "LargeSparseNotSupportedClassifier",
                LargeSparseNotSupportedClassifier("sparse_array"),
            )
def test_check_classifiers_one_label_sample_weights():
    # check for classifiers reducing to less than two classes via sample weights
    name = OneClassSampleErrorClassifier.__name__
    msg = (
        f"{name} failed when fitted on one label after sample_weight "
        "trimming. Error message is not explicit, it should have "
        "'class'."
    )
    with raises(AssertionError, match=msg):
        check_classifiers_one_label_sample_weights(
            "OneClassSampleErrorClassifier", OneClassSampleErrorClassifier()
        )
def test_check_estimator_not_fail_fast():
    """Check the contents of the results returned with on_fail!="raise".

    These results should contain details about the observed failures, expected
    or not.
    """
    check_results = check_estimator(BaseEstimator(), on_fail=None)
    assert isinstance(check_results, list)
    assert len(check_results) > 0
    # Every result is a dict with exactly the documented keys.
    assert all(
        isinstance(item, dict)
        and set(item.keys())
        == {
            "estimator",
            "check_name",
            "exception",
            "status",
            "expected_to_fail",
            "expected_to_fail_reason",
        }
        for item in check_results
    )
    # Some tests are expected to fail, some are expected to pass.
    assert any(item["status"] == "failed" for item in check_results)
    assert any(item["status"] == "passed" for item in check_results)
# Some estimator checks rely on warnings in deep functions calls. This is not
# automatically detected by pytest-run-parallel shallow AST inspection, so we
# need to mark the test function as thread-unsafe.
@_mark_thread_unsafe_if_pytest_imported
def test_check_estimator():
    """check_estimator fails on bad estimators and passes on good ones."""
    # tests that the estimator actually fails on "bad" estimators.
    # not a complete test of all checks, which are very extensive.

    # check that we have a fit method
    msg = "object has no attribute 'fit'"
    with raises(AttributeError, match=msg):
        check_estimator(BaseEstimator())

    # does error on binary_only untagged estimator
    msg = "Only 2 classes are supported"
    with raises(ValueError, match=msg):
        check_estimator(UntaggedBinaryClassifier())

    for csr_container in CSR_CONTAINERS:
        # non-regression test for estimators transforming to sparse data
        check_estimator(SparseTransformer(sparse_container=csr_container))

    # doesn't error on actual estimator
    check_estimator(LogisticRegression())
    check_estimator(LogisticRegression(C=0.01))
    check_estimator(MultiTaskElasticNet())

    # doesn't error on binary_only tagged estimator
    check_estimator(TaggedBinaryClassifier())
    check_estimator(RequiresPositiveXRegressor())

    # Check regressor with requires_positive_y estimator tag
    msg = "negative y values not supported!"
    with raises(ValueError, match=msg):
        check_estimator(RequiresPositiveYRegressor())

    # Does not raise error on classifier with poor_score tag
    check_estimator(PoorScoreLogisticRegression())
def test_check_outlier_corruption():
    """check_outlier_corruption distinguishes ambiguous decision values."""
    # should raise AssertionError
    decision = np.array([0.0, 1.0, 1.5, 2.0])
    with raises(AssertionError):
        check_outlier_corruption(1, 2, decision)
    # should pass
    decision = np.array([0.0, 1.0, 1.0, 2.0])
    check_outlier_corruption(1, 2, decision)
def test_check_estimator_sparse_tag():
    """Test that check_estimator_sparse_tag raises error when sparse tag is
    misaligned."""

    class EstimatorWithSparseConfig(BaseEstimator):
        def __init__(self, tag_sparse, accept_sparse, fit_error=None):
            self.tag_sparse = tag_sparse
            self.accept_sparse = accept_sparse
            self.fit_error = fit_error

        def fit(self, X, y=None):
            if self.fit_error:
                raise self.fit_error
            validate_data(self, X, y, accept_sparse=self.accept_sparse)
            return self

        def __sklearn_tags__(self):
            tags = super().__sklearn_tags__()
            tags.input_tags.sparse = self.tag_sparse
            return tags

    # Matching tag/behavior pairs must pass; mismatched pairs must fail.
    test_cases = [
        {"tag_sparse": True, "accept_sparse": True, "error_type": None},
        {"tag_sparse": False, "accept_sparse": False, "error_type": None},
        {"tag_sparse": False, "accept_sparse": True, "error_type": AssertionError},
        {"tag_sparse": True, "accept_sparse": False, "error_type": AssertionError},
    ]
    for test_case in test_cases:
        estimator = EstimatorWithSparseConfig(
            test_case["tag_sparse"],
            test_case["accept_sparse"],
        )
        if test_case["error_type"] is None:
            check_estimator_sparse_tag(estimator.__class__.__name__, estimator)
        else:
            with raises(test_case["error_type"]):
                check_estimator_sparse_tag(estimator.__class__.__name__, estimator)

    # estimator `tag_sparse=accept_sparse=False` fails on sparse data
    # but does not raise the appropriate error
    for fit_error in [TypeError("unexpected error"), KeyError("other error")]:
        estimator = EstimatorWithSparseConfig(False, False, fit_error)
        with raises(AssertionError):
            check_estimator_sparse_tag(estimator.__class__.__name__, estimator)
def test_check_estimator_transformer_no_mixin():
    # check that TransformerMixin is not required for transformer tests to run
    # but it fails since the tag is not set
    with raises(RuntimeError, "the `transformer_tags` tag is not set"):
        check_estimator(BadTransformerWithoutMixin())
def test_check_estimator_clones():
    # check that check_estimator doesn't modify the estimator it receives
    iris = load_iris()

    for Estimator in [
        GaussianMixture,
        LinearRegression,
        SGDClassifier,
        PCA,
        MiniBatchKMeans,
    ]:
        # without fitting
        with ignore_warnings(category=ConvergenceWarning):
            est = Estimator()
            set_random_state(est)
            # joblib.hash captures the full estimator state for comparison.
            old_hash = joblib.hash(est)
            check_estimator(
                est, expected_failed_checks=_get_expected_failed_checks(est)
            )
        assert old_hash == joblib.hash(est)

        # with fitting
        with ignore_warnings(category=ConvergenceWarning):
            est = Estimator()
            set_random_state(est)
            est.fit(iris.data, iris.target)
            old_hash = joblib.hash(est)
            check_estimator(
                est, expected_failed_checks=_get_expected_failed_checks(est)
            )
        assert old_hash == joblib.hash(est)
def test_check_estimators_unfitted():
    # check that a ValueError/AttributeError is raised when calling predict
    # on an unfitted estimator
    msg = "Estimator should raise a NotFittedError when calling"
    with raises(AssertionError, match=msg):
        check_estimators_unfitted("estimator", NoSparseClassifier())

    # check that CorrectNotFittedError inherit from either ValueError
    # or AttributeError
    check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier())
def test_check_no_attributes_set_in_init():
    """check_no_attributes_set_in_init flags non-conforming __init__ methods."""

    class NonConformantEstimatorPrivateSet(BaseEstimator):
        def __init__(self):
            self.you_should_not_set_this_ = None

    class NonConformantEstimatorNoParamSet(BaseEstimator):
        def __init__(self, you_should_set_this_=None):
            pass

    class ConformantEstimatorClassAttribute(BaseEstimator):
        # making sure our __metadata_request__* class attributes are okay!
        __metadata_request__fit = {"foo": True}

        def fit(self, X, y=None):
            return self  # pragma: no cover

    msg = (
        "Estimator estimator_name should not set any"
        " attribute apart from parameters during init."
        r" Found attributes \['you_should_not_set_this_'\]."
    )
    with raises(AssertionError, match=msg):
        check_no_attributes_set_in_init(
            "estimator_name", NonConformantEstimatorPrivateSet()
        )

    msg = (
        "Estimator estimator_name should store all parameters as an attribute"
        " during init"
    )
    with raises(AttributeError, match=msg):
        check_no_attributes_set_in_init(
            "estimator_name", NonConformantEstimatorNoParamSet()
        )

    # a private class attribute is okay!
    check_no_attributes_set_in_init(
        "estimator_name", ConformantEstimatorClassAttribute()
    )
# also check if cloning an estimator which has non-default set requests is
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_array_api.py | sklearn/utils/tests/test_array_api.py | import os
from functools import partial
import numpy
import pytest
import scipy
import scipy.sparse as sp
from numpy.testing import assert_allclose
from sklearn._config import config_context
from sklearn._loss import HalfMultinomialLoss
from sklearn.base import BaseEstimator
from sklearn.utils._array_api import (
_add_to_diagonal,
_asarray_with_order,
_atol_for_type,
_average,
_convert_to_numpy,
_count_nonzero,
_estimator_with_converted_arrays,
_fill_diagonal,
_get_namespace_device_dtype_ids,
_half_multinomial_loss,
_is_numpy_namespace,
_isin,
_logsumexp,
_max_precision_float_dtype,
_median,
_nanmax,
_nanmean,
_nanmin,
_ravel,
_validate_diagonal_args,
device,
get_namespace,
get_namespace_and_device,
indexing_dtype,
move_to,
np_compat,
supported_float_dtypes,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._testing import (
SkipTest,
_array_api_for_tests,
_convert_container,
assert_array_equal,
skip_if_array_api_compat_not_configured,
)
from sklearn.utils.fixes import _IS_32BIT, CSR_CONTAINERS, np_version, parse_version
@pytest.mark.parametrize("X", [numpy.asarray([1, 2, 3]), [1, 2, 3]])
def test_get_namespace_ndarray_default(X):
    """Without dispatch enabled, get_namespace yields the NumPy wrapper."""
    namespace, is_compliant = get_namespace(X)
    assert namespace is np_compat
    assert not is_compliant
def test_get_namespace_ndarray_creation_device():
    """Creation functions of the NumPy wrapper only accept the "cpu" device."""
    xp_out, _ = get_namespace(numpy.asarray([1, 2, 3]))
    # "cpu" is the only device the NumPy wrapper namespace understands
    assert_allclose(xp_out.full(10, fill_value=2.0, device="cpu"), [2.0] * 10)
    with pytest.raises(ValueError, match="Unsupported device"):
        xp_out.zeros(10, device="cuda")
@skip_if_array_api_compat_not_configured
def test_get_namespace_ndarray_with_dispatch():
    """Test get_namespace on NumPy ndarrays with array API dispatch enabled."""
    X_np = numpy.asarray([[1, 2, 3]])
    with config_context(array_api_dispatch=True):
        xp_out, is_array_api_compliant = get_namespace(X_np)
        assert is_array_api_compliant
        # In the future, NumPy should become API compliant library and we should have
        # assert xp_out is numpy
        assert xp_out is np_compat
@skip_if_array_api_compat_not_configured
@pytest.mark.parametrize(
    "constructor_name", ["pyarrow", "dataframe", "polars", "series"]
)
def test_get_namespace_df_with_dispatch(constructor_name):
    """Test get_namespace on dataframes and series: dispatch never applies."""
    df = _convert_container([[1, 4, 2], [3, 3, 6]], constructor_name)
    with config_context(array_api_dispatch=True):
        xp_out, is_array_api_compliant = get_namespace(df)
        assert not is_array_api_compliant
        # When operating on dataframes or series the Numpy namespace is
        # the right thing to use.
        assert xp_out is np_compat
@skip_if_array_api_compat_not_configured
def test_get_namespace_sparse_with_dispatch():
    """Test get_namespace on sparse arrays: dispatch never applies to them."""
    with config_context(array_api_dispatch=True):
        xp_out, is_array_api_compliant = get_namespace(sp.csr_array([[1, 2, 3]]))
        assert not is_array_api_compliant
        # When operating on sparse arrays the Numpy namespace is
        # the right thing to use.
        assert xp_out is np_compat
@skip_if_array_api_compat_not_configured
def test_get_namespace_array_api(monkeypatch):
    """Test get_namespace for ArrayAPI arrays.

    Also checks that a helpful RuntimeError is raised when scipy's own array
    API support is disabled via the SCIPY_ARRAY_API environment variable.
    """
    xp = pytest.importorskip("array_api_strict")
    X_np = numpy.asarray([[1, 2, 3]])
    X_xp = xp.asarray(X_np)
    with config_context(array_api_dispatch=True):
        xp_out, is_array_api_compliant = get_namespace(X_xp)
        assert is_array_api_compliant
        # Mixing inputs from different namespaces is rejected.
        with pytest.raises(TypeError):
            xp_out, is_array_api_compliant = get_namespace(X_xp, X_np)

        def mock_getenv(key, default=None):
            # Keep the same signature as `os.environ.get` so that any
            # unrelated two-argument `environ.get(key, default)` call made
            # while the patch is active keeps working.
            if key == "SCIPY_ARRAY_API":
                return "0"
            return default

        monkeypatch.setattr("os.environ.get", mock_getenv)
        assert os.environ.get("SCIPY_ARRAY_API") != "1"
        with pytest.raises(
            RuntimeError,
            match="scipy's own support is not enabled.",
        ):
            get_namespace(X_xp)
@pytest.mark.parametrize(
    "array_input, reference",
    [
        pytest.param(("cupy", None), ("torch", "cuda"), id="cupy to torch cuda"),
        pytest.param(("torch", "mps"), ("numpy", None), id="torch mps to numpy"),
        pytest.param(("numpy", None), ("torch", "cuda"), id="numpy to torch cuda"),
        pytest.param(("numpy", None), ("torch", "mps"), id="numpy to torch mps"),
        pytest.param(
            ("array_api_strict", None),
            ("torch", "mps"),
            id="array_api_strict to torch mps",
        ),
    ],
)
def test_move_to_array_api_conversions(array_input, reference):
    """Check conversion between various namespace and devices."""
    if array_input[0] == "array_api_strict":
        array_api_strict = pytest.importorskip(
            "array_api_strict", reason="array-api-strict not available"
        )
    xp = _array_api_for_tests(reference[0], reference[1])
    xp_array = _array_api_for_tests(array_input[0], array_input[1])
    with config_context(array_api_dispatch=True):
        # target device object, obtained from an array created on it
        device_ = device(xp.asarray([1], device=reference[1]))
        # array-api-strict needs a Device object rather than a string name
        if array_input[0] == "array_api_strict":
            array_device = array_api_strict.Device("CPU_DEVICE")
        else:
            array_device = array_input[1]
        array = xp_array.asarray([1, 2, 3], device=array_device)
        array_out = move_to(array, xp=xp, device=device_)
        # the result must live in the target namespace on the target device
        assert get_namespace(array_out)[0] == xp
        assert device(array_out) == device_
def test_move_to_sparse():
    """Check sparse inputs are handled correctly."""
    xp_numpy = _array_api_for_tests("numpy", None)
    xp_torch = _array_api_for_tests("torch", "cpu")
    sparse1 = sp.csr_array([0, 1, 2, 3])
    sparse2 = sp.csr_array([0, 1, 0, 1])
    numpy_array = numpy.array([1, 2, 3])
    with config_context(array_api_dispatch=True):
        device_cpu = xp_torch.asarray([1]).device
        # sparse and None to NumPy: passed through unchanged (same objects)
        result1, result2 = move_to(sparse1, None, xp=xp_numpy, device=None)
        assert result1 is sparse1
        assert result2 is None
        # sparse to non-NumPy: rejected, whether mixed with dense or alone
        msg = r"Sparse arrays are only accepted \(and passed through\)"
        with pytest.raises(TypeError, match=msg):
            move_to(sparse1, numpy_array, xp=xp_torch, device=device_cpu)
        with pytest.raises(TypeError, match=msg):
            move_to(sparse1, None, xp=xp_torch, device=device_cpu)
@pytest.mark.parametrize("array_api", ["numpy", "array_api_strict"])
def test_asarray_with_order(array_api):
    """_asarray_with_order forwards the memory-order request for NumPy arrays."""
    xp = pytest.importorskip(array_api)
    converted = _asarray_with_order(xp.asarray([1.2, 3.4, 5.1]), order="F", xp=xp)
    # round-trip through NumPy to inspect the memory layout flags
    assert numpy.asarray(converted).flags["F_CONTIGUOUS"]
@pytest.mark.parametrize(
    "array_namespace, device_, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize(
    "weights, axis, normalize, expected",
    [
        # normalize = True
        (None, None, True, 3.5),
        (None, 0, True, [2.5, 3.5, 4.5]),
        (None, 1, True, [2, 5]),
        ([True, False], 0, True, [1, 2, 3]),  # boolean weights
        ([True, True, False], 1, True, [1.5, 4.5]),  # boolean weights
        ([0.4, 0.1], 0, True, [1.6, 2.6, 3.6]),
        ([0.4, 0.2, 0.2], 1, True, [1.75, 4.75]),
        ([1, 2], 0, True, [3, 4, 5]),
        ([1, 1, 2], 1, True, [2.25, 5.25]),
        ([[1, 2, 3], [1, 2, 3]], 0, True, [2.5, 3.5, 4.5]),
        ([[1, 2, 1], [2, 2, 2]], 1, True, [2, 5]),
        # normalize = False
        (None, None, False, 21),
        (None, 0, False, [5, 7, 9]),
        (None, 1, False, [6, 15]),
        ([True, False], 0, False, [1, 2, 3]),  # boolean weights
        ([True, True, False], 1, False, [3, 9]),  # boolean weights
        ([0.4, 0.1], 0, False, [0.8, 1.3, 1.8]),
        ([0.4, 0.2, 0.2], 1, False, [1.4, 3.8]),
        ([1, 2], 0, False, [9, 12, 15]),
        ([1, 1, 2], 1, False, [9, 21]),
        ([[1, 2, 3], [1, 2, 3]], 0, False, [5, 14, 27]),
        ([[1, 2, 1], [2, 2, 2]], 1, False, [8, 30]),
    ],
)
def test_average(
    array_namespace, device_, dtype_name, weights, axis, normalize, expected
):
    """_average matches precomputed results across namespaces and devices."""
    xp = _array_api_for_tests(array_namespace, device_)
    array_in = numpy.asarray([[1, 2, 3], [4, 5, 6]], dtype=dtype_name)
    array_in = xp.asarray(array_in, device=device_)
    if weights is not None:
        weights = numpy.asarray(weights, dtype=dtype_name)
        weights = xp.asarray(weights, device=device_)
    with config_context(array_api_dispatch=True):
        result = _average(array_in, axis=axis, weights=weights, normalize=normalize)
    if np_version < parse_version("2.0.0") or np_version >= parse_version("2.1.0"):
        # NumPy 2.0 has a problem with the device attribute of scalar arrays:
        # https://github.com/numpy/numpy/issues/26850
        assert device(array_in) == device(result)
    result = _convert_to_numpy(result, xp)
    assert_allclose(result, expected, atol=_atol_for_type(dtype_name))
@pytest.mark.parametrize(
    "array_namespace, device, dtype_name",
    yield_namespace_device_dtype_combinations(include_numpy_namespaces=False),
    ids=_get_namespace_device_dtype_ids,
)
def test_average_raises_with_wrong_dtype(array_namespace, device, dtype_name):
    """Complex-valued inputs make _average raise NotImplementedError."""
    xp = _array_api_for_tests(array_namespace, device)
    # build a complex array from real and imaginary parts of the tested dtype
    array_in = numpy.asarray([2, 0], dtype=dtype_name) + 1j * numpy.asarray(
        [4, 3], dtype=dtype_name
    )
    complex_type_name = array_in.dtype.name
    if not hasattr(xp, complex_type_name):
        # This is the case for cupy as of March 2024 for instance.
        pytest.skip(f"{array_namespace} does not support {complex_type_name}")
    array_in = xp.asarray(array_in, device=device)
    err_msg = "Complex floating point values are not supported by average."
    with (
        config_context(array_api_dispatch=True),
        pytest.raises(NotImplementedError, match=err_msg),
    ):
        _average(array_in)
@pytest.mark.parametrize(
    "array_namespace, device, dtype_name",
    yield_namespace_device_dtype_combinations(include_numpy_namespaces=True),
    ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize(
    "axis, weights, error, error_msg",
    (
        (
            None,
            [1, 2],
            TypeError,
            "Axis must be specified",
        ),
        (
            0,
            [[1, 2]],
            # NumPy 2 raises ValueError, NumPy 1 raises TypeError
            (ValueError, TypeError),
            "weights",  # the message is different for NumPy 1 and 2...
        ),
        (
            0,
            [1, 2, 3, 4],
            ValueError,
            "weights",
        ),
        (0, [-1, 1], ZeroDivisionError, "Weights sum to zero, can't be normalized"),
    ),
)
def test_average_raises_with_invalid_parameters(
    array_namespace, device, dtype_name, axis, weights, error, error_msg
):
    """Invalid axis/weights combinations raise the same errors as numpy.average."""
    xp = _array_api_for_tests(array_namespace, device)
    array_in = numpy.asarray([[1, 2, 3], [4, 5, 6]], dtype=dtype_name)
    array_in = xp.asarray(array_in, device=device)
    weights = numpy.asarray(weights, dtype=dtype_name)
    weights = xp.asarray(weights, device=device)
    with config_context(array_api_dispatch=True), pytest.raises(error, match=error_msg):
        _average(array_in, axis=axis, weights=weights)
def test_device_none_if_no_input():
    """Calling `device` with no arrays (or only non-array inputs) yields None."""
    for result in (device(), device(None, "name")):
        assert result is None
@skip_if_array_api_compat_not_configured
def test_device_inspection():
    """`device` handles non-hashable devices and mismatched-device inputs."""
    class Device:
        # minimal mock device: equality by name, deliberately unhashable
        def __init__(self, name):
            self.name = name
        def __eq__(self, device):
            return self.name == device.name
        def __hash__(self):
            raise TypeError("Device object is not hashable")
        def __str__(self):
            return self.name
    class Array:
        # minimal mock array exposing only a `.device` attribute
        def __init__(self, device_name):
            self.device = Device(device_name)
    # Sanity check: ensure our Device mock class is non hashable, to
    # accurately account for non-hashable device objects in some array
    # libraries, because of which the `device` inspection function shouldn't
    # make use of hash lookup tables (in particular, not use `set`)
    with pytest.raises(TypeError):
        hash(Array("device").device)
    # If array API dispatch is disabled the device should be ignored. Erroring
    # early for different devices would prevent the np.asarray conversion to
    # happen. For example, `r2_score(np.ones(5), torch.ones(5))` should work
    # fine with array API disabled.
    assert device(Array("cpu"), Array("mygpu")) is None
    # Test that ValueError is raised if on different devices and array API dispatch is
    # enabled.
    err_msg = "Input arrays use different devices: cpu, mygpu"
    with config_context(array_api_dispatch=True):
        with pytest.raises(ValueError, match=err_msg):
            device(Array("cpu"), Array("mygpu"))
        # Test expected value is returned otherwise
        array1 = Array("device")
        array2 = Array("device")
        assert array1.device == device(array1)
        assert array1.device == device(array1, array2)
        assert array1.device == device(array1, array1, array2)
# TODO: add cupy to the list of libraries once the following upstream issue
# has been fixed:
# https://github.com/cupy/cupy/issues/8180
@skip_if_array_api_compat_not_configured
@pytest.mark.parametrize("library", ["numpy", "array_api_strict", "torch"])
@pytest.mark.parametrize(
    "X,reduction,expected",
    [
        ([1, 2, numpy.nan], _nanmin, 1),
        ([1, -2, -numpy.nan], _nanmin, -2),
        ([numpy.inf, numpy.inf], _nanmin, numpy.inf),
        (
            [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
            partial(_nanmin, axis=0),
            [1.0, 2.0, 3.0],
        ),
        (
            [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
            partial(_nanmin, axis=1),
            [1.0, numpy.nan, 4.0],
        ),
        ([1, 2, numpy.nan], _nanmax, 2),
        # negative values with a NaN, mirroring the _nanmin case above (this
        # entry used to be an exact duplicate of the previous one)
        ([1, -2, -numpy.nan], _nanmax, 1),
        ([-numpy.inf, -numpy.inf], _nanmax, -numpy.inf),
        (
            [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
            partial(_nanmax, axis=0),
            [4.0, 5.0, 6.0],
        ),
        (
            [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
            partial(_nanmax, axis=1),
            [3.0, numpy.nan, 6.0],
        ),
        ([1, 2, numpy.nan], _nanmean, 1.5),
        ([1, -2, -numpy.nan], _nanmean, -0.5),
        ([-numpy.inf, -numpy.inf], _nanmean, -numpy.inf),
        (
            [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
            partial(_nanmean, axis=0),
            [2.5, 3.5, 4.5],
        ),
        (
            [[1, 2, 3], [numpy.nan, numpy.nan, numpy.nan], [4, 5, 6.0]],
            partial(_nanmean, axis=1),
            [2.0, numpy.nan, 5.0],
        ),
    ],
)
def test_nan_reductions(library, X, reduction, expected):
    """Check NaN reductions like _nanmin and _nanmax"""
    xp = pytest.importorskip(library)
    with config_context(array_api_dispatch=True):
        result = reduction(xp.asarray(X))
    result = _convert_to_numpy(result, xp)
    assert_allclose(result, expected)
@pytest.mark.parametrize(
    "namespace, _device, _dtype",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_ravel(namespace, _device, _dtype):
    """_ravel matches numpy.ravel in C order and yields contiguous NumPy output."""
    xp = _array_api_for_tests(namespace, _device)
    array = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
    array_xp = xp.asarray(array, device=_device)
    with config_context(array_api_dispatch=True):
        result = _ravel(array_xp)
    result = _convert_to_numpy(result, xp)
    expected = numpy.ravel(array, order="C")
    assert_allclose(expected, result)
    if _is_numpy_namespace(xp):
        # for NumPy inputs, the flattened result must be C-contiguous
        assert numpy.asarray(result).flags["C_CONTIGUOUS"]
@skip_if_array_api_compat_not_configured
@pytest.mark.parametrize("library", ["cupy", "torch"])
def test_convert_to_numpy_gpu(library):  # pragma: nocover
    """Check convert_to_numpy for GPU backed libraries."""
    xp = pytest.importorskip(library)
    if library == "torch":
        if not xp.backends.cuda.is_built():
            pytest.skip("test requires cuda")
        X_gpu = xp.asarray([1.0, 2.0, 3.0], device="cuda")
    else:
        # cupy arrays are allocated on the GPU by default
        X_gpu = xp.asarray([1.0, 2.0, 3.0])
    X_cpu = _convert_to_numpy(X_gpu, xp=xp)
    expected_output = numpy.asarray([1.0, 2.0, 3.0])
    assert_allclose(X_cpu, expected_output)
def test_convert_to_numpy_cpu():
    """_convert_to_numpy round-trips a CPU PyTorch tensor to a NumPy array."""
    torch = pytest.importorskip("torch")
    converted = _convert_to_numpy(
        torch.asarray([1.0, 2.0, 3.0], device="cpu"), xp=torch
    )
    assert_allclose(converted, numpy.asarray([1.0, 2.0, 3.0]))
class SimpleEstimator(BaseEstimator):
    """Minimal estimator that stores its training data, used by conversion tests."""
    def fit(self, X, y=None):
        self.X_ = X
        # NOTE(review): X.shape[0] is the number of samples, not features —
        # the attribute name looks off, but it is unused by the tests here.
        self.n_features_ = X.shape[0]
        return self
@skip_if_array_api_compat_not_configured
@pytest.mark.parametrize(
    "array_namespace, converter",
    [
        # each library has its own way to pull an array back to host NumPy
        ("torch", lambda array: array.cpu().numpy()),
        ("array_api_strict", lambda array: numpy.asarray(array)),
        ("cupy", lambda array: array.get()),
    ],
)
def test_convert_estimator_to_ndarray(array_namespace, converter):
    """Convert estimator attributes to ndarray."""
    xp = pytest.importorskip(array_namespace)
    X = xp.asarray([[1.3, 4.5]])
    est = SimpleEstimator().fit(X)
    new_est = _estimator_with_converted_arrays(est, converter)
    assert isinstance(new_est.X_, numpy.ndarray)
@skip_if_array_api_compat_not_configured
def test_convert_estimator_to_array_api():
    """Convert estimator attributes to ArrayAPI arrays."""
    xp = pytest.importorskip("array_api_strict")
    fitted = SimpleEstimator().fit(numpy.asarray([[1.3, 4.5]]))
    converted = _estimator_with_converted_arrays(
        fitted, lambda array: xp.asarray(array)
    )
    # the stored attribute is now an array API compliant array
    assert hasattr(converted.X_, "__array_namespace__")
@pytest.mark.parametrize(
    "namespace, _device, _dtype",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_indexing_dtype(namespace, _device, _dtype):
    """The indexing dtype follows the platform pointer width."""
    xp = _array_api_for_tests(namespace, _device)
    expected = xp.int32 if _IS_32BIT else xp.int64
    assert indexing_dtype(xp) == expected
@pytest.mark.parametrize(
    "namespace, _device, _dtype",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_max_precision_float_dtype(namespace, _device, _dtype):
    """Highest-precision float dtype is float64, except float32 on MPS."""
    xp = _array_api_for_tests(namespace, _device)
    # the MPS backend does not support float64
    expected_dtype = xp.float32 if _device == "mps" else xp.float64
    assert _max_precision_float_dtype(xp, _device) == expected_dtype
@pytest.mark.parametrize(
    "array_namespace, device, _",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize("invert", [True, False])
@pytest.mark.parametrize("assume_unique", [True, False])
@pytest.mark.parametrize("element_size", [6, 10, 14])
@pytest.mark.parametrize("int_dtype", ["int16", "int32", "int64", "uint8"])
def test_isin(
    array_namespace, device, _, invert, assume_unique, element_size, int_dtype
):
    """_isin matches numpy.isin across namespaces, dtypes and flag combinations."""
    xp = _array_api_for_tests(array_namespace, device)
    r = element_size // 2
    # 2D array of even integers so only some test elements are present
    element = 2 * numpy.arange(element_size).reshape((r, 2)).astype(int_dtype)
    test_elements = numpy.array(numpy.arange(14), dtype=int_dtype)
    element_xp = xp.asarray(element, device=device)
    test_elements_xp = xp.asarray(test_elements, device=device)
    expected = numpy.isin(
        element=element,
        test_elements=test_elements,
        assume_unique=assume_unique,
        invert=invert,
    )
    with config_context(array_api_dispatch=True):
        result = _isin(
            element=element_xp,
            test_elements=test_elements_xp,
            xp=xp,
            assume_unique=assume_unique,
            invert=invert,
        )
    assert_array_equal(_convert_to_numpy(result, xp=xp), expected)
@pytest.mark.skipif(
    os.environ.get("SCIPY_ARRAY_API") != "1", reason="SCIPY_ARRAY_API not set to 1."
)
def test_get_namespace_and_device():
    """get_namespace_and_device only exposes the real namespace under dispatch."""
    # Use torch as a library with custom Device objects:
    torch = pytest.importorskip("torch")
    from sklearn.externals.array_api_compat import torch as torch_compat
    some_torch_tensor = torch.arange(3, device="cpu")
    some_numpy_array = numpy.arange(3)
    # When dispatch is disabled, get_namespace_and_device should return the
    # default NumPy wrapper namespace and "cpu" device. Our code will handle such
    # inputs via the usual __array__ interface without attempting to dispatch
    # via the array API.
    namespace, is_array_api, device = get_namespace_and_device(some_torch_tensor)
    assert namespace is get_namespace(some_numpy_array)[0]
    assert not is_array_api
    assert device is None
    # Otherwise, expose the torch namespace and device via array API compat
    # wrapper.
    with config_context(array_api_dispatch=True):
        namespace, is_array_api, device = get_namespace_and_device(some_torch_tensor)
        assert namespace is torch_compat
        assert is_array_api
        assert device == some_torch_tensor.device
@pytest.mark.parametrize(
    "array_namespace, device_, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("axis", [0, 1, None, -1, -2])
@pytest.mark.parametrize("sample_weight_type", [None, "int", "float"])
def test_count_nonzero(
    array_namespace, device_, dtype_name, csr_container, axis, sample_weight_type
):
    """_count_nonzero agrees with the sparse count_nonzero reference."""
    from sklearn.utils.sparsefuncs import count_nonzero as sparse_count_nonzero
    xp = _array_api_for_tests(array_namespace, device_)
    array = numpy.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]])
    if sample_weight_type == "int":
        sample_weight = numpy.asarray([1, 2, 2, 3, 1])
    elif sample_weight_type == "float":
        sample_weight = numpy.asarray([0.5, 1.5, 0.8, 3.2, 2.4], dtype=dtype_name)
    else:
        sample_weight = None
    # reference result computed on the CSR representation of the same data
    expected = sparse_count_nonzero(
        csr_container(array), axis=axis, sample_weight=sample_weight
    )
    array_xp = xp.asarray(array, device=device_)
    with config_context(array_api_dispatch=True):
        result = _count_nonzero(
            array_xp, axis=axis, sample_weight=sample_weight, xp=xp, device=device_
        )
    assert_allclose(_convert_to_numpy(result, xp=xp), expected)
    if np_version < parse_version("2.0.0") or np_version >= parse_version("2.1.0"):
        # NumPy 2.0 has a problem with the device attribute of scalar arrays:
        # https://github.com/numpy/numpy/issues/26850
        assert device(array_xp) == device(result)
@pytest.mark.parametrize(
    "array, value, match",
    [
        # non-2D array
        (numpy.array([1, 2, 3]), 1, "`array` should be 2D"),
        # value longer than the diagonal
        (numpy.array([[1, 2], [3, 4]]), numpy.array([1, 2, 3]), "`value` needs to be"),
        (numpy.array([[1, 2], [3, 4]]), [1, 2, 3], "`value` needs to be"),
        # 2D value is not accepted
        (
            numpy.array([[1, 2], [3, 4]]),
            numpy.array([[1, 2], [3, 4]]),
            "`value` needs to be a",
        ),
    ],
)
def test_validate_diagonal_args(array, value, match):
    """Check `_validate_diagonal_args` raises the correct errors."""
    xp = _array_api_for_tests("numpy", None)
    with pytest.raises(ValueError, match=match):
        _validate_diagonal_args(array, value, xp)
@pytest.mark.parametrize("function", ["fill", "add"])
@pytest.mark.parametrize("c_contiguity", [True, False])
def test_fill_and_add_to_diagonal(c_contiguity, function):
    """Check `_fill/add_to_diagonal` behaviour correct with numpy arrays."""
    xp = _array_api_for_tests("numpy", None)
    if c_contiguity:
        array = numpy.zeros((3, 4))
    else:
        # transposing flips the contiguity flag
        array = numpy.zeros((3, 4)).T
    assert array.flags["C_CONTIGUOUS"] == c_contiguity
    if function == "fill":
        func = _fill_diagonal
    else:
        func = _add_to_diagonal
    # scalar value: all diagonal entries set (or incremented) to 1
    func(array, 1, xp)
    assert_allclose(array.diagonal(), numpy.ones((3,)))
    # list value: fill replaces, add accumulates on top of the previous ones
    func(array, [0, 1, 2], xp)
    if function == "fill":
        expected_diag = numpy.arange(3)
    else:
        expected_diag = numpy.ones((3,)) + numpy.arange(3)
    assert_allclose(array.diagonal(), expected_diag)
    # array value: same replace-vs-accumulate contract
    fill_array = numpy.array([11, 12, 13])
    func(array, fill_array, xp)
    if function == "fill":
        expected_diag = fill_array
    else:
        expected_diag = fill_array + numpy.arange(3) + numpy.ones((3,))
    assert_allclose(array.diagonal(), expected_diag)
@pytest.mark.parametrize("array", ["standard", "transposed", "non-contiguous"])
@pytest.mark.parametrize(
    "array_namespace, device_, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_fill_diagonal(array, array_namespace, device_, dtype_name):
    """Check array API `_fill_diagonal` consistent with `numpy._fill_diagonal`."""
    xp = _array_api_for_tests(array_namespace, device_)
    array_np = numpy.zeros((4, 5), dtype=dtype_name)
    # exercise transposed and strided memory layouts in addition to the default
    if array == "transposed":
        array_xp = xp.asarray(array_np.copy(), device=device_).T
        array_np = array_np.T
    elif array == "non-contiguous":
        array_xp = xp.asarray(array_np.copy(), device=device_)[::2, ::2]
        array_np = array_np[::2, ::2]
    else:
        array_xp = xp.asarray(array_np.copy(), device=device_)
    numpy.fill_diagonal(array_np, val=1)
    with config_context(array_api_dispatch=True):
        _fill_diagonal(array_xp, value=1, xp=xp)
    assert_array_equal(_convert_to_numpy(array_xp, xp=xp), array_np)
@pytest.mark.parametrize(
    "array_namespace, device_, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_add_to_diagonal(array_namespace, device_, dtype_name):
    """Check `_add_to_diagonal` consistent between array API xp and numpy namespace."""
    xp = _array_api_for_tests(array_namespace, device_)
    np_xp = _array_api_for_tests("numpy", None)
    array_np = numpy.zeros((3, 4), dtype=dtype_name)
    array_xp = xp.asarray(array_np.copy(), device=device_)
    add_val = [1, 2, 3]
    # Apply the addition twice: starting from zeros, a single application
    # cannot distinguish "add" from "fill"; the second application checks
    # that values actually accumulate on the diagonal.
    for _ in range(2):
        _add_to_diagonal(array_np, value=add_val, xp=np_xp)
    with config_context(array_api_dispatch=True):
        for _ in range(2):
            _add_to_diagonal(array_xp, value=add_val, xp=xp)
    assert_array_equal(_convert_to_numpy(array_xp, xp=xp), array_np)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("dispatch", [True, False])
def test_sparse_device(csr_container, dispatch):
    """Sparse inputs report no device; mixed sparse/dense follows the dense one."""
    np_arr = numpy.array([1])
    # For numpy < 2, the device attribute is not available on numpy arrays
    expected_numpy_array_device = getattr(np_arr, "device", None) if dispatch else None
    a, b = csr_container(numpy.array([[1]])), csr_container(numpy.array([[2]]))
    if dispatch and os.environ.get("SCIPY_ARRAY_API") is None:
        raise SkipTest("SCIPY_ARRAY_API is not set: not checking array_api input")
    with config_context(array_api_dispatch=dispatch):
        assert device(a, b) is None
        assert device(a, np_arr) == expected_numpy_array_device
        assert get_namespace_and_device(a, b)[2] is None
        assert get_namespace_and_device(a, np_arr)[2] == expected_numpy_array_device
@pytest.mark.parametrize(
    "namespace, device, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_median(namespace, device, dtype_name, axis):
    """_median agrees with numpy.median across namespaces, devices and axes."""
    # Note: depending on the value of `axis`, this test will compare median
    # computations on arrays of even (4) or odd (5) numbers of elements, hence
    # will test for median computation with and without interpolation to check
    # that array API namespaces yield consistent results even when the median is
    # not mathematically uniquely defined.
    xp = _array_api_for_tests(namespace, device)
    rng = numpy.random.RandomState(0)
    X_np = rng.uniform(low=0.0, high=1.0, size=(5, 4)).astype(dtype_name)
    result_np = numpy.median(X_np, axis=axis)
    X_xp = xp.asarray(X_np, device=device)
    with config_context(array_api_dispatch=True):
        result_xp = _median(X_xp, axis=axis)
        if xp.__name__ != "array_api_strict":
            # We convert array-api-strict arrays to numpy arrays as `median` is not
            # part of the Array API spec
            assert get_namespace(result_xp)[0] == xp
            assert result_xp.device == X_xp.device
    assert_allclose(result_np, _convert_to_numpy(result_xp, xp=xp))
@pytest.mark.parametrize(
    "array_namespace, device_, dtype_name", yield_namespace_device_dtype_combinations()
)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_logsumexp_like_scipy_logsumexp(array_namespace, device_, dtype_name, axis):
    """_logsumexp matches scipy.special.logsumexp, including extreme values."""
    xp = _array_api_for_tests(array_namespace, device_)
    # large values (1000) exercise the overflow-avoiding max-shift trick
    array_np = numpy.asarray(
        [
            [0, 3, 1000],
            [2, -1, 1000],
            [-10, 0, 0],
            [-50, 8, -numpy.inf],
            [4, 0, 5],
        ],
        dtype=dtype_name,
    )
    array_xp = xp.asarray(array_np, device=device_)
    res_np = scipy.special.logsumexp(array_np, axis=axis)
    rtol = 1e-6 if "float32" in str(dtype_name) else 1e-12
    # if torch on CPU or array api strict on default device
    # check that _logsumexp works when array API dispatch is disabled
    if (array_namespace == "torch" and device_ == "cpu") or (
        array_namespace == "array_api_strict" and "CPU" in str(device_)
    ):
        assert_allclose(_logsumexp(array_xp, axis=axis), res_np, rtol=rtol)
    with config_context(array_api_dispatch=True):
        res_xp = _logsumexp(array_xp, axis=axis)
    res_xp = _convert_to_numpy(res_xp, xp)
    assert_allclose(res_np, res_xp, rtol=rtol)
    # Test with NaNs and +np.inf
    array_np_2 = numpy.asarray(
        [
            [0, numpy.nan, 1000],
            [2, -1, 1000],
            [numpy.inf, 0, 0],
            [-50, 8, -numpy.inf],
            [4, 0, 5],
        ],
        dtype=dtype_name,
    )
    array_xp_2 = xp.asarray(array_np_2, device=device_)
    res_np_2 = scipy.special.logsumexp(array_np_2, axis=axis)
    with config_context(array_api_dispatch=True):
        res_xp_2 = _logsumexp(array_xp_2, axis=axis)
    res_xp_2 = _convert_to_numpy(res_xp_2, xp)
    assert_allclose(res_np_2, res_xp_2, rtol=rtol)
@pytest.mark.parametrize(
    ("namespace", "device_", "expected_types"),
    [
        ("numpy", None, ("float64", "float32", "float16")),
        ("array_api_strict", None, ("float64", "float32")),
        ("torch", "cpu", ("float64", "float32", "float16")),
        ("torch", "cuda", ("float64", "float32", "float16")),
        ("torch", "mps", ("float32", "float16")),
    ],
)
def test_supported_float_types(namespace, device_, expected_types):
    """supported_float_dtypes reports the float dtypes usable on each device."""
    xp = _array_api_for_tests(namespace, device_)
    expected = tuple(getattr(xp, name) for name in expected_types)
    assert supported_float_dtypes(xp, device=device_) == expected
@pytest.mark.parametrize("use_sample_weight", [False, True])
@pytest.mark.parametrize(
"namespace, device_, dtype_name", yield_namespace_device_dtype_combinations()
)
def test_half_multinomial_loss(use_sample_weight, namespace, device_, dtype_name):
"""Check that the array API version of :func:`_half_multinomial_loss` works
correctly and matches the results produced by :class:`HalfMultinomialLoss`
of the private `_loss` module.
"""
n_samples = 5
n_classes = 3
rng = numpy.random.RandomState(42)
y = rng.randint(0, n_classes, n_samples).astype(dtype_name)
pred = rng.rand(n_samples, n_classes).astype(dtype_name)
xp = _array_api_for_tests(namespace, device_)
y_xp = xp.asarray(y, device=device_)
pred_xp = xp.asarray(pred, device=device_)
if use_sample_weight:
sample_weight = numpy.ones_like(y)
sample_weight[1::2] = 2
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_missing.py | sklearn/utils/tests/test_missing.py | import numpy as np
import pytest
from sklearn.utils._missing import is_scalar_nan
@pytest.mark.parametrize(
    "value, result",
    [
        (float("nan"), True),
        (np.nan, True),
        (float(np.nan), True),
        (np.float32(np.nan), True),
        (np.float64(np.nan), True),
        (0, False),
        (0.0, False),
        (None, False),
        ("", False),
        ("nan", False),
        ([np.nan], False),
        (9867966753463435747313673, False),  # Python int that overflows with C type
    ],
)
def test_is_scalar_nan(value, result):
    """is_scalar_nan detects scalar NaNs and always returns a Python bool."""
    outcome = is_scalar_nan(value)
    # the return type must be a builtin bool, not e.g. numpy.bool_
    assert isinstance(outcome, bool)
    assert outcome is result
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_parallel.py | sklearn/utils/tests/test_parallel.py | import itertools
import re
import time
import warnings
import joblib
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn import config_context, get_config
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils.fixes import _IS_WASM
from sklearn.utils.parallel import Parallel, delayed
def get_working_memory():
    # Helper executed inside joblib workers to read back the propagated
    # global scikit-learn configuration.
    return get_config()["working_memory"]
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("backend", ["loky", "threading", "multiprocessing"])
def test_configuration_passes_through_to_joblib(n_jobs, backend):
    # Tests that the global scikit-learn configuration is passed to joblib jobs
    with config_context(working_memory=123):
        results = Parallel(n_jobs=n_jobs, backend=backend)(
            delayed(get_working_memory)() for _ in range(2)
        )
    assert_array_equal(results, [123] * 2)
def test_parallel_delayed_warnings():
    """Informative warnings should be raised when mixing sklearn and joblib API"""
    # We should issue a warning when one wants to use sklearn.utils.parallel.Parallel
    # with joblib.delayed. The config will not be propagated to the workers.
    warn_msg = "`sklearn.utils.parallel.Parallel` needs to be used in conjunction"
    with pytest.warns(UserWarning, match=warn_msg) as records:
        Parallel()(joblib.delayed(time.sleep)(0) for _ in range(10))
    assert len(records) == 10

    # We should issue a warning if one wants to use sklearn.utils.parallel.delayed
    # with joblib.Parallel
    warn_msg = (
        "`sklearn.utils.parallel.delayed` should be used with "
        "`sklearn.utils.parallel.Parallel` to make it possible to propagate"
    )
    with pytest.warns(UserWarning, match=warn_msg) as records:
        joblib.Parallel()(delayed(time.sleep)(0) for _ in range(10))
    assert len(records) == 10
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_dispatch_config_parallel(n_jobs):
    """Check that we properly dispatch the configuration in parallel processing.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/25239
    """
    pd = pytest.importorskip("pandas")
    iris = load_iris(as_frame=True)

    # Scaler that asserts its inputs are DataFrames: only succeeds when the
    # `transform_output="pandas"` config reaches the (possibly remote) workers.
    class TransformerRequiredDataFrame(StandardScaler):
        def fit(self, X, y=None):
            assert isinstance(X, pd.DataFrame), "X should be a DataFrame"
            return super().fit(X, y)

        def transform(self, X, y=None):
            assert isinstance(X, pd.DataFrame), "X should be a DataFrame"
            return super().transform(X, y)

    dropper = make_column_transformer(
        ("drop", [0]),
        remainder="passthrough",
        n_jobs=n_jobs,
    )
    param_grid = {"randomforestclassifier__max_depth": [1, 2, 3]}
    search_cv = GridSearchCV(
        make_pipeline(
            dropper,
            TransformerRequiredDataFrame(),
            RandomForestClassifier(n_estimators=5, n_jobs=n_jobs),
        ),
        param_grid,
        cv=5,
        n_jobs=n_jobs,
        error_score="raise",  # this search should not fail
    )

    # make sure that `fit` would fail in case we don't request dataframe
    with pytest.raises(AssertionError, match="X should be a DataFrame"):
        search_cv.fit(iris.data, iris.target)

    with config_context(transform_output="pandas"):
        # we expect each intermediate steps to output a DataFrame
        search_cv.fit(iris.data, iris.target)

    assert not np.isnan(search_cv.cv_results_["mean_test_score"]).any()
def raise_warning():
    """Emit a ConvergenceWarning; helper used to test warning propagation."""
    warnings.warn("Convergence warning", category=ConvergenceWarning)
def _yield_n_jobs_backend_combinations():
    """Yield all (n_jobs, backend) combinations used to parametrize the test
    below, marking the (2, "loky") case thread-unsafe."""
    n_jobs_values = [1, 2]
    backend_values = ["loky", "threading", "multiprocessing"]
    for n_jobs, backend in itertools.product(n_jobs_values, backend_values):
        if n_jobs == 2 and backend == "loky":
            # XXX Mark thread-unsafe to avoid:
            # RuntimeError: The executor underlying Parallel has been shutdown.
            # See https://github.com/joblib/joblib/issues/1743 for more details.
            yield pytest.param(n_jobs, backend, marks=pytest.mark.thread_unsafe)
        else:
            yield n_jobs, backend
@pytest.mark.parametrize("n_jobs, backend", _yield_n_jobs_backend_combinations())
def test_filter_warning_propagates(n_jobs, backend):
    """Check warning propagates to the job."""
    with warnings.catch_warnings():
        # The "error" filter should be propagated to the workers, turning the
        # warning raised there into an exception surfaced here.
        warnings.simplefilter("error", category=ConvergenceWarning)
        with pytest.raises(ConvergenceWarning):
            Parallel(n_jobs=n_jobs, backend=backend)(
                delayed(raise_warning)() for _ in range(2)
            )
def get_warning_filters():
    """Return the currently active list of warnings filters.

    In free-threading Python >= 3.14, warnings filters are managed through a
    ContextVar and ``warnings.filters`` is not modified inside a
    ``warnings.catch_warnings`` context; ``warnings._get_filters()`` must be
    used instead. For more details, see
    https://docs.python.org/3.14/whatsnew/3.14.html#concurrent-safe-warnings-control
    """
    if hasattr(warnings, "_get_filters"):
        return warnings._get_filters()
    return warnings.filters
def test_check_warnings_threading():
    """Check that warnings filters are set correctly in the threading backend."""
    with warnings.catch_warnings():
        warnings.simplefilter("error", category=ConvergenceWarning)
        main_warning_filters = get_warning_filters()
        assert ("error", None, ConvergenceWarning, None, 0) in main_warning_filters
        all_worker_warning_filters = Parallel(n_jobs=2, backend="threading")(
            delayed(get_warning_filters)() for _ in range(2)
        )

    def normalize_main_module(filters):
        # In Python 3.14 free-threaded, there is a small discrepancy: the main
        # warning filters have an entry with module = "__main__" whereas it
        # is a regex in the workers. Normalize such regex entries to their
        # pattern string so both sides compare equal.
        return [
            (
                action,
                message,
                type_,
                module
                if "__main__" not in str(module)
                or not isinstance(module, re.Pattern)
                else module.pattern,
                lineno,
            )
            # Bug fix: iterate over the `filters` argument. The original
            # iterated the closed-over `main_warning_filters`, so the
            # assertion below compared a list with itself and passed
            # vacuously regardless of the worker filters.
            for action, message, type_, module, lineno in filters
        ]

    for worker_warning_filter in all_worker_warning_filters:
        assert normalize_main_module(
            worker_warning_filter
        ) == normalize_main_module(main_warning_filters)
@pytest.mark.xfail(_IS_WASM, reason="Pyodide always use the sequential backend")
def test_filter_warning_propagates_no_side_effect_with_loky_backend():
    """Check that propagating filters to loky workers leaves no lasting state."""
    with warnings.catch_warnings():
        warnings.simplefilter("error", category=ConvergenceWarning)
        Parallel(n_jobs=2, backend="loky")(delayed(time.sleep)(0) for _ in range(10))

    # Since loky workers are reused, make sure that inside the loky workers,
    # warnings filters have been reset to their original value. Using joblib
    # directly should not turn ConvergenceWarning into an error.
    joblib.Parallel(n_jobs=2, backend="loky")(
        joblib.delayed(warnings.warn)("Convergence warning", ConvergenceWarning)
        for _ in range(10)
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_mocking.py | sklearn/utils/tests/test_mocking.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from scipy import sparse
from sklearn.datasets import load_iris
from sklearn.utils import _safe_indexing, check_array
from sklearn.utils._mocking import (
CheckingClassifier,
_MockEstimatorOnOffPrediction,
)
from sklearn.utils._testing import _convert_container
from sklearn.utils.fixes import CSR_CONTAINERS
@pytest.fixture
def iris():
    """Return the iris dataset as an (X, y) tuple."""
    return load_iris(return_X_y=True)
def _success(x):
return True
def _fail(x):
return False
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        {"check_X": _success},
        {"check_y": _success},
        {"check_X": _success, "check_y": _success},
    ],
)
def test_check_on_fit_success(iris, kwargs):
    """`fit` succeeds when the configured X/y checks pass (or are absent)."""
    X, y = iris
    CheckingClassifier(**kwargs).fit(X, y)
@pytest.mark.parametrize(
    "kwargs",
    [
        {"check_X": _fail},
        {"check_y": _fail},
        {"check_X": _success, "check_y": _fail},
        {"check_X": _fail, "check_y": _success},
        {"check_X": _fail, "check_y": _fail},
    ],
)
def test_check_on_fit_fail(iris, kwargs):
    """`fit` raises AssertionError as soon as any configured check fails."""
    X, y = iris
    clf = CheckingClassifier(**kwargs)
    with pytest.raises(AssertionError):
        clf.fit(X, y)
@pytest.mark.parametrize(
    "pred_func", ["predict", "predict_proba", "decision_function", "score"]
)
def test_check_X_on_predict_success(iris, pred_func):
    """Prediction methods succeed when the `check_X` callback passes."""
    X, y = iris
    clf = CheckingClassifier(check_X=_success).fit(X, y)
    getattr(clf, pred_func)(X)
@pytest.mark.parametrize(
    "pred_func", ["predict", "predict_proba", "decision_function", "score"]
)
def test_check_X_on_predict_fail(iris, pred_func):
    """Prediction methods raise when `check_X` is switched to a failing check
    after a successful fit."""
    X, y = iris
    clf = CheckingClassifier(check_X=_success).fit(X, y)
    clf.set_params(check_X=_fail)
    with pytest.raises(AssertionError):
        getattr(clf, pred_func)(X)
@pytest.mark.parametrize("input_type", ["list", "array", "sparse", "dataframe"])
def test_checking_classifier(iris, input_type):
    """Check that the CheckingClassifier outputs what we expect for all
    supported container types."""
    X, y = iris
    X = _convert_container(X, input_type)
    clf = CheckingClassifier()
    clf.fit(X, y)

    assert_array_equal(clf.classes_, np.unique(y))
    assert len(clf.classes_) == 3
    assert clf.n_features_in_ == 4

    y_pred = clf.predict(X)
    assert all(pred in clf.classes_ for pred in y_pred)

    # score is 0 until foo_param is set > 1, then it is 1
    assert clf.score(X) == pytest.approx(0)
    clf.set_params(foo_param=10)
    assert clf.fit(X, y).score(X) == pytest.approx(1)

    y_proba = clf.predict_proba(X)
    assert y_proba.shape == (150, 3)
    assert np.logical_and(y_proba >= 0, y_proba <= 1).all()

    y_decision = clf.decision_function(X)
    assert y_decision.shape == (150, 3)

    # check the shape in case of binary classification
    first_2_classes = np.logical_or(y == 0, y == 1)
    X = _safe_indexing(X, first_2_classes)
    y = _safe_indexing(y, first_2_classes)
    clf.fit(X, y)

    y_proba = clf.predict_proba(X)
    assert y_proba.shape == (100, 2)
    assert np.logical_and(y_proba >= 0, y_proba <= 1).all()

    y_decision = clf.decision_function(X)
    assert y_decision.shape == (100,)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_checking_classifier_with_params(iris, csr_container):
    """Check `check_X` callback and `check_X_params` keyword forwarding."""
    X, y = iris
    X_sparse = csr_container(X)

    # sparsity check: dense X fails, sparse X passes
    clf = CheckingClassifier(check_X=sparse.issparse)
    with pytest.raises(AssertionError):
        clf.fit(X, y)
    clf.fit(X_sparse, y)

    # check_array with accept_sparse=False: dense passes, sparse raises
    clf = CheckingClassifier(
        check_X=check_array, check_X_params={"accept_sparse": False}
    )
    clf.fit(X, y)
    with pytest.raises(TypeError, match="Sparse data was passed"):
        clf.fit(X_sparse, y)
def test_checking_classifier_fit_params(iris):
    # check the error raised when the number of samples is not the one expected
    X, y = iris
    clf = CheckingClassifier(expected_sample_weight=True)
    # deliberately half as many weights as samples
    sample_weight = np.ones(len(X) // 2)
    msg = f"sample_weight.shape == ({len(X) // 2},), expected ({len(X)},)!"
    with pytest.raises(ValueError) as exc:
        clf.fit(X, y, sample_weight=sample_weight)
    assert exc.value.args[0] == msg
def test_checking_classifier_missing_fit_params(iris):
    """An expected but omitted sample_weight raises an AssertionError."""
    X, y = iris
    clf = CheckingClassifier(expected_sample_weight=True)
    err_msg = "Expected sample_weight to be passed"
    with pytest.raises(AssertionError, match=err_msg):
        clf.fit(X, y)
@pytest.mark.parametrize(
    "methods_to_check",
    [["predict"], ["predict", "predict_proba"]],
)
@pytest.mark.parametrize(
    "predict_method", ["predict", "predict_proba", "decision_function", "score"]
)
def test_checking_classifier_methods_to_check(iris, methods_to_check, predict_method):
    # check that methods_to_check allows to bypass checks
    X, y = iris
    # check_X always fails on the dense X; only methods listed in
    # methods_to_check should run it ("fit" is not listed, so fit succeeds)
    clf = CheckingClassifier(
        check_X=sparse.issparse,
        methods_to_check=methods_to_check,
    )
    clf.fit(X, y)
    if predict_method in methods_to_check:
        with pytest.raises(AssertionError):
            getattr(clf, predict_method)(X)
    else:
        getattr(clf, predict_method)(X)
@pytest.mark.parametrize(
    "response_methods",
    [
        ["predict"],
        ["predict", "predict_proba"],
        ["predict", "decision_function"],
        ["predict", "predict_proba", "decision_function"],
    ],
)
def test_mock_estimator_on_off_prediction(iris, response_methods):
    """Only the requested response methods exist, and each returns its name."""
    X, y = iris
    estimator = _MockEstimatorOnOffPrediction(response_methods=response_methods)

    estimator.fit(X, y)
    assert hasattr(estimator, "classes_")
    assert_array_equal(estimator.classes_, np.unique(y))

    possible_responses = ["predict", "predict_proba", "decision_function"]
    for response in possible_responses:
        if response in response_methods:
            assert hasattr(estimator, response)
            assert getattr(estimator, response)(X) == response
        else:
            assert not hasattr(estimator, response)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_fast_dict.py | sklearn/utils/tests/test_fast_dict.py | """Test fast_dict."""
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.utils._fast_dict import IntFloatDict, argmin
def test_int_float_dict():
    """Basic IntFloatDict behavior: construction, lookup, length, append."""
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))

    d = IntFloatDict(keys, values)
    for key, value in zip(keys, values):
        assert d[key] == value
    assert len(d) == len(keys)

    d.append(120, 3.0)
    assert d[120] == 3.0
    assert len(d) == len(keys) + 1
    # append many entries to exercise internal growth, then spot-check one
    for i in range(2000):
        d.append(i + 1000, 4.0)
    assert d[1100] == 4.0
def test_int_float_dict_argmin():
    # Test the argmin implementation on the IntFloatDict
    keys = np.arange(100, dtype=np.intp)
    values = np.arange(100, dtype=np.float64)
    d = IntFloatDict(keys, values)
    # values increase with keys, so the minimum is at key 0 with value 0
    assert argmin(d) == (0, 0)
def test_to_arrays():
    # Test that an IntFloatDict is converted into arrays
    # of keys and values correctly
    keys_in = np.array([1, 2, 3], dtype=np.intp)
    values_in = np.array([4, 5, 6], dtype=np.float64)

    d = IntFloatDict(keys_in, values_in)
    keys_out, values_out = d.to_arrays()

    # dtypes and contents must round-trip
    assert keys_out.dtype == keys_in.dtype
    assert values_in.dtype == values_out.dtype
    assert_array_equal(keys_out, keys_in)
    assert_allclose(values_out, values_in)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_seq_dataset.py | sklearn/utils/tests/test_seq_dataset.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from functools import partial
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.datasets import load_iris
from sklearn.utils._seq_dataset import (
ArrayDataset32,
ArrayDataset64,
CSRDataset32,
CSRDataset64,
)
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import CSR_CONTAINERS
# Iris data in both precisions, with deterministic per-sample weights,
# shared by all tests in this module.
iris = load_iris()
X64 = iris.data.astype(np.float64)
y64 = iris.target.astype(np.float64)
sample_weight64 = np.arange(y64.size, dtype=np.float64)

X32 = iris.data.astype(np.float32)
y32 = iris.target.astype(np.float32)
sample_weight32 = np.arange(y32.size, dtype=np.float32)

# The two floating dtypes every dataset variant is tested with.
floating = [np.float32, np.float64]
def assert_csr_equal_values(current, expected):
    """Assert two CSR matrices store the same values.

    Both matrices have their explicit zeros removed in place, and `expected`
    is cast to `current`'s dtype before the comparison.
    """
    current.eliminate_zeros()
    expected.eliminate_zeros()
    expected = expected.astype(current.dtype)

    n_rows, n_cols = expected.shape
    assert current.shape[0] == n_rows
    assert current.shape[1] == n_cols
    for attr in ("data", "indices", "indptr"):
        assert_array_equal(getattr(current, attr), getattr(expected, attr))
def _make_dense_dataset(float_dtype):
    """Build an ArrayDataset over the iris arrays at the requested precision."""
    is_float32 = float_dtype == np.float32
    dataset_cls = ArrayDataset32 if is_float32 else ArrayDataset64
    arrays = (X32, y32, sample_weight32) if is_float32 else (X64, y64, sample_weight64)
    return dataset_cls(*arrays, seed=42)
def _make_sparse_dataset(csr_container, float_dtype):
    """Build a CSRDataset over the iris data at the requested precision."""
    if float_dtype == np.float32:
        dense, target, weights, dataset_cls = X32, y32, sample_weight32, CSRDataset32
    else:
        dense, target, weights, dataset_cls = X64, y64, sample_weight64, CSRDataset64
    X_sparse = csr_container(dense)
    return dataset_cls(
        X_sparse.data, X_sparse.indptr, X_sparse.indices, target, weights, seed=42
    )
def _dense_dataset_factories():
    """Zero-argument dense-dataset factories, one per float dtype (32 first)."""
    return [partial(_make_dense_dataset, dtype) for dtype in floating]
def _sparse_dataset_factories():
    """Zero-argument sparse-dataset factories for every (container, dtype) pair."""
    factories = []
    for csr_container in CSR_CONTAINERS:
        for float_dtype in floating:
            factories.append(partial(_make_sparse_dataset, csr_container, float_dtype))
    return factories
def _fused_types_dataset_factories():
    """Pair up factories of the same container type as (float32, float64)."""
    all_factories = _dense_dataset_factories() + _sparse_dataset_factories()
    # consecutive entries share a container type: group them two by two
    return [list(pair) for pair in zip(all_factories[::2], all_factories[1::2])]
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize(
    "dataset_factory", _dense_dataset_factories() + _sparse_dataset_factories()
)
def test_seq_dataset_basic_iteration(dataset_factory, csr_container):
    """Sequential and random iteration return the row, target and weight
    matching the reported index."""
    NUMBER_OF_RUNS = 5
    X_csr64 = csr_container(X64)
    dataset = dataset_factory()
    for _ in range(NUMBER_OF_RUNS):
        # next sample
        xi_, yi, swi, idx = dataset._next_py()
        xi = csr_container(xi_, shape=(1, X64.shape[1]))

        assert_csr_equal_values(xi, X_csr64[[idx]])
        assert yi == y64[idx]
        assert swi == sample_weight64[idx]

        # random sample
        xi_, yi, swi, idx = dataset._random_py()
        xi = csr_container(xi_, shape=(1, X64.shape[1]))

        assert_csr_equal_values(xi, X_csr64[[idx]])
        assert yi == y64[idx]
        assert swi == sample_weight64[idx]
@pytest.mark.parametrize(
    "float_dtype, csr_container", product(floating, CSR_CONTAINERS)
)
def test_seq_dataset_shuffle(float_dtype, csr_container):
    """Dense and sparse datasets visit identical index sequences, before and
    after shuffling with the same seed.

    The hard-coded index lists below are presumably precomputed from the
    dataset RNG seeded with 42 (and shuffle seed 77) — confirm if either seed
    changes.
    """
    dense_dataset = _make_dense_dataset(float_dtype)
    sparse_dataset = _make_sparse_dataset(csr_container, float_dtype)

    # not shuffled
    for i in range(5):
        _, _, _, idx1 = dense_dataset._next_py()
        _, _, _, idx2 = sparse_dataset._next_py()
        assert idx1 == i
        assert idx2 == i

    for i in [132, 50, 9, 18, 58]:
        _, _, _, idx1 = dense_dataset._random_py()
        _, _, _, idx2 = sparse_dataset._random_py()
        assert idx1 == i
        assert idx2 == i

    seed = 77
    dense_dataset._shuffle_py(seed)
    sparse_dataset._shuffle_py(seed)

    idx_next = [63, 91, 148, 87, 29]
    idx_shuffle = [137, 125, 56, 121, 127]
    for i, j in zip(idx_next, idx_shuffle):
        _, _, _, idx1 = dense_dataset._next_py()
        _, _, _, idx2 = sparse_dataset._next_py()
        assert idx1 == i
        assert idx2 == i

        _, _, _, idx1 = dense_dataset._random_py()
        _, _, _, idx2 = sparse_dataset._random_py()
        assert idx1 == j
        assert idx2 == j
@pytest.mark.parametrize(
    "dataset_32_factory, dataset_64_factory", _fused_types_dataset_factories()
)
def test_fused_types_consistency(dataset_32_factory, dataset_64_factory):
    """The float32 and float64 variants of a dataset yield matching samples."""
    dataset_32, dataset_64 = dataset_32_factory(), dataset_64_factory()
    NUMBER_OF_RUNS = 5
    for _ in range(NUMBER_OF_RUNS):
        # next sample
        (xi_data32, _, _), yi32, _, _ = dataset_32._next_py()
        (xi_data64, _, _), yi64, _, _ = dataset_64._next_py()

        assert xi_data32.dtype == np.float32
        assert xi_data64.dtype == np.float64

        # loose tolerance to absorb the float32 precision loss
        assert_allclose(xi_data64, xi_data32, rtol=1e-5)
        assert_allclose(yi64, yi32, rtol=1e-5)
def test_buffer_dtype_mismatch_error():
    """Passing arrays of the wrong precision to a dataset raises ValueError."""
    with pytest.raises(ValueError, match="Buffer dtype mismatch"):
        ArrayDataset64(X32, y32, sample_weight32, seed=42)

    with pytest.raises(ValueError, match="Buffer dtype mismatch"):
        ArrayDataset32(X64, y64, sample_weight64, seed=42)

    for csr_container in CSR_CONTAINERS:
        X_csr32 = csr_container(X32)
        X_csr64 = csr_container(X64)
        with pytest.raises(ValueError, match="Buffer dtype mismatch"):
            CSRDataset64(
                X_csr32.data,
                X_csr32.indptr,
                X_csr32.indices,
                y32,
                sample_weight32,
                seed=42,
            )

        with pytest.raises(ValueError, match="Buffer dtype mismatch"):
            CSRDataset32(
                X_csr64.data,
                X_csr64.indptr,
                X_csr64.indices,
                y64,
                sample_weight64,
                seed=42,
            )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_testing.py | sklearn/utils/tests/test_testing.py | import atexit
import os
import warnings
import numpy as np
import pytest
from scipy import sparse
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils._testing import (
TempMemmap,
_convert_container,
_delete_folder,
_get_warnings_filters_info_list,
assert_allclose,
assert_allclose_dense_sparse,
assert_docstring_consistency,
assert_run_python_script_without_output,
check_docstring_parameters,
create_memmap_backed_data,
ignore_warnings,
raises,
set_random_state,
skip_if_no_numpydoc,
turn_warnings_into_errors,
)
from sklearn.utils.deprecation import deprecated
from sklearn.utils.fixes import (
_IS_WASM,
CSC_CONTAINERS,
CSR_CONTAINERS,
)
from sklearn.utils.metaestimators import available_if
def test_set_random_state():
    """set_random_state sets `random_state` when present and is a no-op
    otherwise."""
    lda = LinearDiscriminantAnalysis()
    tree = DecisionTreeClassifier()
    # Linear Discriminant Analysis doesn't have random state: smoke test
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert tree.random_state == 3
# Renamed the parameter from `csr_container` to `csc_container`: the test is
# parametrized over CSC_CONTAINERS, so the old name was misleading.
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_assert_allclose_dense_sparse(csc_container):
    """assert_allclose_dense_sparse compares like containers and rejects
    mixed dense/sparse or mismatched-format inputs."""
    x = np.arange(9).reshape(3, 3)
    msg = "Not equal to tolerance "
    y = csc_container(x)
    for X in [x, y]:
        # basic compare
        with pytest.raises(AssertionError, match=msg):
            assert_allclose_dense_sparse(X, X * 2)
        assert_allclose_dense_sparse(X, X)

    with pytest.raises(ValueError, match="Can only compare two sparse"):
        assert_allclose_dense_sparse(x, y)

    A = sparse.diags(np.ones(5), offsets=0).tocsr()
    B = csc_container(np.ones((1, 5)))
    with pytest.raises(AssertionError, match="Arrays are not equal"):
        assert_allclose_dense_sparse(B, A)
def test_ignore_warning():
    """Check that the ignore_warnings decorator and context manager work as
    expected, including category filtering."""

    def _warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)

    def _multiple_warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)
        warnings.warn("deprecation warning")

    # Check the function directly
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        ignore_warnings(_warning_function)
        ignore_warnings(_warning_function, category=DeprecationWarning)
    with pytest.warns(DeprecationWarning):
        ignore_warnings(_warning_function, category=UserWarning)()

    with pytest.warns() as record:
        ignore_warnings(_multiple_warning_function, category=FutureWarning)()
    assert len(record) == 2
    assert isinstance(record[0].message, DeprecationWarning)
    assert isinstance(record[1].message, UserWarning)

    with pytest.warns() as record:
        ignore_warnings(_multiple_warning_function, category=UserWarning)()
    assert len(record) == 1
    assert isinstance(record[0].message, DeprecationWarning)

    with warnings.catch_warnings():
        warnings.simplefilter("error")
        ignore_warnings(_warning_function, category=(DeprecationWarning, UserWarning))

    # Check the decorator
    @ignore_warnings
    def decorator_no_warning():
        _warning_function()
        _multiple_warning_function()

    @ignore_warnings(category=(DeprecationWarning, UserWarning))
    def decorator_no_warning_multiple():
        _multiple_warning_function()

    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_warning():
        _warning_function()

    @ignore_warnings(category=UserWarning)
    def decorator_no_user_warning():
        _warning_function()

    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_multiple_warning():
        _multiple_warning_function()

    @ignore_warnings(category=UserWarning)
    def decorator_no_user_multiple_warning():
        _multiple_warning_function()

    with warnings.catch_warnings():
        warnings.simplefilter("error")
        decorator_no_warning()
        decorator_no_warning_multiple()
        decorator_no_deprecation_warning()
    with pytest.warns(DeprecationWarning):
        decorator_no_user_warning()
    with pytest.warns(UserWarning):
        decorator_no_deprecation_multiple_warning()
    with pytest.warns(DeprecationWarning):
        decorator_no_user_multiple_warning()

    # Check the context manager
    def context_manager_no_warning():
        with ignore_warnings():
            _warning_function()

    def context_manager_no_warning_multiple():
        with ignore_warnings(category=(DeprecationWarning, UserWarning)):
            _multiple_warning_function()

    def context_manager_no_deprecation_warning():
        with ignore_warnings(category=DeprecationWarning):
            _warning_function()

    def context_manager_no_user_warning():
        with ignore_warnings(category=UserWarning):
            _warning_function()

    def context_manager_no_deprecation_multiple_warning():
        with ignore_warnings(category=DeprecationWarning):
            _multiple_warning_function()

    def context_manager_no_user_multiple_warning():
        with ignore_warnings(category=UserWarning):
            _multiple_warning_function()

    with warnings.catch_warnings():
        warnings.simplefilter("error")
        context_manager_no_warning()
        context_manager_no_warning_multiple()
        context_manager_no_deprecation_warning()
    with pytest.warns(DeprecationWarning):
        context_manager_no_user_warning()
    with pytest.warns(UserWarning):
        context_manager_no_deprecation_multiple_warning()
    with pytest.warns(DeprecationWarning):
        context_manager_no_user_multiple_warning()

    # Check that passing a warning class as first positional argument raises
    warning_class = UserWarning
    match = "'obj' should be a callable.+you should use 'category=UserWarning'"

    with pytest.raises(ValueError, match=match):
        silence_warnings_func = ignore_warnings(warning_class)(_warning_function)
        silence_warnings_func()

    with pytest.raises(ValueError, match=match):

        @ignore_warnings(warning_class)
        def test():
            pass
# Tests for docstrings:
# The functions below are docstring fixtures for test_check_docstring_parameters.
# Their docstrings are intentionally well- or mal-formed: do not "fix" them.
def f_ok(a, b):
    """Function f

    Parameters
    ----------
    a : int
        Parameter a
    b : float
        Parameter b

    Returns
    -------
    c : list
        Parameter c
    """
    c = a + b
    return c


# NOTE: uses an unknown section name "Results" instead of "Returns".
def f_bad_sections(a, b):
    """Function f

    Parameters
    ----------
    a : int
        Parameter a
    b : float
        Parameter b

    Results
    -------
    c : list
        Parameter c
    """
    c = a + b
    return c


# NOTE: parameters documented in the opposite order of the signature.
def f_bad_order(b, a):
    """Function f

    Parameters
    ----------
    a : int
        Parameter a
    b : float
        Parameter b

    Returns
    -------
    c : list
        Parameter c
    """
    c = a + b
    return c


# NOTE: documents an extra parameter `c` absent from the signature.
def f_too_many_param_docstring(a, b):
    """Function f

    Parameters
    ----------
    a : int
        Parameter a
    b : int
        Parameter b
    c : int
        Parameter c

    Returns
    -------
    d : list
        Parameter c
    """
    d = a + b
    return d


# NOTE: omits parameter `b` from the docstring.
def f_missing(a, b):
    """Function f

    Parameters
    ----------
    a : int
        Parameter a

    Returns
    -------
    c : list
        Parameter c
    """
    c = a + b
    return c


# NOTE: each parameter definition is malformed in a different way (missing
# space before the colon, empty type, no colon at all).
def f_check_param_definition(a, b, c, d, e):
    """Function f

    Parameters
    ----------
    a: int
        Parameter a
    b:
        Parameter b
    c :
        This is parsed correctly in numpydoc 1.2
    d:int
        Parameter d
    e
        No typespec is allowed without colon
    """
    return a + b + c + d
class Klass:
    # Method-level docstring fixtures for test_check_docstring_parameters;
    # the docstrings are intentionally missing or malformed.
    def f_missing(self, X, y):
        pass

    def f_bad_sections(self, X, y):
        """Function f

        Parameter
        ---------
        a : int
            Parameter a
        b : float
            Parameter b

        Results
        -------
        c : list
            Parameter c
        """
        pass
class MockEst:
    # Minimal estimator-like object used as the delegate of MockMetaEstimator.
    def __init__(self):
        """MockEstimator"""

    def fit(self, X, y):
        return X

    def predict(self, X):
        return X

    def predict_proba(self, X):
        return X

    def score(self, X):
        return 1.0
class MockMetaEstimator:
    # Docstring fixture: several docstrings below use wrong parameter names or
    # too-short section underlines on purpose — do not "fix" them.
    def __init__(self, delegate):
        """MetaEstimator to check if doctest on delegated methods work.

        Parameters
        ---------
        delegate : estimator
            Delegated estimator.
        """
        self.delegate = delegate

    @available_if(lambda self: hasattr(self.delegate, "predict"))
    def predict(self, X):
        """This is available only if delegate has predict.

        Parameters
        ----------
        y : ndarray
            Parameter y
        """
        return self.delegate.predict(X)

    @available_if(lambda self: hasattr(self.delegate, "score"))
    @deprecated("Testing a deprecated delegated method")
    def score(self, X):
        """This is available only if delegate has score.

        Parameters
        ---------
        y : ndarray
            Parameter y
        """

    @available_if(lambda self: hasattr(self.delegate, "predict_proba"))
    def predict_proba(self, X):
        """This is available only if delegate has predict_proba.

        Parameters
        ---------
        X : ndarray
            Parameter X
        """
        return X

    @deprecated("Testing deprecated function with wrong params")
    def fit(self, X, y):
        """Incorrect docstring but should not be tested"""
@skip_if_no_numpydoc
def test_check_docstring_parameters():
    """Check check_docstring_parameters against the fixture functions above;
    the expected messages are exact strings, including difflib output."""
    incorrect = check_docstring_parameters(f_ok)
    assert incorrect == []
    incorrect = check_docstring_parameters(f_ok, ignore=["b"])
    assert incorrect == []
    incorrect = check_docstring_parameters(f_missing, ignore=["b"])
    assert incorrect == []
    with pytest.raises(RuntimeError, match="Unknown section Results"):
        check_docstring_parameters(f_bad_sections)
    with pytest.raises(RuntimeError, match="Unknown section Parameter"):
        check_docstring_parameters(Klass.f_bad_sections)

    incorrect = check_docstring_parameters(f_check_param_definition)
    mock_meta = MockMetaEstimator(delegate=MockEst())
    mock_meta_name = mock_meta.__class__.__name__
    assert incorrect == [
        (
            "sklearn.utils.tests.test_testing.f_check_param_definition There "
            "was no space between the param name and colon ('a: int')"
        ),
        (
            "sklearn.utils.tests.test_testing.f_check_param_definition There "
            "was no space between the param name and colon ('b:')"
        ),
        (
            "sklearn.utils.tests.test_testing.f_check_param_definition There "
            "was no space between the param name and colon ('d:int')"
        ),
    ]

    messages = [
        [
            "In function: sklearn.utils.tests.test_testing.f_bad_order",
            (
                "There's a parameter name mismatch in function docstring w.r.t."
                " function signature, at index 0 diff: 'b' != 'a'"
            ),
            "Full diff:",
            "- ['b', 'a']",
            "+ ['a', 'b']",
        ],
        [
            "In function: sklearn.utils.tests.test_testing.f_too_many_param_docstring",
            (
                "Parameters in function docstring have more items w.r.t. function"
                " signature, first extra item: c"
            ),
            "Full diff:",
            "- ['a', 'b']",
            "+ ['a', 'b', 'c']",
            "? +++++",
        ],
        [
            "In function: sklearn.utils.tests.test_testing.f_missing",
            (
                "Parameters in function docstring have less items w.r.t. function"
                " signature, first missing item: b"
            ),
            "Full diff:",
            "- ['a', 'b']",
            "+ ['a']",
        ],
        [
            "In function: sklearn.utils.tests.test_testing.Klass.f_missing",
            (
                "Parameters in function docstring have less items w.r.t. function"
                " signature, first missing item: X"
            ),
            "Full diff:",
            "- ['X', 'y']",
            "+ []",
        ],
        [
            f"In function: sklearn.utils.tests.test_testing.{mock_meta_name}.predict",
            (
                "There's a parameter name mismatch in function docstring w.r.t."
                " function signature, at index 0 diff: 'X' != 'y'"
            ),
            "Full diff:",
            "- ['X']",
            "? ^",
            "+ ['y']",
            "? ^",
        ],
        [
            "In function: "
            f"sklearn.utils.tests.test_testing.{mock_meta_name}."
            "predict_proba",
            "potentially wrong underline length... ",
            "Parameters ",
            "--------- in ",
        ],
        [
            f"In function: sklearn.utils.tests.test_testing.{mock_meta_name}.score",
            "potentially wrong underline length... ",
            "Parameters ",
            "--------- in ",
        ],
        [
            f"In function: sklearn.utils.tests.test_testing.{mock_meta_name}.fit",
            (
                "Parameters in function docstring have less items w.r.t. function"
                " signature, first missing item: X"
            ),
            "Full diff:",
            "- ['X', 'y']",
            "+ []",
        ],
    ]

    for msg, f in zip(
        messages,
        [
            f_bad_order,
            f_too_many_param_docstring,
            f_missing,
            Klass.f_missing,
            mock_meta.predict,
            mock_meta.predict_proba,
            mock_meta.score,
            mock_meta.fit,
        ],
    ):
        incorrect = check_docstring_parameters(f)
        assert msg == incorrect, '\n"%s"\n not in \n"%s"' % (msg, incorrect)
# Docstring fixtures for the assert_docstring_consistency tests: f_one/f_two
# agree on parameter `a`, disagree elsewhere; f_three diverges further.
def f_one(a, b):  # pragma: no cover
    """Function one.

    Parameters
    ----------
    a : int, float
        Parameter a.
        Second line.
    b : str
        Parameter b.

    Returns
    -------
    c : int
        Returning
    d : int
        Returning
    """
    pass


def f_two(a, b):  # pragma: no cover
    """Function two.

    Parameters
    ----------
    a : int, float
        Parameter a.
        Second line.
    b : str
        Parameter bb.
    e : int
        Extra parameter.

    Returns
    -------
    c : int
        Returning
    d : int
        Returning
    """
    pass


def f_three(a, b):  # pragma: no cover
    """Function two.

    Parameters
    ----------
    a : int, float
        Parameter a.
    b : str
        Parameter B!
    e :
        Extra parameter.

    Returns
    -------
    c : int
        Returning.
    d : int
        Returning
    """
    pass
@skip_if_no_numpydoc
def test_assert_docstring_consistency_object_type():
    """Check error raised when `objects` has an incorrect type."""
    with pytest.raises(TypeError, match="All 'objects' must be one of"):
        assert_docstring_consistency(["string", f_one])
@skip_if_no_numpydoc
@pytest.mark.parametrize(
    "objects, kwargs, error",
    [
        (
            [f_one, f_two],
            {"include_params": ["a"], "exclude_params": ["b"]},
            "The 'exclude_params' argument",
        ),
        (
            [f_one, f_two],
            {"include_returns": False, "exclude_returns": ["c"]},
            "The 'exclude_returns' argument",
        ),
    ],
)
def test_assert_docstring_consistency_arg_checks(objects, kwargs, error):
    """Check `assert_docstring_consistency` argument checking is correct."""
    with pytest.raises(TypeError, match=error):
        assert_docstring_consistency(objects, **kwargs)
@skip_if_no_numpydoc
@pytest.mark.parametrize(
    "objects, kwargs, error, warn",
    [
        pytest.param(
            [f_one, f_two], {"include_params": ["a"]}, "", "", id="whitespace"
        ),
        pytest.param([f_one, f_two], {"include_returns": True}, "", "", id="incl_all"),
        pytest.param(
            [f_one, f_two, f_three],
            {"include_params": ["a"]},
            (
                r"The description of Parameter 'a' is inconsistent between "
                r"\['f_one',\n'f_two'\]"
            ),
            "",
            id="2-1 group",
        ),
        pytest.param(
            [f_one, f_two, f_three],
            {"include_params": ["b"]},
            (
                r"The description of Parameter 'b' is inconsistent between "
                r"\['f_one'\] and\n\['f_two'\] and"
            ),
            "",
            id="1-1-1 group",
        ),
        pytest.param(
            [f_two, f_three],
            {"include_params": ["e"]},
            (
                r"The type specification of Parameter 'e' is inconsistent between\n"
                r"\['f_two'\] and"
            ),
            "",
            id="empty type",
        ),
        pytest.param(
            [f_one, f_two],
            {"include_params": True, "exclude_params": ["b"]},
            "",
            r"Checking was skipped for Parameters: \['e'\]",
            id="skip warn",
        ),
    ],
)
def test_assert_docstring_consistency(objects, kwargs, error, warn):
    """Check `assert_docstring_consistency` gives correct results."""
    # `error` and `warn` are regexes; an empty string means "no error/warning
    # expected" for that case.
    if error:
        with pytest.raises(AssertionError, match=error):
            assert_docstring_consistency(objects, **kwargs)
    elif warn:
        with pytest.warns(UserWarning, match=warn):
            assert_docstring_consistency(objects, **kwargs)
    else:
        assert_docstring_consistency(objects, **kwargs)
def f_four(labels): # pragma: no cover
"""Function four.
Parameters
----------
labels : array-like, default=None
The set of labels to include when `average != 'binary'`, and their
order if `average is None`. Labels present in the data can be excluded.
"""
pass
def f_five(labels): # pragma: no cover
"""Function five.
Parameters
----------
labels : array-like, default=None
The set of labels to include when `average != 'binary'`, and their
order if `average is None`. This is an extra line. Labels present in the
data can be excluded.
"""
pass
def f_six(labels): # pragma: no cover
"""Function six.
Parameters
----------
labels : array-like, default=None
The group of labels to add when `average != 'binary'`, and the
order if `average is None`. Labels present on them datas can be excluded.
"""
pass
@skip_if_no_numpydoc
def test_assert_docstring_consistency_error_msg():
"""Check `assert_docstring_consistency` difference message."""
msg = r"""The description of Parameter 'labels' is inconsistent between
\['f_four'\] and \['f_five'\] and \['f_six'\]:
\*\*\* \['f_four'\]
--- \['f_five'\]
\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*
\*\*\* 10,25 \*\*\*\*
--- 10,30 ----
'binary'`, and their order if `average is None`.
\+ This is an extra line.
Labels present in the data can be excluded.
\*\*\* \['f_four'\]
--- \['f_six'\]
\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*
\*\*\* 1,25 \*\*\*\*
The
! set
of labels to
! include
when `average != 'binary'`, and
! their
order if `average is None`. Labels present
! in the data
can be excluded.
--- 1,25 ----
The
! group
of labels to
! add
when `average != 'binary'`, and
! the
order if `average is None`. Labels present
! on them datas
can be excluded."""
with pytest.raises(AssertionError, match=msg):
assert_docstring_consistency([f_four, f_five, f_six], include_params=True)
@skip_if_no_numpydoc
def test_assert_docstring_consistency_descr_regex_pattern():
"""Check `assert_docstring_consistency` `descr_regex_pattern` works."""
# Check regex that matches full parameter descriptions
regex_full = (
r"The (set|group) " # match 'set' or 'group'
r"of labels to (include|add) " # match 'include' or 'add'
r"when `average \!\= 'binary'`, and (their|the) " # match 'their' or 'the'
r"order if `average is None`\."
r"[\s\w]*\.* " # optionally match additional sentence
r"Labels present (on|in) " # match 'on' or 'in'
r"(them|the) " # match 'them' or 'the'
r"datas? can be excluded\." # match 'data' or 'datas'
)
assert_docstring_consistency(
[f_four, f_five, f_six],
include_params=True,
descr_regex_pattern=" ".join(regex_full.split()),
)
# Check we can just match a few alternate words
regex_words = r"(labels|average|binary)" # match any of these 3 words
assert_docstring_consistency(
[f_four, f_five, f_six],
include_params=True,
descr_regex_pattern=" ".join(regex_words.split()),
)
# Check error raised when regex doesn't match
regex_error = r"The set of labels to include when.+"
msg = r"The description of Parameter 'labels' in \['f_six'\] does not match"
with pytest.raises(AssertionError, match=msg):
assert_docstring_consistency(
[f_four, f_five, f_six],
include_params=True,
descr_regex_pattern=" ".join(regex_error.split()),
)
class RegistrationCounter:
def __init__(self):
self.nb_calls = 0
def __call__(self, to_register_func):
self.nb_calls += 1
assert to_register_func.func is _delete_folder
def check_memmap(input_array, mmap_data, mmap_mode="r"):
assert isinstance(mmap_data, np.memmap)
writeable = mmap_mode != "r"
assert mmap_data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, mmap_data)
def test_tempmemmap(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, "register", registration_counter)
input_array = np.ones(3)
with TempMemmap(input_array) as data:
check_memmap(input_array, data)
temp_folder = os.path.dirname(data.filename)
if os.name != "nt":
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 1
mmap_mode = "r+"
with TempMemmap(input_array, mmap_mode=mmap_mode) as data:
check_memmap(input_array, data, mmap_mode=mmap_mode)
temp_folder = os.path.dirname(data.filename)
if os.name != "nt":
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 2
def test_create_memmap_backed_data(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, "register", registration_counter)
input_array = np.ones(3)
data = create_memmap_backed_data(input_array)
check_memmap(input_array, data)
assert registration_counter.nb_calls == 1
data, folder = create_memmap_backed_data(input_array, return_folder=True)
check_memmap(input_array, data)
assert folder == os.path.dirname(data.filename)
assert registration_counter.nb_calls == 2
mmap_mode = "r+"
data = create_memmap_backed_data(input_array, mmap_mode=mmap_mode)
check_memmap(input_array, data, mmap_mode)
assert registration_counter.nb_calls == 3
input_list = [input_array, input_array + 1, input_array + 2]
mmap_data_list = create_memmap_backed_data(input_list)
for input_array, data in zip(input_list, mmap_data_list):
check_memmap(input_array, data)
assert registration_counter.nb_calls == 4
output_data, other = create_memmap_backed_data([input_array, "not-an-array"])
check_memmap(input_array, output_data)
assert other == "not-an-array"
@pytest.mark.parametrize(
"constructor_name, container_type",
[
("list", list),
("tuple", tuple),
("array", np.ndarray),
("sparse", sparse.csr_matrix),
# using `zip` will only keep the available sparse containers
# depending of the installed SciPy version
*zip(["sparse_csr", "sparse_csr_array"], CSR_CONTAINERS),
*zip(["sparse_csc", "sparse_csc_array"], CSC_CONTAINERS),
("dataframe", lambda: pytest.importorskip("pandas").DataFrame),
("series", lambda: pytest.importorskip("pandas").Series),
("index", lambda: pytest.importorskip("pandas").Index),
("pyarrow", lambda: pytest.importorskip("pyarrow").Table),
("pyarrow_array", lambda: pytest.importorskip("pyarrow").Array),
("polars", lambda: pytest.importorskip("polars").DataFrame),
("polars_series", lambda: pytest.importorskip("polars").Series),
("slice", slice),
],
)
@pytest.mark.parametrize(
"dtype, superdtype",
[
(np.int32, np.integer),
(np.int64, np.integer),
(np.float32, np.floating),
(np.float64, np.floating),
],
)
def test_convert_container(
constructor_name,
container_type,
dtype,
superdtype,
):
"""Check that we convert the container to the right type of array with the
right data type."""
if constructor_name in (
"dataframe",
"index",
"polars",
"polars_series",
"pyarrow",
"pyarrow_array",
"series",
):
# delay the import of pandas/polars within the function to only skip this test
# instead of the whole file
container_type = container_type()
container = [0, 1]
container_converted = _convert_container(
container,
constructor_name,
dtype=dtype,
)
assert isinstance(container_converted, container_type)
if constructor_name in ("list", "tuple", "index"):
# list and tuple will use Python class dtype: int, float
# pandas index will always use high precision: np.int64 and np.float64
assert np.issubdtype(type(container_converted[0]), superdtype)
elif constructor_name in ("polars", "polars_series", "pyarrow", "pyarrow_array"):
return
elif hasattr(container_converted, "dtype"):
assert container_converted.dtype == dtype
elif hasattr(container_converted, "dtypes"):
assert container_converted.dtypes[0] == dtype
def test_convert_container_categories_pandas():
pytest.importorskip("pandas")
df = _convert_container(
[["x"]], "dataframe", ["A"], categorical_feature_names=["A"]
)
assert df.dtypes.iloc[0] == "category"
def test_convert_container_categories_polars():
pl = pytest.importorskip("polars")
df = _convert_container([["x"]], "polars", ["A"], categorical_feature_names=["A"])
assert df.schema["A"] == pl.Categorical()
def test_convert_container_categories_pyarrow():
pa = pytest.importorskip("pyarrow")
df = _convert_container([["x"]], "pyarrow", ["A"], categorical_feature_names=["A"])
assert type(df.schema[0].type) is pa.DictionaryType
def test_raises():
# Tests for the raises context manager
# Proper type, no match
with raises(TypeError):
raise TypeError()
# Proper type, proper match
with raises(TypeError, match="how are you") as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# Proper type, proper match with multiple patterns
with raises(TypeError, match=["not this one", "how are you"]) as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# bad type, no match
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError) as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# Bad type, no match, with an err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(TypeError, err_msg="the failure message") as cm:
raise ValueError()
assert not cm.raised_and_matched
# bad type, with match (is ignored anyway)
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError, match="this is ignored") as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# proper type but bad match
with pytest.raises(
AssertionError, match="should contain one of the following patterns"
):
with raises(TypeError, match="hello") as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# proper type but bad match, with err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(TypeError, match="hello", err_msg="the failure message") as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# no raise with default may_pass=False
with pytest.raises(AssertionError, match="Did not raise"):
with raises(TypeError) as cm:
pass
assert not cm.raised_and_matched
# no raise with may_pass=True
with raises(TypeError, match="hello", may_pass=True) as cm:
pass # still OK
assert not cm.raised_and_matched
# Multiple exception types:
with raises((TypeError, ValueError)):
raise TypeError()
with raises((TypeError, ValueError)):
raise ValueError()
with pytest.raises(AssertionError):
with raises((TypeError, ValueError)):
pass
def test_float32_aware_assert_allclose():
# The relative tolerance for float32 inputs is 1e-4
assert_allclose(np.array([1.0 + 2e-5], dtype=np.float32), 1.0)
with pytest.raises(AssertionError):
assert_allclose(np.array([1.0 + 2e-4], dtype=np.float32), 1.0)
# The relative tolerance for other inputs is left to 1e-7 as in
# the original numpy version.
assert_allclose(np.array([1.0 + 2e-8], dtype=np.float64), 1.0)
with pytest.raises(AssertionError):
assert_allclose(np.array([1.0 + 2e-7], dtype=np.float64), 1.0)
# atol is left to 0.0 by default, even for float32
with pytest.raises(AssertionError):
assert_allclose(np.array([1e-5], dtype=np.float32), 0.0)
assert_allclose(np.array([1e-5], dtype=np.float32), 0.0, atol=2e-5)
@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
def test_assert_run_python_script_without_output():
code = "x = 1"
assert_run_python_script_without_output(code)
code = "print('something to stdout')"
with pytest.raises(AssertionError, match="Expected no output"):
assert_run_python_script_without_output(code)
code = "print('something to stdout')"
with pytest.raises(
AssertionError,
match="output was not supposed to match.+got.+something to stdout",
):
assert_run_python_script_without_output(code, pattern="to.+stdout")
code = "\n".join(["import sys", "print('something to stderr', file=sys.stderr)"])
with pytest.raises(
AssertionError,
match="output was not supposed to match.+got.+something to stderr",
):
assert_run_python_script_without_output(code, pattern="to.+stderr")
@pytest.mark.parametrize(
"constructor_name",
[
"sparse_csr",
"sparse_csc",
pytest.param(
"sparse_csr_array",
),
pytest.param(
"sparse_csc_array",
),
],
)
def test_convert_container_sparse_to_sparse(constructor_name):
"""Non-regression test to check that we can still convert a sparse container
from a given format to another format.
"""
X_sparse = sparse.random(10, 10, density=0.1, format="csr")
_convert_container(X_sparse, constructor_name)
def check_warnings_as_errors(warning_info, warnings_as_errors):
if warning_info.action == "error" and warnings_as_errors:
with pytest.raises(warning_info.category, match=warning_info.message):
warnings.warn(
message=warning_info.message,
category=warning_info.category,
)
if warning_info.action == "ignore":
with warnings.catch_warnings(record=True) as record:
message = warning_info.message
# Special treatment when regex is used
if "Pyarrow" in message:
message = "\nPyarrow will become a required dependency"
warnings.warn(
message=message,
category=warning_info.category,
)
assert len(record) == 0 if warnings_as_errors else 1
if record:
assert str(record[0].message) == message
assert record[0].category == warning_info.category
@pytest.mark.parametrize("warning_info", _get_warnings_filters_info_list())
def test_sklearn_warnings_as_errors(warning_info):
warnings_as_errors = os.environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0"
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_extmath.py | sklearn/utils/tests/test_extmath.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import numpy as np
import pytest
from scipy import linalg, sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
from sklearn import config_context
from sklearn.datasets import make_low_rank_matrix, make_sparse_spd_matrix
from sklearn.utils import gen_batches
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._array_api import (
_convert_to_numpy,
_get_namespace_device_dtype_ids,
_max_precision_float_dtype,
get_namespace,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._array_api import (
device as array_device,
)
from sklearn.utils._testing import (
_array_api_for_tests,
assert_allclose,
assert_allclose_dense_sparse,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
skip_if_32bit,
)
from sklearn.utils.extmath import (
_approximate_mode,
_deterministic_vector_sign_flip,
_incremental_mean_and_var,
_randomized_eigsh,
_safe_accumulator_op,
cartesian,
density,
randomized_range_finder,
randomized_svd,
row_norms,
safe_sparse_dot,
softmax,
stable_cumsum,
svd_flip,
weighted_mode,
)
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
_mode,
)
@pytest.mark.parametrize(
"sparse_container",
COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS,
)
def test_density(sparse_container):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
assert density(sparse_container(X)) == density(X)
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = _mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis=axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
@pytest.mark.parametrize("dtype", (np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=rank,
tail_strength=0.0,
random_state=0,
).astype(dtype, copy=False)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
U, s, Vt = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
Vt = Vt.astype(dtype, copy=False)
for normalizer in ["auto", "LU", "QR"]: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0
)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == "f":
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert Ua.shape == (n_samples, k)
assert sa.shape == (k,)
assert Va.shape == (k, n_features)
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(
np.dot(U[:, :k], Vt[:k, :]), np.dot(Ua, Va), decimal=decimal
)
# check the sparse matrix representation
for csr_container in CSR_CONTAINERS:
X = csr_container(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0
)
if dtype.kind == "f":
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == "f"
assert sa.dtype.kind == "f"
assert Va.dtype.kind == "f"
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize("dtype", (np.int32, np.int64, np.float32, np.float64))
def test_randomized_eigsh(dtype):
"""Test that `_randomized_eigsh` returns the appropriate components"""
rng = np.random.RandomState(42)
X = np.diag(np.array([1.0, -2.0, 0.0, 3.0], dtype=dtype))
# random rotation that preserves the eigenvalues of X
rand_rot = np.linalg.qr(rng.normal(size=X.shape))[0]
X = rand_rot @ X @ rand_rot.T
# with 'module' selection method, the negative eigenvalue shows up
eigvals, eigvecs = _randomized_eigsh(X, n_components=2, selection="module")
# eigenvalues
assert eigvals.shape == (2,)
assert_array_almost_equal(eigvals, [3.0, -2.0]) # negative eigenvalue here
# eigenvectors
assert eigvecs.shape == (4, 2)
# with 'value' selection method, the negative eigenvalue does not show up
with pytest.raises(NotImplementedError):
_randomized_eigsh(X, n_components=2, selection="value")
@pytest.mark.parametrize("k", (10, 50, 100, 199, 200))
def test_randomized_eigsh_compared_to_others(k):
"""Check that `_randomized_eigsh` is similar to other `eigsh`
Tests that for a random PSD matrix, `_randomized_eigsh` provides results
comparable to LAPACK (scipy.linalg.eigh) and ARPACK
(scipy.sparse.linalg.eigsh).
Note: some versions of ARPACK do not support k=n_features.
"""
# make a random PSD matrix
n_features = 200
X = make_sparse_spd_matrix(n_features, random_state=0)
# compare two versions of randomized
# rough and fast
eigvals, eigvecs = _randomized_eigsh(
X, n_components=k, selection="module", n_iter=25, random_state=0
)
# more accurate but slow (TODO find realistic settings here)
eigvals_qr, eigvecs_qr = _randomized_eigsh(
X,
n_components=k,
n_iter=25,
n_oversamples=20,
random_state=0,
power_iteration_normalizer="QR",
selection="module",
)
# with LAPACK
eigvals_lapack, eigvecs_lapack = eigh(
X, subset_by_index=(n_features - k, n_features - 1)
)
indices = eigvals_lapack.argsort()[::-1]
eigvals_lapack = eigvals_lapack[indices]
eigvecs_lapack = eigvecs_lapack[:, indices]
# -- eigenvalues comparison
assert eigvals_lapack.shape == (k,)
# comparison precision
assert_array_almost_equal(eigvals, eigvals_lapack, decimal=6)
assert_array_almost_equal(eigvals_qr, eigvals_lapack, decimal=6)
# -- eigenvectors comparison
assert eigvecs_lapack.shape == (n_features, k)
# flip eigenvectors' sign to enforce deterministic output
dummy_vecs = np.zeros_like(eigvecs).T
eigvecs, _ = svd_flip(eigvecs, dummy_vecs)
eigvecs_qr, _ = svd_flip(eigvecs_qr, dummy_vecs)
eigvecs_lapack, _ = svd_flip(eigvecs_lapack, dummy_vecs)
assert_array_almost_equal(eigvecs, eigvecs_lapack, decimal=4)
assert_array_almost_equal(eigvecs_qr, eigvecs_lapack, decimal=6)
# comparison ARPACK ~ LAPACK (some ARPACK implems do not support k=n)
if k < n_features:
v0 = _init_arpack_v0(n_features, random_state=0)
# "LA" largest algebraic <=> selection="value" in randomized_eigsh
eigvals_arpack, eigvecs_arpack = eigsh(
X, k, which="LA", tol=0, maxiter=None, v0=v0
)
indices = eigvals_arpack.argsort()[::-1]
# eigenvalues
eigvals_arpack = eigvals_arpack[indices]
assert_array_almost_equal(eigvals_lapack, eigvals_arpack, decimal=10)
# eigenvectors
eigvecs_arpack = eigvecs_arpack[:, indices]
eigvecs_arpack, _ = svd_flip(eigvecs_arpack, dummy_vecs)
assert_array_almost_equal(eigvecs_arpack, eigvecs_lapack, decimal=8)
@pytest.mark.parametrize(
"n,rank",
[
(10, 7),
(100, 10),
(100, 80),
(500, 10),
(500, 250),
(500, 400),
],
)
def test_randomized_eigsh_reconst_low_rank(n, rank):
"""Check that randomized_eigsh is able to reconstruct a low rank psd matrix
Tests that the decomposition provided by `_randomized_eigsh` leads to
orthonormal eigenvectors, and that a low rank PSD matrix can be effectively
reconstructed with good accuracy using it.
"""
assert rank < n
# create a low rank PSD
rng = np.random.RandomState(69)
X = rng.randn(n, rank)
A = X @ X.T
# approximate A with the "right" number of components
S, V = _randomized_eigsh(A, n_components=rank, random_state=rng)
# orthonormality checks
assert_array_almost_equal(np.linalg.norm(V, axis=0), np.ones(S.shape))
assert_array_almost_equal(V.T @ V, np.diag(np.ones(S.shape)))
# reconstruction
A_reconstruct = V @ np.diag(S) @ V.T
# test that the approximation is good
assert_array_almost_equal(A_reconstruct, A, decimal=6)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_row_norms(dtype, csr_container):
X = np.random.RandomState(42).randn(100, 100)
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype, copy=False)
sq_norm = (X**2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = csr_container(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype, copy=False)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype, copy=False)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X wity structure approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=rank,
tail_strength=0.1,
random_state=0,
)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ["auto", "none", "LU", "QR"]:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(
X, k, n_iter=0, power_iteration_normalizer=normalizer, random_state=0
)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.01
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0
)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=rank,
tail_strength=1.0,
random_state=0,
)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ["auto", "none", "LU", "QR"]:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(
X, k, n_iter=0, power_iteration_normalizer=normalizer, random_state=0
)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.1
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(
X, k, n_iter=5, power_iteration_normalizer=normalizer, random_state=0
)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=rank,
tail_strength=0.5,
random_state=0,
)
assert X.shape == (n_samples, n_features)
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False, random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True, random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose="auto", random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]), decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]), decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, Vt = randomized_svd(
X, n_components, n_iter=2, power_iteration_normalizer="none", random_state=0
)
A = X - U.dot(np.diag(s).dot(Vt))
error_2 = linalg.norm(A, ord="fro")
U, s, Vt = randomized_svd(
X, n_components, n_iter=20, power_iteration_normalizer="none", random_state=0
)
A = X - U.dot(np.diag(s).dot(Vt))
error_20 = linalg.norm(A, ord="fro")
assert np.abs(error_2 - error_20) > 100
for normalizer in ["LU", "QR", "auto"]:
U, s, Vt = randomized_svd(
X,
n_components,
n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0,
)
A = X - U.dot(np.diag(s).dot(Vt))
error_2 = linalg.norm(A, ord="fro")
for i in [5, 10, 50]:
U, s, Vt = randomized_svd(
X,
n_components,
n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0,
)
A = X - U.dot(np.diag(s).dot(Vt))
error = linalg.norm(A, ord="fro")
assert 15 > np.abs(error_2 - error)
@pytest.mark.parametrize("sparse_container", DOK_CONTAINERS + LIL_CONTAINERS)
def test_randomized_svd_sparse_warnings(sparse_container):
# randomized_svd throws a warning for lil and dok matrix
rng = np.random.RandomState(42)
X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
n_components = 5
X = sparse_container(X)
warn_msg = (
"Calculating SVD of a {} is expensive. csr_matrix is more efficient.".format(
sparse_container.__name__
)
)
with pytest.warns(sparse.SparseEfficiencyWarning, match=warn_msg):
randomized_svd(X, n_components, n_iter=1, power_iteration_normalizer="none")
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, Vt = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, Vt, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, Vt = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, Vt, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, Vt, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, Vt, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
@pytest.mark.parametrize("n_samples, n_features", [(3, 4), (4, 3)])
def test_svd_flip_max_abs_cols(n_samples, n_features, global_random_seed):
rs = np.random.RandomState(global_random_seed)
X = rs.randn(n_samples, n_features)
U, _, Vt = linalg.svd(X, full_matrices=False)
U1, _ = svd_flip(U, Vt, u_based_decision=True)
max_abs_U1_row_idx_for_col = np.argmax(np.abs(U1), axis=0)
assert (U1[max_abs_U1_row_idx_for_col, np.arange(U1.shape[1])] >= 0).all()
_, V2 = svd_flip(U, Vt, u_based_decision=False)
max_abs_V2_col_idx_for_row = np.argmax(np.abs(V2), axis=1)
assert (V2[np.arange(V2.shape[0]), max_abs_V2_col_idx_for_row] >= 0).all()
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True, random_state=0)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert u_based
assert not v_based
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True, random_state=0
)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose
)
assert u_based
assert not v_based
@pytest.mark.parametrize("n", [50, 100, 300])
@pytest.mark.parametrize("m", [50, 100, 300])
@pytest.mark.parametrize("k", [10, 20, 50])
@pytest.mark.parametrize("seed", range(5))
def test_randomized_svd_lapack_driver(n, m, k, seed):
# Check that different SVD drivers provide consistent results
# Matrix being compressed
rng = np.random.RandomState(seed)
X = rng.rand(n, m)
# Number of components
u1, s1, vt1 = randomized_svd(X, k, svd_lapack_driver="gesdd", random_state=0)
u2, s2, vt2 = randomized_svd(X, k, svd_lapack_driver="gesvd", random_state=0)
# Check shape and contents
assert u1.shape == u2.shape
assert_allclose(u1, u2, atol=0, rtol=1e-3)
assert s1.shape == s2.shape
assert_allclose(s1, s2, atol=0, rtol=1e-3)
assert vt1.shape == vt2.shape
assert_allclose(vt1, vt2, atol=0, rtol=1e-3)
def test_cartesian():
    """cartesian must enumerate the product in row-major order."""
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
    # Build the expected row-major enumeration with plain Python loops.
    true_out = np.array(
        [[a, b, c] for a in (1, 2, 3) for b in (4, 5) for c in (6, 7)]
    )
    assert_array_equal(true_out, cartesian(axes))

    # A single axis round-trips as a column vector.
    x = np.arange(3)
    assert_array_equal(x[:, np.newaxis], cartesian((x,)))
@pytest.mark.parametrize(
    "arrays, output_dtype",
    [
        # int32 + int64 -> promoted to int64.
        (
            [np.array([1, 2, 3], dtype=np.int32), np.array([4, 5], dtype=np.int64)],
            np.dtype(np.int64),
        ),
        # int32 + float64 -> promoted to float64.
        (
            [np.array([1, 2, 3], dtype=np.int32), np.array([4, 5], dtype=np.float64)],
            np.dtype(np.float64),
        ),
        # int32 + object -> falls back to object.
        (
            [np.array([1, 2, 3], dtype=np.int32), np.array(["x", "y"], dtype=object)],
            np.dtype(object),
        ),
    ],
)
def test_cartesian_mix_types(arrays, output_dtype):
    """Check that the cartesian product works with mixed types."""
    output = cartesian(arrays)
    # The result dtype should follow numpy's promotion of the input dtypes.
    assert output.dtype == output_dtype
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("as_list", (True, False))
def test_incremental_weighted_mean_and_variance_simple(dtype, as_list):
rng = np.random.RandomState(42)
mult = 10
X = rng.rand(1000, 20).astype(dtype) * mult
sample_weight = rng.rand(X.shape[0]) * mult
X1 = X.tolist() if as_list else X
mean, var, _ = _incremental_mean_and_var(X1, 0, 0, 0, sample_weight=sample_weight)
expected_mean = np.average(X, weights=sample_weight, axis=0)
expected_var = np.average(X**2, weights=sample_weight, axis=0) - expected_mean**2
assert_almost_equal(mean, expected_mean)
assert_almost_equal(var, expected_var)
@pytest.mark.parametrize(
    "array_namespace, device, dtype",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_incremental_weighted_mean_and_variance_array_api(
    array_namespace, device, dtype
):
    """Array API dispatch must reproduce the NumPy weighted mean/variance."""
    xp = _array_api_for_tests(array_namespace, device)
    rng = np.random.RandomState(42)
    mult = 10
    X = rng.rand(1000, 20).astype(dtype) * mult
    sample_weight = rng.rand(X.shape[0]).astype(dtype) * mult
    # NumPy reference computation.
    mean, var, _ = _incremental_mean_and_var(X, 0, 0, 0, sample_weight=sample_weight)
    X_xp = xp.asarray(X, device=device)
    sample_weight_xp = xp.asarray(sample_weight, device=device)
    with config_context(array_api_dispatch=True):
        mean_xp, var_xp, _ = _incremental_mean_and_var(
            X_xp, 0, 0, 0, sample_weight=sample_weight_xp
        )
    # The attributes like mean and var are computed and set with respect to the
    # maximum supported float dtype
    assert array_device(mean_xp) == array_device(X_xp)
    assert mean_xp.dtype == _max_precision_float_dtype(xp, device=device)
    assert array_device(var_xp) == array_device(X_xp)
    assert var_xp.dtype == _max_precision_float_dtype(xp, device=device)
    # Compare against the NumPy reference after moving the results to host.
    mean_xp = _convert_to_numpy(mean_xp, xp=xp)
    var_xp = _convert_to_numpy(var_xp, xp=xp)
    assert_allclose(mean, mean_xp)
    assert_allclose(var, var_xp)
@pytest.mark.parametrize("mean", [0, 1e7, -1e7])
@pytest.mark.parametrize("var", [1, 1e-8, 1e5])
@pytest.mark.parametrize(
"weight_loc, weight_scale", [(0, 1), (0, 1e-8), (1, 1e-8), (10, 1), (1e7, 1)]
)
def test_incremental_weighted_mean_and_variance(mean, var, weight_loc, weight_scale):
rng = np.random.RandomState(42)
# Testing of correctness and numerical stability
def _assert(X, sample_weight, expected_mean, expected_var):
n = X.shape[0]
for chunk_size in [1, n // 10 + 1, n // 4 + 1, n // 2 + 1, n]:
last_mean, last_weight_sum, last_var = 0, 0, 0
for batch in gen_batches(n, chunk_size):
last_mean, last_var, last_weight_sum = _incremental_mean_and_var(
X[batch],
last_mean,
last_var,
last_weight_sum,
sample_weight=sample_weight[batch],
)
assert_allclose(last_mean, expected_mean)
assert_allclose(last_var, expected_var, atol=1e-6)
size = (100, 20)
weight = rng.normal(loc=weight_loc, scale=weight_scale, size=size[0])
# Compare to weighted average: np.average
X = rng.normal(loc=mean, scale=var, size=size)
expected_mean = _safe_accumulator_op(np.average, X, weights=weight, axis=0)
expected_var = _safe_accumulator_op(
np.average, (X - expected_mean) ** 2, weights=weight, axis=0
)
_assert(X, weight, expected_mean, expected_var)
# Compare to unweighted mean: np.mean
X = rng.normal(loc=mean, scale=var, size=size)
ones_weight = np.ones(size[0])
expected_mean = _safe_accumulator_op(np.mean, X, axis=0)
expected_var = _safe_accumulator_op(np.var, X, axis=0)
_assert(X, ones_weight, expected_mean, expected_var)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incremental_weighted_mean_and_variance_ignore_nan(dtype):
old_means = np.array([535.0, 535.0, 535.0, 535.0])
old_variances = np.array([4225.0, 4225.0, 4225.0, 4225.0])
old_weight_sum = np.array([2, 2, 2, 2], dtype=np.int32)
sample_weights_X = np.ones(3)
sample_weights_X_nan = np.ones(4)
X = np.array(
[[170, 170, 170, 170], [430, 430, 430, 430], [300, 300, 300, 300]]
).astype(dtype)
X_nan = np.array(
[
[170, np.nan, 170, 170],
[np.nan, 170, 430, 430],
[430, 430, np.nan, 300],
[300, 300, 300, np.nan],
]
).astype(dtype)
X_means, X_variances, X_count = _incremental_mean_and_var(
X, old_means, old_variances, old_weight_sum, sample_weight=sample_weights_X
)
X_nan_means, X_nan_variances, X_nan_count = _incremental_mean_and_var(
X_nan,
old_means,
old_variances,
old_weight_sum,
sample_weight=sample_weights_X_nan,
)
assert_allclose(X_nan_means, X_means)
assert_allclose(X_nan_variances, X_variances)
assert_allclose(X_nan_count, X_count)
def test_incremental_variance_update_formulas():
    """Youngs & Cramer incremental update reproduces the batch statistics.

    Doggie data from https://www.mathsisfun.com/data/standard-deviation.html
    """
    column = [600, 470, 170, 430, 300]
    A = np.array([column] * 4).T

    # Feed the first two rows as already-seen statistics, update with the rest.
    split = 2
    seen, remainder = A[:split, :], A[split:, :]
    old_means = seen.mean(axis=0)
    old_variances = seen.var(axis=0)
    old_sample_count = np.full(seen.shape[1], seen.shape[0], dtype=np.int32)

    final_means, final_variances, final_count = _incremental_mean_and_var(
        remainder, old_means, old_variances, old_sample_count
    )

    # The incremental result must match the statistics over the full matrix.
    assert_almost_equal(final_means, A.mean(axis=0), 6)
    assert_almost_equal(final_variances, A.var(axis=0), 6)
    assert_almost_equal(final_count, A.shape[0])
def test_incremental_mean_and_variance_ignore_nan():
    """NaN entries are ignored by the unweighted incremental mean/variance."""
    old_means = np.full(4, 535.0)
    old_variances = np.full(4, 4225.0)
    old_sample_count = np.full(4, 2, dtype=np.int32)

    X = np.array([[170, 170, 170, 170], [430, 430, 430, 430], [300, 300, 300, 300]])
    # Same per-column values as X, with one NaN injected in every column.
    X_nan = np.array(
        [
            [170, np.nan, 170, 170],
            [np.nan, 170, 430, 430],
            [430, 430, np.nan, 300],
            [300, 300, 300, np.nan],
        ]
    )

    reference = _incremental_mean_and_var(X, old_means, old_variances, old_sample_count)
    with_nan = _incremental_mean_and_var(
        X_nan, old_means, old_variances, old_sample_count
    )
    # Means, variances and counts must all match the NaN-free reference.
    for expected, actual in zip(reference, with_nan):
        assert_allclose(actual, expected)
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X**2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean) ** 2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks for size 1
def naive_mean_variance_update(x, last_mean, last_variance, last_sample_count):
updated_sample_count = last_sample_count + 1
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = (
last_variance * samples_ratio
+ (x - last_mean) * (x - updated_mean) / updated_sample_count
)
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
A = np.vstack((A0, A1))
# Naive one pass var: >tol (=1063)
assert np.abs(np_var(A) - one_pass_var(A)).max() > tol
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = naive_mean_variance_update(A1[i, :], mean, var, n)
assert n == A.shape[0]
# the mean is also slightly unstable
assert np.abs(A.mean(axis=0) - mean).max() > 1e-6
assert np.abs(np_var(A) - var).max() > tol
# Robust implementation: <tol (177)
mean, var = A0[0, :], np.zeros(n_features)
n = np.full(n_features, n_samples // 2, dtype=np.int32)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_shortest_path.py | sklearn/utils/tests/test_shortest_path.py | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import single_source_shortest_path_length
def floyd_warshall_slow(graph, directed=False):
    """Reference all-pairs shortest paths via the O(N^3) Floyd-Warshall loop.

    Parameters
    ----------
    graph : ndarray of shape (N, N)
        Dense adjacency matrix where a zero entry means "no edge".  The array
        is modified in place.
    directed : bool, default=False
        When False, every edge may be traversed in both directions.

    Returns
    -------
    graph : ndarray of shape (N, N)
        Shortest-path distances, with 0 for unreachable pairs (matching the
        input convention for missing edges).
    """
    N = graph.shape[0]

    # Missing edges (zero entries) become infinitely long...
    # (the original comment claimed "nonzero entries", which was wrong)
    graph[graph == 0] = np.inf
    # ...except self-loops, which cost nothing.
    graph.flat[:: N + 1] = 0

    if not directed:
        # Symmetrize: keep the cheaper direction of every edge.
        graph = np.minimum(graph, graph.T)

    # Classic triple loop: relax every pair (i, j) through intermediate k.
    for k in range(N):
        for i in range(N):
            for j in range(N):
                graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])

    # Report unreachable pairs as 0 again.
    graph[np.isinf(graph)] = 0
    return graph
def generate_graph(N=20):
    """Deterministic random sparse distance matrix with a zero diagonal."""
    rng = np.random.RandomState(0)
    dist_matrix = rng.random_sample((N, N))
    # Distances are not direction-dependent: symmetrize first.
    dist_matrix = dist_matrix + dist_matrix.T
    # Knock out roughly half of the entries to sparsify the graph.
    rows = rng.randint(N, size=N * N // 2)
    cols = rng.randint(N, size=N * N // 2)
    dist_matrix[rows, cols] = 0
    # No self-distances.
    dist_matrix.flat[:: N + 1] = 0
    return dist_matrix
def test_shortest_path():
    """single_source_shortest_path_length agrees with brute-force Floyd-Warshall."""
    dist_matrix = generate_graph(20)
    # Compare path lengths, not costs: binarize the edge weights.
    dist_matrix[dist_matrix != 0] = 1

    for directed in (True, False):
        if not directed:
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        for i in range(dist_matrix.shape[0]):
            # Non-reachable nodes have distance 0 in graph_py; defaultdict(int)
            # gives the same convention for the dict-based result.
            dist_dict = defaultdict(int)
            dist_dict.update(single_source_shortest_path_length(dist_matrix, i))
            for j, expected in enumerate(graph_py[i]):
                assert_array_almost_equal(dist_dict[j], expected)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_cython_blas.py | sklearn/utils/tests/test_cython_blas.py | import numpy as np
import pytest
from sklearn.utils._cython_blas import (
BLAS_Order,
BLAS_Trans,
_asum_memview,
_axpy_memview,
_copy_memview,
_dot_memview,
_gemm_memview,
_gemv_memview,
_ger_memview,
_nrm2_memview,
_rot_memview,
_rotg_memview,
_scal_memview,
)
from sklearn.utils._testing import assert_allclose
def _numpy_to_cython(dtype):
    """Map a numpy floating dtype onto the matching cython scalar type."""
    cython = pytest.importorskip("cython")
    # Branches are mutually exclusive, so the order does not matter.
    if dtype == np.float64:
        return cython.double
    if dtype == np.float32:
        return cython.float
    # Any other dtype implicitly maps to None.
# Per-dtype relative tolerances used by the BLAS wrapper tests below.
RTOL = {np.float32: 1e-6, np.float64: 1e-12}
# Map the BLAS memory-order enum onto numpy's order flag.
ORDER = {BLAS_Order.RowMajor: "C", BLAS_Order.ColMajor: "F"}
def _no_op(x):
    """Identity; used as the "no transpose" operator in parametrizations."""
    return x
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_dot(dtype):
dot = _dot_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
y = rng.random_sample(10).astype(dtype, copy=False)
expected = x.dot(y)
actual = dot(x, y)
assert_allclose(actual, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_asum(dtype):
asum = _asum_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
expected = np.abs(x).sum()
actual = asum(x)
assert_allclose(actual, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_axpy(dtype):
axpy = _axpy_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
y = rng.random_sample(10).astype(dtype, copy=False)
alpha = 2.5
expected = alpha * x + y
axpy(alpha, x, y)
assert_allclose(y, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_nrm2(dtype):
nrm2 = _nrm2_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
expected = np.linalg.norm(x)
actual = nrm2(x)
assert_allclose(actual, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_copy(dtype):
copy = _copy_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
y = np.empty_like(x)
expected = x.copy()
copy(x, y)
assert_allclose(y, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_scal(dtype):
scal = _scal_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
alpha = 2.5
expected = alpha * x
scal(alpha, x)
assert_allclose(x, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_rotg(dtype):
rotg = _rotg_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
a = dtype(rng.randn())
b = dtype(rng.randn())
c, s = 0.0, 0.0
def expected_rotg(a, b):
roe = a if abs(a) > abs(b) else b
if a == 0 and b == 0:
c, s, r, z = (1, 0, 0, 0)
else:
r = np.sqrt(a**2 + b**2) * (1 if roe >= 0 else -1)
c, s = a / r, b / r
z = s if roe == a else (1 if c == 0 else 1 / c)
return r, z, c, s
expected = expected_rotg(a, b)
actual = rotg(a, b, c, s)
assert_allclose(actual, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_rot(dtype):
rot = _rot_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
y = rng.random_sample(10).astype(dtype, copy=False)
c = dtype(rng.randn())
s = dtype(rng.randn())
expected_x = c * x + s * y
expected_y = c * y - s * x
rot(x, y, c, s)
assert_allclose(x, expected_x)
assert_allclose(y, expected_y)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
"opA, transA",
[(_no_op, BLAS_Trans.NoTrans), (np.transpose, BLAS_Trans.Trans)],
ids=["NoTrans", "Trans"],
)
@pytest.mark.parametrize(
"order",
[BLAS_Order.RowMajor, BLAS_Order.ColMajor],
ids=["RowMajor", "ColMajor"],
)
def test_gemv(dtype, opA, transA, order):
gemv = _gemv_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
A = np.asarray(
opA(rng.random_sample((20, 10)).astype(dtype, copy=False)), order=ORDER[order]
)
x = rng.random_sample(10).astype(dtype, copy=False)
y = rng.random_sample(20).astype(dtype, copy=False)
alpha, beta = 2.5, -0.5
expected = alpha * opA(A).dot(x) + beta * y
gemv(transA, alpha, A, x, beta, y)
assert_allclose(y, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
"order",
[BLAS_Order.RowMajor, BLAS_Order.ColMajor],
ids=["BLAS_Order.RowMajor", "BLAS_Order.ColMajor"],
)
def test_ger(dtype, order):
ger = _ger_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
y = rng.random_sample(20).astype(dtype, copy=False)
A = np.asarray(
rng.random_sample((10, 20)).astype(dtype, copy=False), order=ORDER[order]
)
alpha = 2.5
expected = alpha * np.outer(x, y) + A
ger(alpha, x, y, A)
assert_allclose(A, expected, rtol=RTOL[dtype])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
"opB, transB",
[(_no_op, BLAS_Trans.NoTrans), (np.transpose, BLAS_Trans.Trans)],
ids=["NoTrans", "Trans"],
)
@pytest.mark.parametrize(
"opA, transA",
[(_no_op, BLAS_Trans.NoTrans), (np.transpose, BLAS_Trans.Trans)],
ids=["NoTrans", "Trans"],
)
@pytest.mark.parametrize(
"order",
[BLAS_Order.RowMajor, BLAS_Order.ColMajor],
ids=["BLAS_Order.RowMajor", "BLAS_Order.ColMajor"],
)
def test_gemm(dtype, opA, transA, opB, transB, order):
gemm = _gemm_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
A = np.asarray(
opA(rng.random_sample((30, 10)).astype(dtype, copy=False)), order=ORDER[order]
)
B = np.asarray(
opB(rng.random_sample((10, 20)).astype(dtype, copy=False)), order=ORDER[order]
)
C = np.asarray(
rng.random_sample((30, 20)).astype(dtype, copy=False), order=ORDER[order]
)
alpha, beta = 2.5, -0.5
expected = alpha * opA(A).dot(opB(B)) + beta * C
gemm(transA, transB, alpha, A, B, beta, C)
assert_allclose(C, expected, rtol=RTOL[dtype])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_fixes.py | sklearn/utils/tests/test_fixes.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import _object_dtype_isnan, _smallest_admissible_index_dtype
@pytest.mark.parametrize("dtype, val", ([object, 1], [object, "a"], [float, 1]))
def test_object_dtype_isnan(dtype, val):
X = np.array([[val, np.nan], [np.nan, val]], dtype=dtype)
expected_mask = np.array([[False, True], [True, False]])
mask = _object_dtype_isnan(X)
assert_array_equal(mask, expected_mask)
@pytest.mark.parametrize(
    "params, expected_dtype",
    [
        ({}, np.int32),  # default behaviour
        # int32 can still represent its own maximum value...
        ({"maxval": np.iinfo(np.int32).max}, np.int32),
        # ...but one past it requires int64.
        ({"maxval": np.iinfo(np.int32).max + 1}, np.int64),
    ],
)
def test_smallest_admissible_index_dtype_max_val(params, expected_dtype):
    """Check the behaviour of `smallest_admissible_index_dtype` depending only on the
    `max_val` parameter.
    """
    assert _smallest_admissible_index_dtype(**params) == expected_dtype
@pytest.mark.parametrize(
    "params, expected_dtype",
    [
        # Arrays dtype is int64 and thus should not be downcasted to int32
        # without checking the contents or providing maxval.
        ({"arrays": np.array([1, 2], dtype=np.int64)}, np.int64),
        # One of the arrays is int64 and should not be downcasted to int32
        # for the same reasons.
        (
            {
                "arrays": (
                    np.array([1, 2], dtype=np.int32),
                    np.array([1, 2], dtype=np.int64),
                )
            },
            np.int64,
        ),
        # Both arrays are already int32: we can just keep this dtype.
        (
            {
                "arrays": (
                    np.array([1, 2], dtype=np.int32),
                    np.array([1, 2], dtype=np.int32),
                )
            },
            np.int32,
        ),
        # Arrays should be upcasted to at least int32 precision.
        ({"arrays": np.array([1, 2], dtype=np.int8)}, np.int32),
        # Check that `maxval` takes precedence over the arrays and thus upcast to
        # int64.
        (
            {
                "arrays": np.array([1, 2], dtype=np.int32),
                "maxval": np.iinfo(np.int32).max + 1,
            },
            np.int64,
        ),
    ],
)
def test_smallest_admissible_index_dtype_without_checking_contents(
    params, expected_dtype
):
    """Check the behaviour of `smallest_admissible_index_dtype` using the passed
    arrays but without checking the contents of the arrays.
    """
    assert _smallest_admissible_index_dtype(**params) == expected_dtype
@pytest.mark.parametrize(
    "params, expected_dtype",
    [
        # Empty arrays should always be converted to int32 indices.
        (
            {
                "arrays": (np.array([], dtype=np.int64), np.array([], dtype=np.int64)),
                "check_contents": True,
            },
            np.int32,
        ),
        # Arrays respecting np.iinfo(np.int32).min < x < np.iinfo(np.int32).max
        # should be converted to int32.
        (
            {"arrays": np.array([1], dtype=np.int64), "check_contents": True},
            np.int32,
        ),
        # Otherwise, it should be converted to int64. We need to create a uint32
        # array to accommodate a value > np.iinfo(np.int32).max.
        (
            {
                "arrays": np.array([np.iinfo(np.int32).max + 1], dtype=np.uint32),
                "check_contents": True,
            },
            np.int64,
        ),
        # maxval should take precedence over the arrays contents and thus upcast
        # to int64.
        (
            {
                "arrays": np.array([1], dtype=np.int32),
                "check_contents": True,
                "maxval": np.iinfo(np.int32).max + 1,
            },
            np.int64,
        ),
        # When maxval is small, but check_contents is True and the contents
        # require np.int64, we still require np.int64 indexing in the end.
        (
            {
                "arrays": np.array([np.iinfo(np.int32).max + 1], dtype=np.uint32),
                "check_contents": True,
                "maxval": 1,
            },
            np.int64,
        ),
    ],
)
def test_smallest_admissible_index_dtype_by_checking_contents(params, expected_dtype):
    """Check the behaviour of `smallest_admissible_index_dtype` using both the
    dtype of the arrays and their contents.
    """
    assert _smallest_admissible_index_dtype(**params) == expected_dtype
@pytest.mark.parametrize(
    "params, err_type, err_msg",
    [
        (
            {"maxval": np.iinfo(np.int64).max + 1},
            ValueError,
            # NOTE(review): "to large" [sic] presumably mirrors the message
            # raised by the implementation — keep the match string in sync.
            "is to large to be represented as np.int64",
        ),
        (
            {"arrays": np.array([1, 2], dtype=np.float64)},
            ValueError,
            "Array dtype float64 is not supported",
        ),
        ({"arrays": [1, 2]}, TypeError, "Arrays should be of type np.ndarray"),
    ],
)
def test_smallest_admissible_index_dtype_error(params, err_type, err_msg):
    """Check that we raise the proper error message."""
    with pytest.raises(err_type, match=err_msg):
        _smallest_admissible_index_dtype(**params)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_graph.py | sklearn/utils/tests/test_graph.py | import numpy as np
import pytest
from scipy.sparse.csgraph import connected_components
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import kneighbors_graph
from sklearn.utils.graph import _fix_connected_components
def test_fix_connected_components():
    """_fix_connected_components merges a disconnected kNN graph into one piece."""
    X = np.array([0, 1, 2, 5, 6, 7])[:, None]
    graph = kneighbors_graph(X, n_neighbors=2, mode="distance")

    # Two well-separated clusters -> more than one component to start with.
    n_connected_components, labels = connected_components(graph)
    assert n_connected_components > 1

    graph = _fix_connected_components(X, graph, n_connected_components, labels)
    n_connected_components, labels = connected_components(graph)
    assert n_connected_components == 1
def test_fix_connected_components_precomputed():
    # Test that _fix_connected_components accepts a precomputed distance matrix.
    X = np.array([0, 1, 2, 5, 6, 7])[:, None]
    graph = kneighbors_graph(X, n_neighbors=2, mode="distance")
    n_connected_components, labels = connected_components(graph)
    assert n_connected_components > 1
    # A dense precomputed distance matrix is accepted and fixes the graph.
    distances = pairwise_distances(X)
    graph = _fix_connected_components(
        distances, graph, n_connected_components, labels, metric="precomputed"
    )
    n_connected_components, labels = connected_components(graph)
    assert n_connected_components == 1
    # but it does not work with a *sparse* precomputed neighbors graph
    with pytest.raises(RuntimeError, match="does not work with a sparse"):
        _fix_connected_components(
            graph, graph, n_connected_components, labels, metric="precomputed"
        )
def test_fix_connected_components_wrong_mode():
    # Test that an error is raised if the mode string is incorrect.
    X = np.array([0, 1, 2, 5, 6, 7])[:, None]
    graph = kneighbors_graph(X, n_neighbors=2, mode="distance")
    n_connected_components, labels = connected_components(graph)
    with pytest.raises(ValueError, match="Unknown mode"):
        graph = _fix_connected_components(
            X, graph, n_connected_components, labels, mode="foo"
        )
def test_fix_connected_components_connectivity_mode():
    """In connectivity mode the newly added edges get weight one."""
    X = np.array([0, 1, 6, 7])[:, None]
    graph = kneighbors_graph(X, n_neighbors=1, mode="connectivity")
    n_connected_components, labels = connected_components(graph)
    graph = _fix_connected_components(
        X, graph, n_connected_components, labels, mode="connectivity"
    )
    # Every stored edge weight, including the bridging ones, is exactly 1.
    assert (graph.data == 1).all()
def test_fix_connected_components_distance_mode():
    """In distance mode the newly added edges carry real distances, not ones."""
    X = np.array([0, 1, 6, 7])[:, None]
    graph = kneighbors_graph(X, n_neighbors=1, mode="distance")
    # Before the fix, every stored edge has unit length by construction.
    assert (graph.data == 1).all()

    n_connected_components, labels = connected_components(graph)
    graph = _fix_connected_components(
        X, graph, n_connected_components, labels, mode="distance"
    )
    # The bridging edges introduce non-unit distances.
    assert not (graph.data == 1).all()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_arpack.py | sklearn/utils/tests/test_arpack.py | import pytest
from numpy.testing import assert_allclose
from sklearn.utils import check_random_state
from sklearn.utils._arpack import _init_arpack_v0
@pytest.mark.parametrize("seed", range(100))
def test_init_arpack_v0(seed):
# check that the initialization a sampling from an uniform distribution
# where we can fix the random state
size = 1000
v0 = _init_arpack_v0(size, seed)
rng = check_random_state(seed)
assert_allclose(v0, rng.uniform(-1, 1, size=size))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_response.py | sklearn/utils/tests/test_response.py | import warnings
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.datasets import (
load_iris,
make_classification,
make_multilabel_classification,
make_regression,
)
from sklearn.ensemble import IsolationForest
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
)
from sklearn.multioutput import ClassifierChain
from sklearn.preprocessing import scale
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils._mocking import _MockEstimatorOnOffPrediction
from sklearn.utils._response import _get_response_values, _get_response_values_binary
from sklearn.utils._testing import assert_allclose, assert_array_equal
# Shared fixtures for the tests below: the full (3-class) iris problem and a
# binary slice of it.
X, y = load_iris(return_X_y=True)
# scale the data to avoid ConvergenceWarning with LogisticRegression
X = scale(X, copy=False)
# The first 100 samples cover only classes 0 and 1 (see the `\[0 1\]` error
# messages asserted below), giving a binary problem.
X_binary, y_binary = X[:100], y[:100]
@pytest.mark.parametrize(
    "response_method", ["decision_function", "predict_proba", "predict_log_proba"]
)
def test_get_response_values_regressor_error(response_method):
    """A non-classifier with a classifier-only response method is rejected."""
    estimator = _MockEstimatorOnOffPrediction(response_methods=[response_method])
    X = "mocking_data", "mocking_target"

    expected_msg = f"{estimator.__class__.__name__} should either be a classifier"
    with pytest.raises(ValueError, match=expected_msg):
        _get_response_values(estimator, X, response_method=response_method)
@pytest.mark.parametrize("return_response_method_used", [True, False])
def test_get_response_values_regressor(return_response_method_used):
"""Check the behaviour of `_get_response_values` with regressor."""
X, y = make_regression(n_samples=10, random_state=0)
regressor = LinearRegression().fit(X, y)
results = _get_response_values(
regressor,
X,
response_method="predict",
return_response_method_used=return_response_method_used,
)
assert_array_equal(results[0], regressor.predict(X))
assert results[1] is None
if return_response_method_used:
assert results[2] == "predict"
@pytest.mark.parametrize(
    "response_method",
    ["predict", "decision_function", ["decision_function", "predict"]],
)
@pytest.mark.parametrize("return_response_method_used", [True, False])
def test_get_response_values_outlier_detection(
    response_method, return_response_method_used
):
    """Check the behaviour of `_get_response_values` with outlier detector."""
    X, y = make_classification(n_samples=50, random_state=0)
    outlier_detector = IsolationForest(random_state=0).fit(X, y)
    results = _get_response_values(
        outlier_detector,
        X,
        response_method=response_method,
        return_response_method_used=return_response_method_used,
    )
    # When a list of methods is passed, the first entry is the one expected.
    chosen_response_method = (
        response_method[0] if isinstance(response_method, list) else response_method
    )
    prediction_method = getattr(outlier_detector, chosen_response_method)
    assert_array_equal(results[0], prediction_method(X))
    # Outlier detectors have no positive label.
    assert results[1] is None
    if return_response_method_used:
        assert results[2] == chosen_response_method
@pytest.mark.parametrize(
    "response_method",
    ["predict_proba", "decision_function", "predict", "predict_log_proba"],
)
def test_get_response_values_classifier_unknown_pos_label(response_method):
    """An out-of-vocabulary `pos_label` raises a descriptive ValueError."""
    X, y = make_classification(n_samples=10, n_classes=2, random_state=0)
    classifier = LogisticRegression().fit(X, y)

    # "whatever" is not among the fitted classes [0, 1].
    err_msg = r"pos_label=whatever is not a valid label: It should be one of \[0 1\]"
    with pytest.raises(ValueError, match=err_msg):
        _get_response_values(
            classifier, X, response_method=response_method, pos_label="whatever"
        )
@pytest.mark.parametrize("response_method", ["predict_proba", "predict_log_proba"])
def test_get_response_values_classifier_inconsistent_y_pred_for_binary_proba(
response_method,
):
"""Check that `_get_response_values` will raise an error when `y_pred` has a
single class with `predict_proba`."""
X, y_two_class = make_classification(n_samples=10, n_classes=2, random_state=0)
y_single_class = np.zeros_like(y_two_class)
classifier = DecisionTreeClassifier().fit(X, y_single_class)
err_msg = (
r"Got predict_proba of shape \(10, 1\), but need classifier with "
r"two classes"
)
with pytest.raises(ValueError, match=err_msg):
_get_response_values(classifier, X, response_method=response_method)
@pytest.mark.parametrize("return_response_method_used", [True, False])
def test_get_response_values_binary_classifier_decision_function(
return_response_method_used,
):
"""Check the behaviour of `_get_response_values` with `decision_function`
and binary classifier."""
X, y = make_classification(
n_samples=10,
n_classes=2,
weights=[0.3, 0.7],
random_state=0,
)
classifier = LogisticRegression().fit(X, y)
response_method = "decision_function"
# default `pos_label`
results = _get_response_values(
classifier,
X,
response_method=response_method,
pos_label=None,
return_response_method_used=return_response_method_used,
)
assert_allclose(results[0], classifier.decision_function(X))
assert results[1] == 1
if return_response_method_used:
assert results[2] == "decision_function"
# when forcing `pos_label=classifier.classes_[0]`
results = _get_response_values(
classifier,
X,
response_method=response_method,
pos_label=classifier.classes_[0],
return_response_method_used=return_response_method_used,
)
assert_allclose(results[0], classifier.decision_function(X) * -1)
assert results[1] == 0
if return_response_method_used:
assert results[2] == "decision_function"
@pytest.mark.parametrize("return_response_method_used", [True, False])
@pytest.mark.parametrize("response_method", ["predict_proba", "predict_log_proba"])
def test_get_response_values_binary_classifier_predict_proba(
    return_response_method_used, response_method
):
    """Check the behaviour of `_get_response_values` with `predict_proba` and
    binary classifier."""
    X, y = make_classification(
        n_samples=10,
        n_classes=2,
        weights=[0.3, 0.7],
        random_state=0,
    )
    classifier = LogisticRegression().fit(X, y)

    # default `pos_label`: the column of the positive class (1) is returned
    results = _get_response_values(
        classifier,
        X,
        response_method=response_method,
        pos_label=None,
        return_response_method_used=return_response_method_used,
    )
    assert_allclose(results[0], getattr(classifier, response_method)(X)[:, 1])
    assert results[1] == 1
    if return_response_method_used:
        assert len(results) == 3
        assert results[2] == response_method
    else:
        assert len(results) == 2

    # when forcing `pos_label=classifier.classes_[0]`: the first column is
    # returned instead
    y_pred, pos_label, *_ = _get_response_values(
        classifier,
        X,
        response_method=response_method,
        pos_label=classifier.classes_[0],
        return_response_method_used=return_response_method_used,
    )
    assert_allclose(y_pred, getattr(classifier, response_method)(X)[:, 0])
    assert pos_label == 0
@pytest.mark.parametrize(
    "estimator, X, y, err_msg, params",
    [
        # regressor used where a binary classifier is required
        (
            DecisionTreeRegressor(),
            X_binary,
            y_binary,
            "Expected 'estimator' to be a binary classifier",
            {"response_method": "auto"},
        ),
        # `pos_label` not among the fitted classes
        (
            DecisionTreeClassifier(),
            X_binary,
            y_binary,
            r"pos_label=unknown is not a valid label: It should be one of \[0 1\]",
            {"response_method": "auto", "pos_label": "unknown"},
        ),
        # multiclass classifier used where a binary one is required
        (
            DecisionTreeClassifier(),
            X,
            y,
            "be a binary classifier. Got 3 classes instead.",
            {"response_method": "predict_proba"},
        ),
    ],
)
def test_get_response_error(estimator, X, y, err_msg, params):
    """Check that we raise the proper error messages in _get_response_values_binary."""
    estimator = clone(estimator).fit(X, y)  # clone to make test execution thread-safe
    with pytest.raises(ValueError, match=err_msg):
        _get_response_values_binary(estimator, X, **params)
@pytest.mark.parametrize("return_response_method_used", [True, False])
def test_get_response_predict_proba(return_response_method_used):
    """Check the behaviour of `_get_response_values_binary` using `predict_proba`."""
    classifier = DecisionTreeClassifier().fit(X_binary, y_binary)

    # default `pos_label` (resolves to 1): the second probability column
    results = _get_response_values_binary(
        classifier,
        X_binary,
        response_method="predict_proba",
        return_response_method_used=return_response_method_used,
    )
    assert_allclose(results[0], classifier.predict_proba(X_binary)[:, 1])
    assert results[1] == 1
    if return_response_method_used:
        assert results[2] == "predict_proba"

    # `pos_label=0`: the first probability column
    results = _get_response_values_binary(
        classifier,
        X_binary,
        response_method="predict_proba",
        pos_label=0,
        return_response_method_used=return_response_method_used,
    )
    assert_allclose(results[0], classifier.predict_proba(X_binary)[:, 0])
    assert results[1] == 0
    if return_response_method_used:
        assert results[2] == "predict_proba"
@pytest.mark.parametrize("return_response_method_used", [True, False])
def test_get_response_decision_function(return_response_method_used):
    """Check the behaviour of `_get_response_values_binary` using decision_function."""
    classifier = LogisticRegression().fit(X_binary, y_binary)

    # default `pos_label` (resolves to 1): decision values returned untouched
    results = _get_response_values_binary(
        classifier,
        X_binary,
        response_method="decision_function",
        return_response_method_used=return_response_method_used,
    )
    assert_allclose(results[0], classifier.decision_function(X_binary))
    assert results[1] == 1
    if return_response_method_used:
        assert results[2] == "decision_function"

    # `pos_label=0`: decision values negated since the positive class flipped
    results = _get_response_values_binary(
        classifier,
        X_binary,
        response_method="decision_function",
        pos_label=0,
        return_response_method_used=return_response_method_used,
    )
    assert_allclose(results[0], classifier.decision_function(X_binary) * -1)
    assert results[1] == 0
    if return_response_method_used:
        assert results[2] == "decision_function"
@pytest.mark.parametrize(
    "estimator, response_method",
    [
        (DecisionTreeClassifier(max_depth=2, random_state=0), "predict_proba"),
        (DecisionTreeClassifier(max_depth=2, random_state=0), "predict_log_proba"),
        (LogisticRegression(), "decision_function"),
    ],
)
def test_get_response_values_multiclass(estimator, response_method):
    """Check that we can call `_get_response_values` with a multiclass estimator.

    It should return the predictions untouched.
    """
    estimator = clone(estimator)
    estimator.fit(X, y)
    predictions, pos_label = _get_response_values(
        estimator, X, response_method=response_method
    )
    # there is no notion of a positive label in the multiclass case
    assert pos_label is None
    assert predictions.shape == (X.shape[0], len(estimator.classes_))
    if response_method == "predict_proba":
        # probabilities lie within [0, 1]
        assert np.logical_and(predictions >= 0, predictions <= 1).all()
    elif response_method == "predict_log_proba":
        # log-probabilities are non-positive
        assert (predictions <= 0.0).all()
def test_get_response_values_with_response_list():
    """Check the behaviour of passing a list of responses to `_get_response_values`."""
    classifier = LogisticRegression().fit(X_binary, y_binary)

    # the first entry of the list should be the method actually used
    y_pred, pos_label, used_method = _get_response_values(
        classifier,
        X_binary,
        response_method=["predict_proba", "decision_function"],
        return_response_method_used=True,
    )
    assert used_method == "predict_proba"
    assert pos_label == 1
    assert_allclose(y_pred, classifier.predict_proba(X_binary)[:, 1])

    # swapping the order should switch to `decision_function`
    y_pred, pos_label, used_method = _get_response_values(
        classifier,
        X_binary,
        response_method=["decision_function", "predict_proba"],
        return_response_method_used=True,
    )
    assert used_method == "decision_function"
    assert pos_label == 1
    assert_allclose(y_pred, classifier.decision_function(X_binary))
@pytest.mark.parametrize(
    "response_method", ["predict_proba", "decision_function", "predict"]
)
def test_get_response_values_multilabel_indicator(response_method):
    """Check that `_get_response_values` with a multilabel-indicator target
    returns predictions with the same shape as `Y` and no `pos_label`."""
    X, Y = make_multilabel_classification(random_state=0)
    estimator = ClassifierChain(LogisticRegression()).fit(X, Y)

    y_pred, pos_label = _get_response_values(
        estimator, X, response_method=response_method
    )
    assert pos_label is None
    assert y_pred.shape == Y.shape

    if response_method == "predict_proba":
        assert np.logical_and(y_pred >= 0, y_pred <= 1).all()
    elif response_method == "decision_function":
        # values returned by `decision_function` are not bounded in [0, 1]
        assert (y_pred < 0).sum() > 0
        assert (y_pred > 1).sum() > 0
    else:  # response_method == "predict"
        assert np.logical_or(y_pred == 0, y_pred == 1).all()
def test_response_values_type_of_target_on_classes_no_warning():
    """
    Ensure `_get_response_values` doesn't raise spurious warning.

    "The number of unique classes is greater than > 50% of samples"
    warning should not be raised when calling `type_of_target(classes_)`.

    Non-regression test for issue #31583.
    """
    X = np.random.RandomState(0).randn(120, 3)
    # 30 classes, less than 50% of number of samples
    y = np.repeat(np.arange(30), 4)
    clf = LogisticRegression().fit(X, y)
    with warnings.catch_warnings():
        # escalate any UserWarning to an error so that the test fails if one
        # is emitted during the call below
        warnings.simplefilter("error", UserWarning)
        _get_response_values(clf, X, response_method="predict_proba")
@pytest.mark.parametrize(
    "estimator, response_method, target_type, expected_shape",
    [
        (LogisticRegression(), "predict", "binary", (10,)),
        (LogisticRegression(), "predict_proba", "binary", (10,)),
        (LogisticRegression(), "decision_function", "binary", (10,)),
        (LogisticRegression(), "predict", "multiclass", (10,)),
        (LogisticRegression(), "predict_proba", "multiclass", (10, 4)),
        (LogisticRegression(), "decision_function", "multiclass", (10, 4)),
        (ClassifierChain(LogisticRegression()), "predict", "multilabel", (10, 2)),
        (ClassifierChain(LogisticRegression()), "predict_proba", "multilabel", (10, 2)),
        (
            ClassifierChain(LogisticRegression()),
            "decision_function",
            "multilabel",
            (10, 2),
        ),
        (IsolationForest(), "predict", "binary", (10,)),
        (IsolationForest(), "predict", "multiclass", (10,)),
        (DecisionTreeRegressor(), "predict", "binary", (10,)),
        (DecisionTreeRegressor(), "predict", "multiclass", (10,)),
    ],
)
def test_response_values_output_shape_(
    estimator, response_method, target_type, expected_shape
):
    """
    Check that output shape corresponds to docstring description

    - for binary classification, it is a 1d array of shape `(n_samples,)`;
    - for multiclass classification
      - with response_method="predict", it is a 1d array of shape `(n_samples,)`;
      - otherwise, it is a 2d array of shape `(n_samples, n_classes)`;
    - for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
    - for outlier detection, it is a 1d array of shape `(n_samples,)`;
    - for regression, it is a 1d array of shape `(n_samples,)`.
    """
    X = np.random.RandomState(0).randn(10, 2)
    # build a target matching the requested `target_type`
    if target_type == "binary":
        y = np.array([0, 1] * 5)
    elif target_type == "multiclass":
        y = [0, 1, 2, 3, 0, 1, 2, 3, 3, 0]
    else:  # multilabel
        y = np.array([[0, 1], [1, 0]] * 5)
    clf = estimator.fit(X, y)
    y_pred, _ = _get_response_values(clf, X, response_method=response_method)
    assert y_pred.shape == expected_shape
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_mask.py | sklearn/utils/tests/test_mask.py | import pytest
from sklearn.utils._mask import safe_mask
from sklearn.utils.fixes import CSR_CONTAINERS
from sklearn.utils.validation import check_random_state
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_safe_mask(csr_container):
    """Check that `safe_mask` produces a mask usable on dense and sparse X."""
    rng = check_random_state(0)
    X_dense = rng.rand(5, 4)
    X_sparse = csr_container(X_dense)

    raw_mask = [False, False, True, True, True]
    dense_mask = safe_mask(X_dense, raw_mask)
    assert X_dense[dense_mask].shape[0] == 3

    sparse_mask = safe_mask(X_sparse, dense_mask)
    assert X_sparse[sparse_mask].shape[0] == 3
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_encode.py | sklearn/utils/tests/test_encode.py | import pickle
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.utils._encode import _check_unknown, _encode, _get_counts, _unique
@pytest.mark.parametrize(
    "values, expected",
    [
        (np.array([2, 1, 3, 1, 3], dtype="int64"), np.array([1, 2, 3], dtype="int64")),
        (
            np.array([2, 1, np.nan, 1, np.nan], dtype="float32"),
            np.array([1, 2, np.nan], dtype="float32"),
        ),
        (
            np.array(["b", "a", "c", "a", "c"], dtype=object),
            np.array(["a", "b", "c"], dtype=object),
        ),
        (
            np.array(["b", "a", None, "a", None], dtype=object),
            np.array(["a", "b", None], dtype=object),
        ),
        (np.array(["b", "a", "c", "a", "c"]), np.array(["a", "b", "c"])),
    ],
    ids=["int64", "float32-nan", "object", "object-None", "str"],
)
def test_encode_util(values, expected):
    """Check `_unique` and `_encode` agree across dtypes.

    `_unique` must return the sorted unique values (missing values last) and,
    on request, the inverse mapping and/or the counts; `_encode` must produce
    the same codes as the inverse returned by `_unique`.
    """
    uniques = _unique(values)
    assert_array_equal(uniques, expected)

    result, encoded = _unique(values, return_inverse=True)
    assert_array_equal(result, expected)
    assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))

    encoded = _encode(values, uniques=uniques)
    assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))

    result, counts = _unique(values, return_counts=True)
    assert_array_equal(result, expected)
    assert_array_equal(counts, np.array([2, 1, 2]))

    result, encoded, counts = _unique(values, return_inverse=True, return_counts=True)
    assert_array_equal(result, expected)
    assert_array_equal(encoded, np.array([1, 0, 2, 0, 2]))
    assert_array_equal(counts, np.array([2, 1, 2]))
def test_encode_with_check_unknown():
    """Check the `check_unknown` parameter of `_encode`."""
    known = np.array([1, 2, 3])
    data = np.array([1, 2, 3, 4])

    # with check_unknown=True (the default), unseen values raise
    with pytest.raises(ValueError, match="y contains previously unseen labels"):
        _encode(data, uniques=known, check_unknown=True)

    # disabling the check silences the error
    _encode(data, uniques=known, check_unknown=False)

    # for object dtype the parameter is ignored and the error is always raised
    known = np.array(["a", "b", "c"], dtype=object)
    data = np.array(["a", "b", "c", "d"], dtype=object)
    with pytest.raises(ValueError, match="y contains previously unseen labels"):
        _encode(data, uniques=known, check_unknown=False)
def _assert_check_unknown(values, uniques, expected_diff, expected_mask):
    """Assert `_check_unknown` output both with and without `return_mask`."""
    assert_array_equal(_check_unknown(values, uniques), expected_diff)

    unknown, mask = _check_unknown(values, uniques, return_mask=True)
    assert_array_equal(unknown, expected_diff)
    assert_array_equal(mask, expected_mask)
@pytest.mark.parametrize(
    "values, uniques, expected_diff, expected_mask",
    [
        # numeric dtype, with and without NaN on either side
        (np.array([1, 2, 3, 4]), np.array([1, 2, 3]), [4], [True, True, True, False]),
        (np.array([2, 1, 4, 5]), np.array([2, 5, 1]), [4], [True, True, False, True]),
        (np.array([2, 1, np.nan]), np.array([2, 5, 1]), [np.nan], [True, True, False]),
        (
            np.array([2, 1, 4, np.nan]),
            np.array([2, 5, 1, np.nan]),
            [4],
            [True, True, False, True],
        ),
        (
            np.array([2, 1, 4, np.nan]),
            np.array([2, 5, 1]),
            [4, np.nan],
            [True, True, False, False],
        ),
        (
            np.array([2, 1, 4, 5]),
            np.array([2, 5, 1, np.nan]),
            [4],
            [True, True, False, True],
        ),
        # object dtype
        (
            np.array(["a", "b", "c", "d"], dtype=object),
            np.array(["a", "b", "c"], dtype=object),
            np.array(["d"], dtype=object),
            [True, True, True, False],
        ),
        (
            np.array(["d", "c", "a", "b"], dtype=object),
            np.array(["a", "c", "b"], dtype=object),
            np.array(["d"], dtype=object),
            [False, True, True, True],
        ),
        # str dtype
        (
            np.array(["a", "b", "c", "d"]),
            np.array(["a", "b", "c"]),
            np.array(["d"]),
            [True, True, True, False],
        ),
        (
            np.array(["d", "c", "a", "b"]),
            np.array(["a", "c", "b"]),
            np.array(["d"]),
            [False, True, True, True],
        ),
    ],
)
def test_check_unknown(values, uniques, expected_diff, expected_mask):
    # `_check_unknown` must report values absent from `uniques` and flag them
    # in the validity mask
    _assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize("missing_value", [None, np.nan, float("nan")])
@pytest.mark.parametrize("pickle_uniques", [True, False])
def test_check_unknown_missing_values(missing_value, pickle_uniques):
    # check for check_unknown with missing values with object dtypes

    # missing value present in both `values` and `uniques`
    values = np.array(["d", "c", "a", "b", missing_value], dtype=object)
    uniques = np.array(["c", "a", "b", missing_value], dtype=object)
    if pickle_uniques:
        # round-trip so `uniques` no longer shares object identity with `values`
        uniques = pickle.loads(pickle.dumps(uniques))
    expected_diff = ["d"]
    expected_mask = [False, True, True, True, True]
    _assert_check_unknown(values, uniques, expected_diff, expected_mask)

    # missing value only in `values`: reported as unknown
    values = np.array(["d", "c", "a", "b", missing_value], dtype=object)
    uniques = np.array(["c", "a", "b"], dtype=object)
    if pickle_uniques:
        uniques = pickle.loads(pickle.dumps(uniques))
    expected_diff = ["d", missing_value]
    expected_mask = [False, True, True, True, False]
    _assert_check_unknown(values, uniques, expected_diff, expected_mask)

    # missing value is the only unknown
    values = np.array(["a", missing_value], dtype=object)
    uniques = np.array(["a", "b", "z"], dtype=object)
    if pickle_uniques:
        uniques = pickle.loads(pickle.dumps(uniques))
    expected_diff = [missing_value]
    expected_mask = [True, False]
    _assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize("missing_value", [np.nan, None, float("nan")])
@pytest.mark.parametrize("pickle_uniques", [True, False])
def test_unique_util_missing_values_objects(missing_value, pickle_uniques):
    # check for _unique and _encode with missing values with object dtypes
    values = np.array(["a", "c", "c", missing_value, "b"], dtype=object)
    expected_uniques = np.array(["a", "b", "c", missing_value], dtype=object)

    uniques = _unique(values)

    if missing_value is None:
        assert_array_equal(uniques, expected_uniques)
    else:  # missing_value == np.nan
        # NaN compares unequal to itself, so check the last entry separately
        assert_array_equal(uniques[:-1], expected_uniques[:-1])
        assert np.isnan(uniques[-1])

    if pickle_uniques:
        # round-trip so `uniques` no longer shares object identity with `values`
        uniques = pickle.loads(pickle.dumps(uniques))

    encoded = _encode(values, uniques=uniques)
    assert_array_equal(encoded, np.array([0, 2, 2, 3, 1]))
def test_unique_util_missing_values_numeric():
    """Check `_unique` and `_encode` handle NaN in float arrays."""
    data = np.array([3, 1, np.nan, 5, 3, np.nan], dtype=float)
    sorted_uniques = np.array([1, 3, 5, np.nan], dtype=float)
    inverse_codes = np.array([1, 0, 3, 2, 1, 3])

    assert_array_equal(_unique(data), sorted_uniques)

    uniques_out, inverse_out = _unique(data, return_inverse=True)
    assert_array_equal(uniques_out, sorted_uniques)
    assert_array_equal(inverse_out, inverse_codes)

    # `_encode` must agree with the inverse mapping returned by `_unique`
    assert_array_equal(_encode(data, uniques=uniques_out), inverse_codes)
def test_unique_util_with_all_missing_values():
    """Check `_unique` with both `None` and NaN missing values (object dtype)."""
    data = np.array([np.nan, "a", "c", "c", None, float("nan"), None], dtype=object)

    uniques = _unique(data)
    # regular values and `None` come first ...
    assert_array_equal(uniques[:-1], ["a", "c", None])
    # ... and NaN is ordered last
    assert np.isnan(uniques[-1])

    _, inverse = _unique(data, return_inverse=True)
    assert_array_equal(inverse, [3, 0, 1, 1, 2, 3, 2])
def test_check_unknown_with_both_missing_values():
    # test for both types of missing values for object dtype
    values = np.array([np.nan, "a", "c", "c", None, np.nan, None], dtype=object)

    diff = _check_unknown(values, known_values=np.array(["a", "c"], dtype=object))
    # `None` is reported before NaN among the unknown values
    assert diff[0] is None
    assert np.isnan(diff[1])

    diff, valid_mask = _check_unknown(
        values, known_values=np.array(["a", "c"], dtype=object), return_mask=True
    )
    assert diff[0] is None
    assert np.isnan(diff[1])
    # both kinds of missing values are flagged as invalid in the mask
    assert_array_equal(valid_mask, [False, True, True, True, False, False, False])
@pytest.mark.parametrize(
    "values, uniques, expected_counts",
    [
        (np.array([1] * 10 + [2] * 4 + [3] * 15), np.array([1, 2, 3]), [10, 4, 15]),
        # uniques absent from `values` get a count of 0
        (
            np.array([1] * 10 + [2] * 4 + [3] * 15),
            np.array([1, 2, 3, 5]),
            [10, 4, 15, 0],
        ),
        (
            np.array([np.nan] * 10 + [2] * 4 + [3] * 15),
            np.array([2, 3, np.nan]),
            [4, 15, 10],
        ),
        (
            np.array(["b"] * 4 + ["a"] * 16 + ["c"] * 20, dtype=object),
            ["a", "b", "c"],
            [16, 4, 20],
        ),
        # counts follow the order of `uniques`, not of `values`
        (
            np.array(["b"] * 4 + ["a"] * 16 + ["c"] * 20, dtype=object),
            ["c", "b", "a"],
            [20, 4, 16],
        ),
        (
            np.array([np.nan] * 4 + ["a"] * 16 + ["c"] * 20, dtype=object),
            ["c", np.nan, "a"],
            [20, 4, 16],
        ),
        (
            np.array(["b"] * 4 + ["a"] * 16 + ["c"] * 20, dtype=object),
            ["a", "b", "c", "e"],
            [16, 4, 20, 0],
        ),
    ],
)
def test_get_counts(values, uniques, expected_counts):
    # `_get_counts` returns per-unique occurrence counts aligned with `uniques`
    counts = _get_counts(values, uniques)
    assert_array_equal(counts, expected_counts)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_dataframe.py | sklearn/utils/tests/test_dataframe.py | """Tests for dataframe detection functions."""
import numpy as np
import pytest
from sklearn._min_dependencies import dependent_packages
from sklearn.utils._dataframe import is_df_or_series, is_pandas_df, is_polars_df
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("constructor_name", ["pyarrow", "dataframe", "polars"])
def test_is_df_or_series(constructor_name):
    """`is_df_or_series` is True for dataframe containers, False for ndarrays."""
    frame = _convert_container([[1, 4, 2], [3, 3, 6]], constructor_name)
    assert is_df_or_series(frame)
    assert not is_df_or_series(np.asarray([1, 2, 3]))
@pytest.mark.parametrize("constructor_name", ["pyarrow", "dataframe", "polars"])
def test_is_pandas_df_other_libraries(constructor_name):
    """Only the pandas container is recognized by `is_pandas_df`."""
    frame = _convert_container([[1, 4, 2], [3, 3, 6]], constructor_name)
    if constructor_name == "dataframe":
        assert is_pandas_df(frame)
    else:  # pyarrow or polars
        assert not is_pandas_df(frame)
def test_is_pandas_df():
    """Check behavior of is_pandas_df when pandas is installed."""
    pd = pytest.importorskip("pandas")
    frame = pd.DataFrame([[1, 2, 3]])
    assert is_pandas_df(frame)
    # ndarrays and scalars are not pandas DataFrames
    for non_frame in (np.asarray([1, 2, 3]), 1):
        assert not is_pandas_df(non_frame)
def test_is_pandas_df_pandas_not_installed(hide_available_pandas):
    """Check is_pandas_df when pandas is not installed."""
    for candidate in (np.asarray([1, 2, 3]), 1):
        assert not is_pandas_df(candidate)
@pytest.mark.parametrize(
    "constructor_name, minversion",
    [
        ("pyarrow", dependent_packages["pyarrow"][0]),
        ("dataframe", dependent_packages["pandas"][0]),
        ("polars", dependent_packages["polars"][0]),
    ],
)
def test_is_polars_df_other_libraries(constructor_name, minversion):
    """Only the polars container is recognized by `is_polars_df`."""
    df = _convert_container(
        [[1, 4, 2], [3, 3, 6]],
        constructor_name,
        minversion=minversion,
    )
    if constructor_name in ("pyarrow", "dataframe"):
        assert not is_polars_df(df)
    else:
        assert is_polars_df(df)
def test_is_polars_df_for_duck_typed_polars_dataframe():
    """Check is_polars_df for object that looks like a polars dataframe"""

    class FakePolarsFrame:
        # mimic the attributes exposed by a polars DataFrame
        columns = [1, 2, 3]
        schema = "my_schema"

    assert not is_polars_df(FakePolarsFrame())
def test_is_polars_df():
    """Check that is_polars_df return False for non-dataframe objects."""

    class DuckTypedFrame:
        # duck-typed attributes alone must not fool the check
        columns = ["a", "b"]
        schema = ["a", "b"]

    assert not is_polars_df(DuckTypedFrame())
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_plotting.py | sklearn/utils/tests/test_plotting.py | import numpy as np
import pytest
from sklearn.linear_model import LogisticRegression
from sklearn.utils._plotting import (
_BinaryClassifierCurveDisplayMixin,
_deprecate_estimator_name,
_despine,
_interval_max_min_ratio,
_validate_score_name,
_validate_style_kwargs,
)
from sklearn.utils._response import _get_response_values_binary
from sklearn.utils._testing import assert_allclose
@pytest.mark.parametrize("ax", [None, "Ax"])
@pytest.mark.parametrize(
    "name, expected_name_out", [(None, "TestEstimator"), ("CustomName", "CustomName")]
)
def test_validate_plot_params(pyplot, ax, name, expected_name_out):
    """Check `_validate_plot_params` returns the correct values."""
    display = _BinaryClassifierCurveDisplayMixin()
    # `estimator_name` is the fallback used when no `name` is passed
    display.estimator_name = "TestEstimator"
    if ax:
        _, ax = pyplot.subplots()
    ax_out, _, name_out = display._validate_plot_params(ax=ax, name=name)
    assert name_out == expected_name_out
    # a user-provided axes object must be returned unchanged
    if ax:
        assert ax == ax_out
@pytest.mark.parametrize("pos_label", [None, 0])
@pytest.mark.parametrize("name", [None, "CustomName"])
@pytest.mark.parametrize(
    "response_method", ["auto", "predict_proba", "decision_function"]
)
def test_validate_and_get_response_values(pyplot, pos_label, name, response_method):
    """Check `_validate_and_get_response_values` returns the correct values."""
    X = np.array([[0, 0], [1, 1], [2, 2], [3, 3]])
    y = np.array([0, 0, 2, 2])
    estimator = LogisticRegression().fit(X, y)
    y_pred, pos_label, name_out = (
        _BinaryClassifierCurveDisplayMixin._validate_and_get_response_values(
            estimator,
            X,
            y,
            response_method=response_method,
            pos_label=pos_label,
            name=name,
        )
    )
    # the mixin must agree with `_get_response_values_binary`
    expected_y_pred, expected_pos_label = _get_response_values_binary(
        estimator, X, response_method=response_method, pos_label=pos_label
    )
    assert_allclose(y_pred, expected_y_pred)
    assert pos_label == expected_pos_label

    # Check name is handled correctly: fall back to the estimator class name
    expected_name = name if name is not None else "LogisticRegression"
    assert name_out == expected_name
@pytest.mark.parametrize(
    "y_true, error_message",
    [
        # non-binary target
        (np.array([0, 1, 2]), "The target y is not binary."),
        # length mismatch with y_pred
        (np.array([0, 1]), "Found input variables with inconsistent"),
        # default pos_label inconsistent with the observed labels
        (np.array([0, 2, 0, 2]), r"y_true takes value in \{0, 2\} and pos_label"),
    ],
)
def test_validate_from_predictions_params_errors(pyplot, y_true, error_message):
    """Check `_validate_from_predictions_params` raises the correct errors."""
    y_pred = np.array([0.1, 0.2, 0.3, 0.4])
    sample_weight = np.ones(4)
    with pytest.raises(ValueError, match=error_message):
        _BinaryClassifierCurveDisplayMixin._validate_from_predictions_params(
            y_true=y_true,
            y_pred=y_pred,
            sample_weight=sample_weight,
            pos_label=None,
        )
@pytest.mark.parametrize("name", [None, "CustomName"])
@pytest.mark.parametrize(
    "pos_label, y_true",
    [
        (None, np.array([0, 1, 0, 1])),
        (2, np.array([0, 2, 0, 2])),
    ],
)
def test_validate_from_predictions_params_returns(pyplot, name, pos_label, y_true):
    """Check `_validate_from_predictions_params` returns the correct values."""
    y_pred = np.array([0.1, 0.2, 0.3, 0.4])
    pos_label_out, name_out = (
        _BinaryClassifierCurveDisplayMixin._validate_from_predictions_params(
            y_true=y_true,
            y_pred=y_pred,
            sample_weight=None,
            pos_label=pos_label,
            name=name,
        )
    )
    # Check name is handled correctly: default to the generic "Classifier"
    expected_name = name if name is not None else "Classifier"
    assert name_out == expected_name
    # Check pos_label is handled correctly: default to 1
    expected_pos_label = pos_label if pos_label is not None else 1
    assert pos_label_out == expected_pos_label
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {
                # Missing "indices" key
                "cv_results": {"estimator": "dummy"},
                "X": np.array([[1, 2], [3, 4]]),
                "y": np.array([0, 1]),
                "sample_weight": None,
            },
            "`cv_results` does not contain one of the following",
        ),
        (
            {
                "cv_results": {
                    "estimator": "dummy",
                    "indices": {"test": [[1, 2], [1, 2]], "train": [[3, 4], [3, 4]]},
                },
                # `X` wrong length
                "X": np.array([[1, 2]]),
                "y": np.array([0, 1]),
                "sample_weight": None,
            },
            "`X` does not contain the correct number of",
        ),
        (
            {
                "cv_results": {
                    "estimator": "dummy",
                    "indices": {"test": [[1, 2], [1, 2]], "train": [[3, 4], [3, 4]]},
                },
                "X": np.array([1, 2, 3, 4]),
                # `y` not binary
                "y": np.array([0, 2, 1, 3]),
                "sample_weight": None,
            },
            "The target `y` is not binary",
        ),
        (
            {
                "cv_results": {
                    "estimator": "dummy",
                    "indices": {"test": [[1, 2], [1, 2]], "train": [[3, 4], [3, 4]]},
                },
                "X": np.array([1, 2, 3, 4]),
                "y": np.array([0, 1, 0, 1]),
                # `sample_weight` wrong length
                "sample_weight": np.array([0.5]),
            },
            "Found input variables with inconsistent",
        ),
    ],
)
def test_validate_from_cv_results_params(pyplot, params, err_msg):
    """Check `_validate_from_cv_results_params` raises on malformed inputs."""
    with pytest.raises(ValueError, match=err_msg):
        _BinaryClassifierCurveDisplayMixin()._validate_from_cv_results_params(**params)
@pytest.mark.parametrize(
    "curve_legend_metric, curve_name, expected_label",
    [
        (0.85, None, "AUC = 0.85"),
        (None, "Model A", "Model A"),
        (0.95, "Random Forest", "Random Forest (AUC = 0.95)"),
        (None, None, None),
    ],
)
def test_get_legend_label(curve_legend_metric, curve_name, expected_label):
    """Check that `_get_legend_label` combines name and metric as expected."""
    result = _BinaryClassifierCurveDisplayMixin._get_legend_label(
        curve_legend_metric, curve_name, "AUC"
    )
    assert result == expected_label
# TODO(1.9) : Remove
@pytest.mark.parametrize("curve_kwargs", [{"alpha": 1.0}, None])
@pytest.mark.parametrize("kwargs", [{}, {"alpha": 1.0}])
def test_validate_curve_kwargs_deprecate_kwargs(curve_kwargs, kwargs):
    """Check `_validate_curve_kwargs` deprecates kwargs correctly."""
    n_curves = 1
    name = None
    legend_metric = {"mean": 0.8, "std": 0.1}
    legend_metric_name = "AUC"
    if curve_kwargs and kwargs:
        # providing both is ambiguous and must raise
        with pytest.raises(ValueError, match="Cannot provide both `curve_kwargs`"):
            _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
                n_curves,
                name,
                legend_metric,
                legend_metric_name,
                curve_kwargs,
                **kwargs,
            )
    elif kwargs:
        # `**kwargs` alone is still accepted but deprecated
        with pytest.warns(FutureWarning, match=r"`\*\*kwargs` is deprecated and"):
            _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
                n_curves,
                name,
                legend_metric,
                legend_metric_name,
                curve_kwargs,
                **kwargs,
            )
    else:
        # No warning or error should be raised
        _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
            n_curves, name, legend_metric, legend_metric_name, curve_kwargs, **kwargs
        )
def test_validate_curve_kwargs_error():
    """Check `_validate_curve_kwargs` performs parameter validation correctly."""
    n_curves = 3
    legend_metric = {"mean": 0.8, "std": 0.1}
    legend_metric_name = "AUC"
    # a list of per-curve kwargs must match `n_curves` in length
    with pytest.raises(ValueError, match="`curve_kwargs` must be None"):
        _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
            n_curves=n_curves,
            name=None,
            legend_metric=legend_metric,
            legend_metric_name=legend_metric_name,
            curve_kwargs=[{"alpha": 1.0}],
        )
    # a list of names without per-curve kwargs must raise
    with pytest.raises(ValueError, match="To avoid labeling individual curves"):
        name = ["one", "two", "three"]
        _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
            n_curves=n_curves,
            name=name,
            legend_metric=legend_metric,
            legend_metric_name=legend_metric_name,
            curve_kwargs=None,
        )
        # NOTE(review): this second call appears unreachable — the call above
        # raises inside the same `pytest.raises` block, which then exits.
        # Presumably it was meant to live outside the block (a single dict of
        # kwargs with a list of names raising too) — TODO confirm intent.
        _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
            n_curves=n_curves,
            name=name,
            legend_metric=legend_metric,
            legend_metric_name=legend_metric_name,
            curve_kwargs={"alpha": 1.0},
        )
@pytest.mark.parametrize("name", [None, "curve_name", ["curve_name"]])
@pytest.mark.parametrize(
    "legend_metric",
    [{"mean": 0.8, "std": 0.2}, {"mean": None, "std": None}],
)
@pytest.mark.parametrize("legend_metric_name", ["AUC", "AP"])
@pytest.mark.parametrize("curve_kwargs", [None, {"color": "red"}])
def test_validate_curve_kwargs_single_legend(
    name, legend_metric, legend_metric_name, curve_kwargs
):
    """Check `_validate_curve_kwargs` returns correct kwargs for single legend entry."""
    n_curves = 3
    curve_kwargs_out = _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
        n_curves=n_curves,
        name=name,
        legend_metric=legend_metric,
        legend_metric_name=legend_metric_name,
        curve_kwargs=curve_kwargs,
    )
    assert isinstance(curve_kwargs_out, list)
    assert len(curve_kwargs_out) == n_curves

    # Build the label expected on the first (only labelled) curve.
    expected_label = None
    if isinstance(name, list):
        name = name[0]
    if name is not None:
        expected_label = name
        if legend_metric["mean"] is not None:
            expected_label = expected_label + f" ({legend_metric_name} = 0.80 +/- 0.20)"
    # `name` is None
    elif legend_metric["mean"] is not None:
        expected_label = f"{legend_metric_name} = 0.80 +/- 0.20"
    assert curve_kwargs_out[0]["label"] == expected_label
    # All remaining curves should have None as "label"
    assert curve_kwargs_out[1]["label"] is None
    assert curve_kwargs_out[2]["label"] is None

    # user-provided kwargs are propagated to every curve
    if curve_kwargs is None:
        assert all("color" not in kwargs for kwargs in curve_kwargs_out)
    else:
        assert all(kwargs["color"] == "red" for kwargs in curve_kwargs_out)
@pytest.mark.parametrize("name", [None, "curve_name", ["one", "two", "three"]])
@pytest.mark.parametrize(
    "legend_metric", [{"metric": [1.0, 1.0, 1.0]}, {"metric": [None, None, None]}]
)
@pytest.mark.parametrize("legend_metric_name", ["AUC", "AP"])
def test_validate_curve_kwargs_multi_legend(name, legend_metric, legend_metric_name):
    """Check `_validate_curve_kwargs` returns correct kwargs for multi legend entry."""
    n_curves = 3
    curve_kwargs = [{"color": "red"}, {"color": "yellow"}, {"color": "blue"}]
    curve_kwargs_out = _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
        n_curves=n_curves,
        name=name,
        legend_metric=legend_metric,
        legend_metric_name=legend_metric_name,
        curve_kwargs=curve_kwargs,
    )
    assert isinstance(curve_kwargs_out, list)
    assert len(curve_kwargs_out) == n_curves

    # Build the labels expected on each curve.
    expected_labels = [None, None, None]
    if isinstance(name, str):
        # a single name is repeated for every curve
        expected_labels = "curve_name"
        if legend_metric["metric"][0] is not None:
            expected_labels = expected_labels + f" ({legend_metric_name} = 1.00)"
        expected_labels = [expected_labels] * n_curves
    elif isinstance(name, list) and legend_metric["metric"][0] is None:
        expected_labels = name
    elif isinstance(name, list) and legend_metric["metric"][0] is not None:
        expected_labels = [
            f"{name_single} ({legend_metric_name} = 1.00)" for name_single in name
        ]
    # `name` is None
    elif legend_metric["metric"][0] is not None:
        expected_labels = [f"{legend_metric_name} = 1.00"] * n_curves

    for idx, expected_label in enumerate(expected_labels):
        assert curve_kwargs_out[idx]["label"] == expected_label
    # the per-curve kwargs are forwarded in order
    for curve_kwarg, curve_kwarg_out in zip(curve_kwargs, curve_kwargs_out):
        assert curve_kwarg_out["color"] == curve_kwarg["color"]
@pytest.mark.parametrize("curve_kwargs", [None, {"color": "red"}])
@pytest.mark.parametrize("n_curves", [1, 3])
def test_validate_curve_kwargs_default_kwargs(n_curves, curve_kwargs):
    """Check default kwargs are incorporated correctly."""
    curve_kwargs_out = _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs(
        n_curves=n_curves,
        name="test",
        legend_metric={"mean": 0.8, "std": 0.2},
        legend_metric_name="metric",
        curve_kwargs=curve_kwargs,
        default_curve_kwargs={"color": "blue"},
        default_multi_curve_kwargs={"alpha": 0.7, "linestyle": "--", "color": "green"},
    )
    if n_curves > 1:
        # `default_multi_curve_kwargs` are incorporated
        assert all(kwarg["alpha"] == 0.7 for kwarg in curve_kwargs_out)
        assert all(kwarg["linestyle"] == "--" for kwarg in curve_kwargs_out)
        if curve_kwargs is None:
            # `default_multi_curve_kwargs` over-rides `default_curve_kwargs`
            assert all(kwarg["color"] == "green" for kwarg in curve_kwargs_out)
        else:
            # `curve_kwargs` over-rides any defaults
            assert all(kwarg["color"] == "red" for kwarg in curve_kwargs_out)
    # Single curve
    elif curve_kwargs is None:
        # Use `default_curve_kwargs`
        assert all(kwarg["color"] == "blue" for kwarg in curve_kwargs_out)
    else:
        # Use `curve_kwargs`
        assert all(kwarg["color"] == "red" for kwarg in curve_kwargs_out)
def metric():
pass # pragma: no cover
def neg_metric():
pass # pragma: no cover
@pytest.mark.parametrize(
"score_name, scoring, negate_score, expected_score_name",
[
("accuracy", None, False, "accuracy"), # do not transform the name
(None, "accuracy", False, "Accuracy"), # capitalize the name
(None, "accuracy", True, "Negative accuracy"), # add "Negative"
(None, "neg_mean_absolute_error", False, "Negative mean absolute error"),
(None, "neg_mean_absolute_error", True, "Mean absolute error"), # remove "neg_"
("MAE", "neg_mean_absolute_error", True, "MAE"), # keep score_name
(None, None, False, "Score"), # default name
(None, None, True, "Negative score"), # default name but negated
("Some metric", metric, False, "Some metric"), # do not transform the name
("Some metric", metric, True, "Some metric"), # do not transform the name
(None, metric, False, "Metric"), # default name
(None, metric, True, "Negative metric"), # default name but negated
("Some metric", neg_metric, False, "Some metric"), # do not transform the name
("Some metric", neg_metric, True, "Some metric"), # do not transform the name
(None, neg_metric, False, "Negative metric"), # default name
(None, neg_metric, True, "Metric"), # default name but negated
],
)
def test_validate_score_name(score_name, scoring, negate_score, expected_score_name):
"""Check that we return the right score name."""
assert (
_validate_score_name(score_name, scoring, negate_score) == expected_score_name
)
# In the following test, we check the value of the max to min ratio
# for parameter value intervals to check that using a decision threshold
# of 5. is a good heuristic to decide between linear and log scales on
# common ranges of parameter values.
@pytest.mark.parametrize(
"data, lower_bound, upper_bound",
[
# Such a range could be clearly displayed with either log scale or linear
# scale.
(np.geomspace(0.1, 1, 5), 5, 6),
# Checking that the ratio is still positive on a negative log scale.
(-np.geomspace(0.1, 1, 10), 7, 8),
# Evenly spaced parameter values lead to a ratio of 1.
(np.linspace(0, 1, 5), 0.9, 1.1),
# This is not exactly spaced on a log scale but we will benefit from treating
# it as such for visualization.
([1, 2, 5, 10, 20, 50], 20, 40),
],
)
def test_inverval_max_min_ratio(data, lower_bound, upper_bound):
assert lower_bound < _interval_max_min_ratio(data) < upper_bound
@pytest.mark.parametrize(
"default_kwargs, user_kwargs, expected",
[
(
{"color": "blue", "linewidth": 2},
{"linestyle": "dashed"},
{"color": "blue", "linewidth": 2, "linestyle": "dashed"},
),
(
{"color": "blue", "linestyle": "solid"},
{"c": "red", "ls": "dashed"},
{"color": "red", "linestyle": "dashed"},
),
(
{"label": "xxx", "color": "k", "linestyle": "--"},
{"ls": "-."},
{"label": "xxx", "color": "k", "linestyle": "-."},
),
({}, {}, {}),
(
{},
{
"ls": "dashed",
"c": "red",
"ec": "black",
"fc": "yellow",
"lw": 2,
"mec": "green",
"mfcalt": "blue",
"ms": 5,
},
{
"linestyle": "dashed",
"color": "red",
"edgecolor": "black",
"facecolor": "yellow",
"linewidth": 2,
"markeredgecolor": "green",
"markerfacecoloralt": "blue",
"markersize": 5,
},
),
],
)
def test_validate_style_kwargs(default_kwargs, user_kwargs, expected):
"""Check the behaviour of `validate_style_kwargs` with various type of entries."""
result = _validate_style_kwargs(default_kwargs, user_kwargs)
assert result == expected, (
"The validation of style keywords does not provide the expected results: "
f"Got {result} instead of {expected}."
)
@pytest.mark.parametrize(
"default_kwargs, user_kwargs",
[({}, {"ls": 2, "linestyle": 3}), ({}, {"c": "r", "color": "blue"})],
)
def test_validate_style_kwargs_error(default_kwargs, user_kwargs):
"""Check that `validate_style_kwargs` raises TypeError"""
with pytest.raises(TypeError):
_validate_style_kwargs(default_kwargs, user_kwargs)
def test_despine(pyplot):
ax = pyplot.gca()
_despine(ax)
assert ax.spines["top"].get_visible() is False
assert ax.spines["right"].get_visible() is False
assert ax.spines["bottom"].get_bounds() == (0, 1)
assert ax.spines["left"].get_bounds() == (0, 1)
@pytest.mark.parametrize("estimator_name", ["my_est_name", "deprecated"])
@pytest.mark.parametrize("name", [None, "my_name"])
def test_deprecate_estimator_name(estimator_name, name):
"""Check `_deprecate_estimator_name` behaves correctly"""
version = "1.7"
version_remove = "1.9"
if estimator_name == "deprecated":
name_out = _deprecate_estimator_name(estimator_name, name, version)
assert name_out == name
# `estimator_name` is provided and `name` is:
elif name is None:
warning_message = (
f"`estimator_name` is deprecated in {version} and will be removed in "
f"{version_remove}. Use `name` instead."
)
with pytest.warns(FutureWarning, match=warning_message):
result = _deprecate_estimator_name(estimator_name, name, version)
assert result == estimator_name
elif name is not None:
error_message = (
f"Cannot provide both `estimator_name` and `name`. `estimator_name` "
f"is deprecated in {version} and will be removed in {version_remove}. "
)
with pytest.raises(ValueError, match=error_message):
_deprecate_estimator_name(estimator_name, name, version)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_typedefs.py | sklearn/utils/tests/test_typedefs.py | import numpy as np
import pytest
from sklearn.utils._typedefs import testing_make_array_from_typed_val
@pytest.mark.parametrize(
"type_t, value, expected_dtype",
[
("float64_t", 1.0, np.float64),
("float32_t", 1.0, np.float32),
("intp_t", 1, np.intp),
("int8_t", 1, np.int8),
("int32_t", 1, np.int32),
("int64_t", 1, np.int64),
("uint8_t", 1, np.uint8),
("uint32_t", 1, np.uint32),
("uint64_t", 1, np.uint64),
],
)
def test_types(type_t, value, expected_dtype):
"""Check that the types defined in _typedefs correspond to the expected
numpy dtypes.
"""
assert testing_make_array_from_typed_val[type_t](value).dtype == expected_dtype
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/__init__.py | sklearn/utils/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_validation.py | sklearn/utils/tests/test_validation.py | """Tests for input validation functions"""
import numbers
import re
import warnings
from itertools import product
from operator import itemgetter
from tempfile import NamedTemporaryFile
import numpy as np
import pytest
import scipy.sparse as sp
from pytest import importorskip
import sklearn
from sklearn._config import config_context
from sklearn.base import BaseEstimator
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import NotFittedError, PositiveSpectrumWarning
from sklearn.linear_model import ARDRegression
# TODO: add this estimator into the _mocking module in a further refactoring
from sklearn.metrics.tests.test_score_objects import EstimatorWithFit
from sklearn.neighbors import KNeighborsClassifier
from sklearn.random_projection import _sparse_random_matrix
from sklearn.svm import SVR
from sklearn.utils import (
_safe_indexing,
as_float_array,
check_array,
check_symmetric,
check_X_y,
deprecated,
)
from sklearn.utils._array_api import (
_convert_to_numpy,
_get_namespace_device_dtype_ids,
_is_numpy_namespace,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._mocking import (
MockDataFrame,
_MockEstimatorOnOffPrediction,
)
from sklearn.utils._testing import (
SkipTest,
TempMemmap,
_array_api_for_tests,
_convert_container,
assert_allclose,
assert_allclose_dense_sparse,
assert_array_equal,
create_memmap_backed_data,
skip_if_array_api_compat_not_configured,
)
from sklearn.utils.estimator_checks import _NotAnArray
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DIA_CONTAINERS,
DOK_CONTAINERS,
)
from sklearn.utils.validation import (
FLOAT_DTYPES,
_allclose_dense_sparse,
_check_feature_names_in,
_check_method_params,
_check_pos_label_consistency,
_check_psd_eigenvalues,
_check_response_method,
_check_sample_weight,
_check_y,
_deprecate_positional_args,
_estimator_has,
_get_feature_names,
_is_fitted,
_num_features,
_num_samples,
_to_object_array,
assert_all_finite,
check_consistent_length,
check_is_fitted,
check_memory,
check_non_negative,
check_random_state,
check_scalar,
column_or_1d,
has_fit_parameter,
validate_data,
)
def test_make_rng():
# Check the check_random_state utility function behavior
assert check_random_state(None) is np.random.mtrand._rand
assert check_random_state(np.random) is np.random.mtrand._rand
rng_42 = np.random.RandomState(42)
assert check_random_state(42).randint(100) == rng_42.randint(100)
rng_42 = np.random.RandomState(42)
assert check_random_state(rng_42) is rng_42
rng_42 = np.random.RandomState(42)
assert check_random_state(43).randint(100) != rng_42.randint(100)
with pytest.raises(ValueError):
check_random_state("some invalid seed")
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
X2 = as_float_array(X, copy=False)
assert X2.dtype == np.float32
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert as_float_array(X, copy=False) is not X
assert X2.dtype == np.float64
# Test int dtypes <= 32bit
tested_dtypes = [bool, np.int8, np.int16, np.int32, np.uint8, np.uint16, np.uint32]
for dtype in tested_dtypes:
X = X.astype(dtype)
X2 = as_float_array(X)
assert X2.dtype == np.float32
# Test object dtype
X = X.astype(object)
X2 = as_float_array(X, copy=True)
assert X2.dtype == np.float64
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert as_float_array(X, copy=False) is X
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert np.isfortran(as_float_array(X, copy=True))
# Test the copy parameter with some matrices
matrices = [
sp.csc_matrix(np.arange(5)).toarray(),
_sparse_random_matrix(10, 10, density=0.10).toarray(),
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert not np.isnan(M).any()
@pytest.mark.parametrize(
"X", [np.random.random((10, 2)), sp.random(10, 2, format="csr")]
)
def test_as_float_array_nan(X):
X = X.copy()
X[5, 0] = np.nan
X[6, 1] = np.nan
X_converted = as_float_array(X, ensure_all_finite="allow-nan")
assert_allclose_dense_sparse(X_converted, X)
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert not isinstance(as_float_array(X), np.matrix)
assert not isinstance(as_float_array(sp.csc_matrix(X)), np.matrix)
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix="sklearn-test") as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order="C", copy=copy)
assert B.flags["C_CONTIGUOUS"]
B = check_array(A, order="F", copy=copy)
assert B.flags["F_CONTIGUOUS"]
if copy:
assert A is not B
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert not X.data.flags["C_CONTIGUOUS"]
@pytest.mark.parametrize(
"value, ensure_all_finite",
[(np.inf, False), (np.nan, "allow-nan"), (np.nan, False)],
)
@pytest.mark.parametrize("retype", [np.asarray, sp.csr_matrix])
def test_check_array_ensure_all_finite_valid(value, ensure_all_finite, retype):
X = retype(np.arange(4).reshape(2, 2).astype(float))
X[0, 0] = value
X_checked = check_array(X, ensure_all_finite=ensure_all_finite, accept_sparse=True)
assert_allclose_dense_sparse(X, X_checked)
@pytest.mark.parametrize(
"value, input_name, ensure_all_finite, match_msg",
[
(np.inf, "", True, "Input contains infinity"),
(np.inf, "X", True, "Input X contains infinity"),
(np.inf, "sample_weight", True, "Input sample_weight contains infinity"),
(np.inf, "X", "allow-nan", "Input X contains infinity"),
(np.nan, "", True, "Input contains NaN"),
(np.nan, "X", True, "Input X contains NaN"),
(np.nan, "y", True, "Input y contains NaN"),
(
np.nan,
"",
"allow-inf",
"ensure_all_finite should be a bool or 'allow-nan'",
),
(np.nan, "", 1, "Input contains NaN"),
],
)
@pytest.mark.parametrize("retype", [np.asarray, sp.csr_matrix])
def test_check_array_ensure_all_finite_invalid(
value, input_name, ensure_all_finite, match_msg, retype
):
X = retype(np.arange(4).reshape(2, 2).astype(np.float64))
X[0, 0] = value
with pytest.raises(ValueError, match=match_msg):
check_array(
X,
input_name=input_name,
ensure_all_finite=ensure_all_finite,
accept_sparse=True,
)
@pytest.mark.parametrize("input_name", ["X", "y", "sample_weight"])
@pytest.mark.parametrize("retype", [np.asarray, sp.csr_matrix])
def test_check_array_links_to_imputer_doc_only_for_X(input_name, retype):
data = retype(np.arange(4).reshape(2, 2).astype(np.float64))
data[0, 0] = np.nan
estimator = SVR()
extended_msg = (
f"\n{estimator.__class__.__name__} does not accept missing values"
" encoded as NaN natively. For supervised learning, you might want"
" to consider sklearn.ensemble.HistGradientBoostingClassifier and Regressor"
" which accept missing values encoded as NaNs natively."
" Alternatively, it is possible to preprocess the"
" data, for instance by using an imputer transformer in a pipeline"
" or drop samples with missing values. See"
" https://scikit-learn.org/stable/modules/impute.html"
" You can find a list of all estimators that handle NaN values"
" at the following page:"
" https://scikit-learn.org/stable/modules/impute.html"
"#estimators-that-handle-nan-values"
)
with pytest.raises(ValueError, match=f"Input {input_name} contains NaN") as ctx:
check_array(
data,
estimator=estimator,
input_name=input_name,
accept_sparse=True,
)
if input_name == "X":
assert extended_msg in ctx.value.args[0]
else:
assert extended_msg not in ctx.value.args[0]
if input_name == "X":
# Verify that _validate_data is automatically called with the right argument
# to generate the same exception:
with pytest.raises(ValueError, match=f"Input {input_name} contains NaN") as ctx:
SVR().fit(data, np.ones(data.shape[0]))
assert extended_msg in ctx.value.args[0]
def test_check_array_ensure_all_finite_object():
X = np.array([["a", "b", np.nan]], dtype=object).T
X_checked = check_array(X, dtype=None, ensure_all_finite="allow-nan")
assert X is X_checked
X_checked = check_array(X, dtype=None, ensure_all_finite=False)
assert X is X_checked
with pytest.raises(ValueError, match="Input contains NaN"):
check_array(X, dtype=None, ensure_all_finite=True)
@pytest.mark.parametrize(
"X, err_msg",
[
(
np.array([[1, np.nan]]),
"Input contains NaN.",
),
(
np.array([[1, np.nan]]),
"Input contains NaN.",
),
(
np.array([[1, np.inf]]),
"Input contains infinity or a value too large for.*int",
),
(np.array([[1, np.nan]], dtype=object), "cannot convert float NaN to integer"),
],
)
@pytest.mark.parametrize("ensure_all_finite", [True, False])
def test_check_array_ensure_all_finite_object_unsafe_casting(
X, err_msg, ensure_all_finite
):
# casting a float array containing NaN or inf to int dtype should
# raise an error irrespective of the ensure_all_finite parameter.
with pytest.raises(ValueError, match=err_msg):
check_array(X, dtype=int, ensure_all_finite=ensure_all_finite)
def test_check_array_series_err_msg():
"""
Check that we raise a proper error message when passing a Series and we expect a
2-dimensional container.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27498
"""
pd = pytest.importorskip("pandas")
ser = pd.Series([1, 2, 3])
msg = f"Expected a 2-dimensional container but got {type(ser)} instead."
with pytest.raises(ValueError, match=msg):
check_array(ser, ensure_2d=True)
@pytest.mark.filterwarnings("ignore:Can't check dok sparse matrix for nan or inf")
def test_check_array():
# accept_sparse == False
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
with pytest.raises(TypeError):
check_array(X_csr)
# ensure_2d=False
X_array = check_array([0, 1, 2], ensure_2d=False)
assert X_array.ndim == 1
# ensure_2d=True with 1d array
with pytest.raises(ValueError, match="Expected 2D array, got 1D array instead"):
check_array([0, 1, 2], ensure_2d=True)
# ensure_2d=True with scalar array
with pytest.raises(ValueError, match="Expected 2D array, got scalar array instead"):
check_array(10, ensure_2d=True)
# ensure_2d=True with 1d sparse array
if hasattr(sp, "csr_array"):
sparse_row = next(iter(sp.csr_array(X)))
if sparse_row.ndim == 1:
# In scipy 1.14 and later, sparse row is 1D while it was 2D before.
with pytest.raises(ValueError, match="Expected 2D input, got"):
check_array(sparse_row, accept_sparse=True, ensure_2d=True)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
with pytest.raises(ValueError):
check_array(X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(int)
X_float = X_C.astype(float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, int, float, np.float32, None, bool, object]
orders = ["C", "F", None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert X_checked.dtype == dtype
else:
assert X_checked.dtype == X.dtype
if order == "C":
assert X_checked.flags["C_CONTIGUOUS"]
assert not X_checked.flags["F_CONTIGUOUS"]
elif order == "F":
assert X_checked.flags["F_CONTIGUOUS"]
assert not X_checked.flags["C_CONTIGUOUS"]
if copy:
assert X is not X_checked
else:
# doesn't copy if it was already good
if (
X.dtype == X_checked.dtype
and X_checked.flags["C_CONTIGUOUS"] == X.flags["C_CONTIGUOUS"]
and X_checked.flags["F_CONTIGUOUS"] == X.flags["F_CONTIGUOUS"]
):
assert X is X_checked
# allowed sparse != None
# try different type of sparse format
Xs = []
Xs.extend(
[
sparse_container(X_C)
for sparse_container in CSR_CONTAINERS
+ CSC_CONTAINERS
+ COO_CONTAINERS
+ DOK_CONTAINERS
]
)
Xs.extend([Xs[0].astype(np.int64), Xs[0].astype(np.float64)])
accept_sparses = [["csr", "coo"], ["coo", "dok"]]
# scipy sparse matrices do not support the object dtype so
# this dtype is skipped in this loop
non_object_dtypes = [dt for dt in dtypes if dt is not object]
for X, dtype, accept_sparse, copy in product(
Xs, non_object_dtypes, accept_sparses, copys
):
X_checked = check_array(X, dtype=dtype, accept_sparse=accept_sparse, copy=copy)
if dtype is not None:
assert X_checked.dtype == dtype
else:
assert X_checked.dtype == X.dtype
if X.format in accept_sparse:
# no change if allowed
assert X.format == X_checked.format
else:
# got converted
assert X_checked.format == accept_sparse[0]
if copy:
assert X is not X_checked
else:
# doesn't copy if it was already good
if X.dtype == X_checked.dtype and X.format == X_checked.format:
assert X is X_checked
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert isinstance(X_dense, np.ndarray)
# raise on too deep lists
with pytest.raises(ValueError):
check_array(X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = _NotAnArray(X_dense)
result = check_array(X_no_array)
assert isinstance(result, np.ndarray)
# check negative values when ensure_non_negative=True
X_neg = check_array([[1, 2], [-3, 4]])
err_msg = "Negative values in data passed to X in RandomForestRegressor"
with pytest.raises(ValueError, match=err_msg):
check_array(
X_neg,
ensure_non_negative=True,
input_name="X",
estimator=RandomForestRegressor(),
)
@pytest.mark.parametrize(
"X",
[
[["1", "2"], ["3", "4"]],
np.array([["1", "2"], ["3", "4"]], dtype="U"),
np.array([["1", "2"], ["3", "4"]], dtype="S"),
[[b"1", b"2"], [b"3", b"4"]],
np.array([[b"1", b"2"], [b"3", b"4"]], dtype="V1"),
],
)
def test_check_array_numeric_error(X):
"""Test that check_array errors when it receives an array of bytes/string
while a numeric dtype is required."""
expected_msg = r"dtype='numeric' is not compatible with arrays of bytes/strings"
with pytest.raises(ValueError, match=expected_msg):
check_array(X, dtype="numeric")
@pytest.mark.parametrize(
"pd_dtype", ["Int8", "Int16", "UInt8", "UInt16", "Float32", "Float64"]
)
@pytest.mark.parametrize(
"dtype, expected_dtype",
[
([np.float32, np.float64], np.float32),
(np.float64, np.float64),
("numeric", np.float64),
],
)
def test_check_array_pandas_na_support(pd_dtype, dtype, expected_dtype):
# Test pandas numerical extension arrays with pd.NA
pd = pytest.importorskip("pandas")
if pd_dtype in {"Float32", "Float64"}:
# Extension dtypes with Floats was added in 1.2
pd = pytest.importorskip("pandas", minversion="1.2")
X_np = np.array(
[[1, 2, 3, np.nan, np.nan], [np.nan, np.nan, 8, 4, 6], [1, 2, 3, 4, 5]]
).T
# Creates dataframe with numerical extension arrays with pd.NA
X = pd.DataFrame(X_np, dtype=pd_dtype, columns=["a", "b", "c"])
# column c has no nans
X["c"] = X["c"].astype("float")
X_checked = check_array(X, ensure_all_finite="allow-nan", dtype=dtype)
assert_allclose(X_checked, X_np)
assert X_checked.dtype == expected_dtype
X_checked = check_array(X, ensure_all_finite=False, dtype=dtype)
assert_allclose(X_checked, X_np)
assert X_checked.dtype == expected_dtype
msg = "Input contains NaN"
with pytest.raises(ValueError, match=msg):
check_array(X, ensure_all_finite=True)
def test_check_array_panadas_na_support_series():
"""Check check_array is correct with pd.NA in a series."""
pd = pytest.importorskip("pandas")
X_int64 = pd.Series([1, 2, pd.NA], dtype="Int64")
msg = "Input contains NaN"
with pytest.raises(ValueError, match=msg):
check_array(X_int64, ensure_all_finite=True, ensure_2d=False)
X_out = check_array(X_int64, ensure_all_finite=False, ensure_2d=False)
assert_allclose(X_out, [1, 2, np.nan])
assert X_out.dtype == np.float64
X_out = check_array(
X_int64, ensure_all_finite=False, ensure_2d=False, dtype=np.float32
)
assert_allclose(X_out, [1, 2, np.nan])
assert X_out.dtype == np.float32
def test_check_array_pandas_dtype_casting():
# test that data-frames with homogeneous dtype are not upcast
pd = pytest.importorskip("pandas")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
X_df = pd.DataFrame(X)
assert check_array(X_df).dtype == np.float32
assert check_array(X_df, dtype=FLOAT_DTYPES).dtype == np.float32
X_df = X_df.astype({0: np.float16})
assert_array_equal(X_df.dtypes, (np.float16, np.float32, np.float32))
assert check_array(X_df).dtype == np.float32
assert check_array(X_df, dtype=FLOAT_DTYPES).dtype == np.float32
X_df = X_df.astype({0: np.int16})
# float16, int16, float32 casts to float32
assert check_array(X_df).dtype == np.float32
assert check_array(X_df, dtype=FLOAT_DTYPES).dtype == np.float32
X_df = X_df.astype({2: np.float16})
# float16, int16, float16 casts to float32
assert check_array(X_df).dtype == np.float32
assert check_array(X_df, dtype=FLOAT_DTYPES).dtype == np.float32
X_df = X_df.astype(np.int16)
assert check_array(X_df).dtype == np.int16
# we're not using upcasting rules for determining
# the target type yet, so we cast to the default of float64
assert check_array(X_df, dtype=FLOAT_DTYPES).dtype == np.float64
# check that we handle pandas dtypes in a semi-reasonable way
# this is actually tricky because we can't really know that this
# should be integer ahead of converting it.
cat_df = pd.DataFrame({"cat_col": pd.Categorical([1, 2, 3])})
assert check_array(cat_df).dtype == np.int64
assert check_array(cat_df, dtype=FLOAT_DTYPES).dtype == np.float64
def test_check_array_on_mock_dataframe():
arr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
mock_df = MockDataFrame(arr)
checked_arr = check_array(mock_df)
assert checked_arr.dtype == arr.dtype
checked_arr = check_array(mock_df, dtype=np.float32)
assert checked_arr.dtype == np.dtype(np.float32)
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert check_array(X).dtype.kind == "i"
assert check_array(X, ensure_2d=False).dtype.kind == "i"
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
integer_data = [X_int64, X_csc_int32]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
with warnings.catch_warnings():
warnings.simplefilter("error")
for X in integer_data:
X_checked = check_array(X, dtype=np.float64, accept_sparse=True)
assert X_checked.dtype == np.float64
for X in float32_data:
X_checked = check_array(
X, dtype=[np.float64, np.float32], accept_sparse=True
)
assert X_checked.dtype == np.float32
assert X_checked is X
X_checked = check_array(
X,
dtype=[np.float64, np.float32],
accept_sparse=["csr", "dok"],
copy=True,
)
assert X_checked.dtype == np.float32
assert X_checked is not X
X_checked = check_array(
X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=["csr", "dok"],
copy=False,
)
assert X_checked.dtype == np.float32
assert X_checked is not X_csc_float32
assert X_checked.format == "csr"
def test_check_array_accept_sparse_type_exception():
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
invalid_type = SVR()
msg = (
"Sparse data was passed, but dense data is required. "
r"Use '.toarray\(\)' to convert to a dense numpy array."
)
with pytest.raises(TypeError, match=msg):
check_array(X_csr, accept_sparse=False)
msg = (
"Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided 'accept_sparse=.*'."
)
with pytest.raises(ValueError, match=msg):
check_array(X_csr, accept_sparse=invalid_type)
msg = (
"When providing 'accept_sparse' as a tuple or list, "
"it must contain at least one string value."
)
with pytest.raises(ValueError, match=msg):
check_array(X_csr, accept_sparse=[])
with pytest.raises(ValueError, match=msg):
check_array(X_csr, accept_sparse=())
with pytest.raises(TypeError, match="SVR"):
check_array(X_csr, accept_sparse=[invalid_type])
def test_check_array_accept_sparse_no_exception():
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
check_array(X_csr, accept_sparse=True)
check_array(X_csr, accept_sparse="csr")
check_array(X_csr, accept_sparse=["csr"])
check_array(X_csr, accept_sparse=("csr",))
@pytest.fixture(params=["csr", "csc", "coo", "bsr"])
def X_64bit(request):
X = sp.random(20, 10, format=request.param)
if request.param == "coo":
if hasattr(X, "coords"):
# for scipy >= 1.13 .coords is a new attribute and is a tuple. The
# .col and .row attributes do not seem to be able to change the
# dtype, for more details see https://github.com/scipy/scipy/pull/18530/
# and https://github.com/scipy/scipy/pull/20003 where .indices was
# renamed to .coords
X.coords = tuple(v.astype("int64") for v in X.coords)
else:
# scipy < 1.13
X.row = X.row.astype("int64")
X.col = X.col.astype("int64")
else:
X.indices = X.indices.astype("int64")
X.indptr = X.indptr.astype("int64")
yield X
def test_check_array_accept_large_sparse_no_exception(X_64bit):
# When large sparse are allowed
check_array(X_64bit, accept_large_sparse=True, accept_sparse=True)
def test_check_array_accept_large_sparse_raise_exception(X_64bit):
# When large sparse are not allowed
msg = (
"Only sparse matrices with 32-bit integer indices "
"are accepted. Got int64 indices. Please do report"
)
with pytest.raises(ValueError, match=msg):
check_array(X_64bit, accept_sparse=True, accept_large_sparse=False)
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = r"0 feature\(s\) \(shape=\(1, 0\)\) while a minimum of 1 is required."
with pytest.raises(ValueError, match=msg):
check_array([[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = r"0 sample\(s\) \(shape=\(0,\)\) while a minimum of 1 is required."
with pytest.raises(ValueError, match=msg):
check_array([], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = re.escape(
(
"Input should have at least 1 dimension i.e. satisfy "
"`len(x.shape) > 0`, got scalar `array(42)` instead."
)
)
with pytest.raises(TypeError, match=msg):
check_array(42, ensure_2d=False)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = r"1 sample\(s\) \(shape=\(1, 10\)\) while a minimum of 2 is required."
with pytest.raises(ValueError, match=msg):
check_X_y(X, y, ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
with pytest.raises(ValueError, match=msg):
check_X_y(X, y, ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = r"2 feature\(s\) \(shape=\(10, 2\)\) while a minimum of 3 is required."
with pytest.raises(ValueError, match=msg):
check_X_y(X, y, ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
with pytest.raises(ValueError, match=msg):
check_X_y(X, y, ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = r"0 feature\(s\) \(shape=\(10, 0\)\) while a minimum of 1 is required."
with pytest.raises(ValueError, match=msg):
check_X_y(X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_check_array_complex_data_error():
X = np.array([[1 + 2j, 3 + 4j, 5 + 7j], [2 + 3j, 4 + 5j, 6 + 7j]])
with pytest.raises(ValueError, match="Complex data not supported"):
check_array(X)
# list of lists
X = [[1 + 2j, 3 + 4j, 5 + 7j], [2 + 3j, 4 + 5j, 6 + 7j]]
with pytest.raises(ValueError, match="Complex data not supported"):
check_array(X)
# tuple of tuples
X = ((1 + 2j, 3 + 4j, 5 + 7j), (2 + 3j, 4 + 5j, 6 + 7j))
with pytest.raises(ValueError, match="Complex data not supported"):
check_array(X)
# list of np arrays
X = [np.array([1 + 2j, 3 + 4j, 5 + 7j]), np.array([2 + 3j, 4 + 5j, 6 + 7j])]
with pytest.raises(ValueError, match="Complex data not supported"):
check_array(X)
# tuple of np arrays
X = (np.array([1 + 2j, 3 + 4j, 5 + 7j]), np.array([2 + 3j, 4 + 5j, 6 + 7j]))
with pytest.raises(ValueError, match="Complex data not supported"):
check_array(X)
# dataframe
X = MockDataFrame(np.array([[1 + 2j, 3 + 4j, 5 + 7j], [2 + 3j, 4 + 5j, 6 + 7j]]))
with pytest.raises(ValueError, match="Complex data not supported"):
check_array(X)
# sparse matrix
X = sp.coo_matrix([[0, 1 + 2j], [0, 0]])
with pytest.raises(ValueError, match="Complex data not supported"):
check_array(X)
# target variable does not always go through check_array but should
# never accept complex data either.
y = np.array([1 + 2j, 3 + 4j, 5 + 7j, 2 + 3j, 4 + 5j, 6 + 7j])
with pytest.raises(ValueError, match="Complex data not supported"):
_check_y(y)
def test_has_fit_parameter():
assert not has_fit_parameter(KNeighborsClassifier, "sample_weight")
assert has_fit_parameter(RandomForestRegressor, "sample_weight")
assert has_fit_parameter(SVR, "sample_weight")
assert has_fit_parameter(SVR(), "sample_weight")
class TestClassWithDeprecatedFitMethod:
@deprecated("Deprecated for the purpose of testing has_fit_parameter")
def fit(self, X, y, sample_weight=None):
pass
assert has_fit_parameter(TestClassWithDeprecatedFitMethod, "sample_weight"), (
"has_fit_parameter fails for class with deprecated fit method."
)
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {
"dense": arr_asym,
"dok": sp.dok_matrix(arr_asym),
"csr": sp.csr_matrix(arr_asym),
"csc": sp.csc_matrix(arr_asym),
"coo": sp.coo_matrix(arr_asym),
"lil": sp.lil_matrix(arr_asym),
"bsr": sp.bsr_matrix(arr_asym),
}
# check error for bad inputs
with pytest.raises(ValueError):
check_symmetric(arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
with pytest.warns(UserWarning):
check_symmetric(arr)
with pytest.raises(ValueError):
check_symmetric(arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert output.format == arr_format
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted_with_is_fitted():
class Estimator(BaseEstimator):
def fit(self, **kwargs):
self._is_fitted = True
return self
def __sklearn_is_fitted__(self):
return hasattr(self, "_is_fitted") and self._is_fitted
with pytest.raises(NotFittedError):
check_is_fitted(Estimator())
check_is_fitted(Estimator().fit())
def test_check_is_fitted_stateless():
"""Check that check_is_fitted passes for stateless estimators."""
class StatelessEstimator(BaseEstimator):
def fit(self, **kwargs):
return self # pragma: no cover
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.requires_fit = False
return tags
check_is_fitted(StatelessEstimator())
def test_check_is_fitted():
# Check is TypeError raised when non estimator instance passed
with pytest.raises(TypeError):
check_is_fitted(ARDRegression)
with pytest.raises(TypeError):
check_is_fitted("SVR")
ard = ARDRegression()
svr = SVR()
try:
with pytest.raises(NotFittedError):
check_is_fitted(ard)
with pytest.raises(NotFittedError):
check_is_fitted(svr)
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
msg = "Random message %(name)s, %(name)s"
match = "Random message ARDRegression, ARDRegression"
with pytest.raises(ValueError, match=match):
check_is_fitted(ard, msg=msg)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_show_versions.py | sklearn/utils/tests/test_show_versions.py | from threadpoolctl import threadpool_info
from sklearn.utils._show_versions import _get_deps_info, _get_sys_info, show_versions
from sklearn.utils._testing import ignore_warnings
def test_get_sys_info():
sys_info = _get_sys_info()
assert "python" in sys_info
assert "executable" in sys_info
assert "machine" in sys_info
def test_get_deps_info():
with ignore_warnings():
deps_info = _get_deps_info()
assert "pip" in deps_info
assert "setuptools" in deps_info
assert "sklearn" in deps_info
assert "numpy" in deps_info
assert "scipy" in deps_info
assert "Cython" in deps_info
assert "pandas" in deps_info
assert "matplotlib" in deps_info
assert "joblib" in deps_info
def test_show_versions(capsys):
with ignore_warnings():
show_versions()
out, err = capsys.readouterr()
assert "python" in out
assert "numpy" in out
info = threadpool_info()
if info:
assert "threadpoolctl info:" in out
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_bunch.py | sklearn/utils/tests/test_bunch.py | import warnings
import numpy as np
import pytest
from sklearn.utils import Bunch
def test_bunch_attribute_deprecation():
"""Check that bunch raises deprecation message with `__getattr__`."""
bunch = Bunch()
values = np.asarray([1, 2, 3])
msg = (
"Key: 'values', is deprecated in 1.3 and will be "
"removed in 1.5. Please use 'grid_values' instead"
)
bunch._set_deprecated(
values, new_key="grid_values", deprecated_key="values", warning_message=msg
)
with warnings.catch_warnings():
# Does not warn for "grid_values"
warnings.simplefilter("error")
v = bunch["grid_values"]
assert v is values
with pytest.warns(FutureWarning, match=msg):
# Warns for "values"
v = bunch["values"]
assert v is values
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_set_output.py | sklearn/utils/tests/test_set_output.py | import importlib
from collections import namedtuple
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn._config import config_context, get_config
from sklearn.preprocessing import StandardScaler
from sklearn.utils._set_output import (
ADAPTERS_MANAGER,
ContainerAdapterProtocol,
_get_adapter_from_container,
_get_output_config,
_safe_set_output,
_SetOutputMixin,
_wrap_data_with_container,
check_library_installed,
)
from sklearn.utils.fixes import CSR_CONTAINERS
def test_pandas_adapter():
"""Check pandas adapter has expected behavior."""
pd = pytest.importorskip("pandas")
X_np = np.asarray([[1, 0, 3], [0, 0, 1]])
columns = np.asarray(["f0", "f1", "f2"], dtype=object)
index = np.asarray([1, 2])
X_df_orig = pd.DataFrame([[1, 2], [1, 3]], index=index)
X_ser_orig = pd.Series([2, 3], index=index)
adapter = ADAPTERS_MANAGER.adapters["pandas"]
X_container = adapter.create_container(X_np, X_df_orig, columns=lambda: columns)
assert isinstance(X_container, pd.DataFrame)
assert_array_equal(X_container.columns, columns)
assert_array_equal(X_container.index, index)
# use original index when the original is a series
X_container = adapter.create_container(X_np, X_ser_orig, columns=lambda: columns)
assert isinstance(X_container, pd.DataFrame)
assert_array_equal(X_container.columns, columns)
assert_array_equal(X_container.index, index)
# Input dataframe's index does not change
new_columns = np.asarray(["f0", "f1"], dtype=object)
X_df = pd.DataFrame([[1, 2], [1, 3]], index=[10, 12])
new_df = adapter.create_container(X_df, X_df_orig, columns=new_columns)
assert_array_equal(new_df.columns, new_columns)
assert_array_equal(new_df.index, X_df.index)
assert adapter.is_supported_container(X_df)
assert not adapter.is_supported_container(X_np)
# adapter.update_columns updates the columns
new_columns = np.array(["a", "c"], dtype=object)
new_df = adapter.rename_columns(X_df, new_columns)
assert_array_equal(new_df.columns, new_columns)
# adapter.hstack stacks the dataframes horizontally.
X_df_1 = pd.DataFrame([[1, 2, 5], [3, 4, 6]], columns=["a", "b", "e"])
X_df_2 = pd.DataFrame([[4], [5]], columns=["c"])
X_stacked = adapter.hstack([X_df_1, X_df_2])
expected_df = pd.DataFrame(
[[1, 2, 5, 4], [3, 4, 6, 5]], columns=["a", "b", "e", "c"]
)
pd.testing.assert_frame_equal(X_stacked, expected_df)
# check that we update properly the columns even with duplicate column names
# this use-case potentially happen when using ColumnTransformer
# non-regression test for gh-28260
X_df = pd.DataFrame([[1, 2], [1, 3]], columns=["a", "a"])
new_columns = np.array(["x__a", "y__a"], dtype=object)
new_df = adapter.rename_columns(X_df, new_columns)
assert_array_equal(new_df.columns, new_columns)
# check the behavior of the inplace parameter in `create_container`
# we should trigger a copy
X_df = pd.DataFrame([[1, 2], [1, 3]], index=index)
X_output = adapter.create_container(X_df, X_df, columns=["a", "b"], inplace=False)
assert X_output is not X_df
assert list(X_df.columns) == [0, 1]
assert list(X_output.columns) == ["a", "b"]
# the operation is inplace
X_df = pd.DataFrame([[1, 2], [1, 3]], index=index)
X_output = adapter.create_container(X_df, X_df, columns=["a", "b"], inplace=True)
assert X_output is X_df
assert list(X_df.columns) == ["a", "b"]
assert list(X_output.columns) == ["a", "b"]
def test_polars_adapter():
"""Check Polars adapter has expected behavior."""
pl = pytest.importorskip("polars")
X_np = np.array([[1, 0, 3], [0, 0, 1]])
columns = ["f1", "f2", "f3"]
X_df_orig = pl.DataFrame(X_np, schema=columns, orient="row")
adapter = ADAPTERS_MANAGER.adapters["polars"]
X_container = adapter.create_container(X_np, X_df_orig, columns=lambda: columns)
assert isinstance(X_container, pl.DataFrame)
assert_array_equal(X_container.columns, columns)
# Update columns with create_container
new_columns = np.asarray(["a", "b", "c"], dtype=object)
new_df = adapter.create_container(X_df_orig, X_df_orig, columns=new_columns)
assert_array_equal(new_df.columns, new_columns)
assert adapter.is_supported_container(X_df_orig)
assert not adapter.is_supported_container(X_np)
# adapter.update_columns updates the columns
new_columns = np.array(["a", "c", "g"], dtype=object)
new_df = adapter.rename_columns(X_df_orig, new_columns)
assert_array_equal(new_df.columns, new_columns)
# adapter.hstack stacks the dataframes horizontally.
X_df_1 = pl.DataFrame([[1, 2, 5], [3, 4, 6]], schema=["a", "b", "e"], orient="row")
X_df_2 = pl.DataFrame([[4], [5]], schema=["c"], orient="row")
X_stacked = adapter.hstack([X_df_1, X_df_2])
expected_df = pl.DataFrame(
[[1, 2, 5, 4], [3, 4, 6, 5]], schema=["a", "b", "e", "c"], orient="row"
)
from polars.testing import assert_frame_equal
assert_frame_equal(X_stacked, expected_df)
# check the behavior of the inplace parameter in `create_container`
# we should trigger a copy
X_df = pl.DataFrame([[1, 2], [1, 3]], schema=["a", "b"], orient="row")
X_output = adapter.create_container(X_df, X_df, columns=["c", "d"], inplace=False)
assert X_output is not X_df
assert list(X_df.columns) == ["a", "b"]
assert list(X_output.columns) == ["c", "d"]
# the operation is inplace
X_df = pl.DataFrame([[1, 2], [1, 3]], schema=["a", "b"], orient="row")
X_output = adapter.create_container(X_df, X_df, columns=["c", "d"], inplace=True)
assert X_output is X_df
assert list(X_df.columns) == ["c", "d"]
assert list(X_output.columns) == ["c", "d"]
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test__container_error_validation(csr_container):
"""Check errors in _wrap_data_with_container."""
X = np.asarray([[1, 0, 3], [0, 0, 1]])
X_csr = csr_container(X)
match = "The transformer outputs a scipy sparse matrix."
with config_context(transform_output="pandas"):
with pytest.raises(ValueError, match=match):
_wrap_data_with_container("transform", X_csr, X, StandardScaler())
class EstimatorWithoutSetOutputAndWithoutTransform:
pass
class EstimatorNoSetOutputWithTransform:
def transform(self, X, y=None):
return X # pragma: no cover
class EstimatorWithSetOutput(_SetOutputMixin):
def fit(self, X, y=None):
self.n_features_in_ = X.shape[1]
return self
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
def test__safe_set_output():
"""Check _safe_set_output works as expected."""
# Estimator without transform will not raise when setting set_output for transform.
est = EstimatorWithoutSetOutputAndWithoutTransform()
_safe_set_output(est, transform="pandas")
# Estimator with transform but without set_output will raise
est = EstimatorNoSetOutputWithTransform()
with pytest.raises(ValueError, match="Unable to configure output"):
_safe_set_output(est, transform="pandas")
est = EstimatorWithSetOutput().fit(np.asarray([[1, 2, 3]]))
_safe_set_output(est, transform="pandas")
config = _get_output_config("transform", est)
assert config["dense"] == "pandas"
_safe_set_output(est, transform="default")
config = _get_output_config("transform", est)
assert config["dense"] == "default"
# transform is None is a no-op, so the config remains "default"
_safe_set_output(est, transform=None)
config = _get_output_config("transform", est)
assert config["dense"] == "default"
class EstimatorNoSetOutputWithTransformNoFeatureNamesOut(_SetOutputMixin):
def transform(self, X, y=None):
return X # pragma: no cover
def test_set_output_mixin():
"""Estimator without get_feature_names_out does not define `set_output`."""
est = EstimatorNoSetOutputWithTransformNoFeatureNamesOut()
assert not hasattr(est, "set_output")
def test__safe_set_output_error():
"""Check transform with invalid config."""
X = np.asarray([[1, 0, 3], [0, 0, 1]])
est = EstimatorWithSetOutput()
_safe_set_output(est, transform="bad")
msg = "output config must be in"
with pytest.raises(ValueError, match=msg):
est.transform(X)
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_set_output_method(dataframe_lib):
"""Check that the output is a dataframe."""
lib = pytest.importorskip(dataframe_lib)
X = np.asarray([[1, 0, 3], [0, 0, 1]])
est = EstimatorWithSetOutput().fit(X)
# transform=None is a no-op
est2 = est.set_output(transform=None)
assert est2 is est
X_trans_np = est2.transform(X)
assert isinstance(X_trans_np, np.ndarray)
est.set_output(transform=dataframe_lib)
X_trans_pd = est.transform(X)
assert isinstance(X_trans_pd, lib.DataFrame)
def test_set_output_method_error():
"""Check transform fails with invalid transform."""
X = np.asarray([[1, 0, 3], [0, 0, 1]])
est = EstimatorWithSetOutput().fit(X)
est.set_output(transform="bad")
msg = "output config must be in"
with pytest.raises(ValueError, match=msg):
est.transform(X)
@pytest.mark.parametrize("transform_output", ["pandas", "polars"])
def test__get_output_config(transform_output):
"""Check _get_output_config works as expected."""
# Without a configuration set, the global config is used
global_config = get_config()["transform_output"]
config = _get_output_config("transform")
assert config["dense"] == global_config
with config_context(transform_output=transform_output):
# with estimator=None, the global config is used
config = _get_output_config("transform")
assert config["dense"] == transform_output
est = EstimatorNoSetOutputWithTransform()
config = _get_output_config("transform", est)
assert config["dense"] == transform_output
est = EstimatorWithSetOutput()
# If estimator has not config, use global config
config = _get_output_config("transform", est)
assert config["dense"] == transform_output
# If estimator has a config, use local config
est.set_output(transform="default")
config = _get_output_config("transform", est)
assert config["dense"] == "default"
est.set_output(transform=transform_output)
config = _get_output_config("transform", est)
assert config["dense"] == transform_output
class EstimatorWithSetOutputNoAutoWrap(_SetOutputMixin, auto_wrap_output_keys=None):
def transform(self, X, y=None):
return X
def test_get_output_auto_wrap_false():
"""Check that auto_wrap_output_keys=None does not wrap."""
est = EstimatorWithSetOutputNoAutoWrap()
assert not hasattr(est, "set_output")
X = np.asarray([[1, 0, 3], [0, 0, 1]])
assert X is est.transform(X)
def test_auto_wrap_output_keys_errors_with_incorrect_input():
msg = "auto_wrap_output_keys must be None or a tuple of keys."
with pytest.raises(ValueError, match=msg):
class BadEstimator(_SetOutputMixin, auto_wrap_output_keys="bad_parameter"):
pass
class AnotherMixin:
def __init_subclass__(cls, custom_parameter, **kwargs):
super().__init_subclass__(**kwargs)
cls.custom_parameter = custom_parameter
def test_set_output_mixin_custom_mixin():
"""Check that multiple init_subclasses passes parameters up."""
class BothMixinEstimator(_SetOutputMixin, AnotherMixin, custom_parameter=123):
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return input_features
est = BothMixinEstimator()
assert est.custom_parameter == 123
assert hasattr(est, "set_output")
def test_set_output_mro():
"""Check that multi-inheritance resolves to the correct class method.
Non-regression test gh-25293.
"""
class Base(_SetOutputMixin):
def transform(self, X):
return "Base"
class A(Base):
pass
class B(Base):
def transform(self, X):
return "B"
class C(A, B):
pass
assert C().transform(None) == "B"
class EstimatorWithSetOutputIndex(_SetOutputMixin):
def fit(self, X, y=None):
self.n_features_in_ = X.shape[1]
return self
def transform(self, X, y=None):
import pandas as pd
# transform by giving output a new index.
return pd.DataFrame(X.to_numpy(), index=[f"s{i}" for i in range(X.shape[0])])
def get_feature_names_out(self, input_features=None):
return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
def test_set_output_pandas_keep_index():
"""Check that set_output does not override index.
Non-regression test for gh-25730.
"""
pd = pytest.importorskip("pandas")
X = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=[0, 1])
est = EstimatorWithSetOutputIndex().set_output(transform="pandas")
est.fit(X)
X_trans = est.transform(X)
assert_array_equal(X_trans.index, ["s0", "s1"])
class EstimatorReturnTuple(_SetOutputMixin):
def __init__(self, OutputTuple):
self.OutputTuple = OutputTuple
def transform(self, X, y=None):
return self.OutputTuple(X, 2 * X)
def test_set_output_named_tuple_out():
"""Check that namedtuples are kept by default."""
Output = namedtuple("Output", "X, Y")
X = np.asarray([[1, 2, 3]])
est = EstimatorReturnTuple(OutputTuple=Output)
X_trans = est.transform(X)
assert isinstance(X_trans, Output)
assert_array_equal(X_trans.X, X)
assert_array_equal(X_trans.Y, 2 * X)
class EstimatorWithListInput(_SetOutputMixin):
def fit(self, X, y=None):
assert isinstance(X, list)
self.n_features_in_ = len(X[0])
return self
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_set_output_list_input(dataframe_lib):
"""Check set_output for list input.
Non-regression test for #27037.
"""
lib = pytest.importorskip(dataframe_lib)
X = [[0, 1, 2, 3], [4, 5, 6, 7]]
est = EstimatorWithListInput()
est.set_output(transform=dataframe_lib)
X_out = est.fit(X).transform(X)
assert isinstance(X_out, lib.DataFrame)
assert_array_equal(X_out.columns, ["X0", "X1", "X2", "X3"])
@pytest.mark.parametrize("name", sorted(ADAPTERS_MANAGER.adapters))
def test_adapter_class_has_interface(name):
"""Check adapters have the correct interface."""
assert isinstance(ADAPTERS_MANAGER.adapters[name], ContainerAdapterProtocol)
def test_check_library_installed(monkeypatch):
"""Check import error changed."""
orig_import_module = importlib.import_module
def patched_import_module(name):
if name == "pandas":
raise ImportError()
orig_import_module(name, package=None)
monkeypatch.setattr(importlib, "import_module", patched_import_module)
msg = "Setting output container to 'pandas' requires"
with pytest.raises(ImportError, match=msg):
check_library_installed("pandas")
def test_get_adapter_from_container():
"""Check the behavior fo `_get_adapter_from_container`."""
pd = pytest.importorskip("pandas")
X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})
adapter = _get_adapter_from_container(X)
assert adapter.container_lib == "pandas"
err_msg = "The container does not have a registered adapter in scikit-learn."
with pytest.raises(ValueError, match=err_msg):
_get_adapter_from_container(X.to_numpy())
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_class_weight.py | sklearn/utils/tests/test_class_weight.py | import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils._testing import assert_almost_equal, assert_array_almost_equal
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
from sklearn.utils.fixes import CSC_CONTAINERS
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = compute_class_weight("balanced", classes=classes, y=y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert cw[0] < cw[1] < cw[2]
@pytest.mark.parametrize(
"y_type, class_weight, classes, err_msg",
[
(
"numeric",
"balanced",
np.arange(4),
"classes should have valid labels that are in y",
),
# Non-regression for https://github.com/scikit-learn/scikit-learn/issues/8312
(
"numeric",
{"label_not_present": 1.0},
np.arange(4),
r"The classes, \[0, 1, 2, 3\], are not in class_weight",
),
(
"numeric",
"balanced",
np.arange(2),
"classes should include all valid labels",
),
(
"numeric",
{0: 1.0, 1: 2.0},
np.arange(2),
"classes should include all valid labels",
),
(
"string",
{"dogs": 3, "cat": 2},
np.array(["dog", "cat"]),
r"The classes, \['dog'\], are not in class_weight",
),
],
)
def test_compute_class_weight_not_present(y_type, class_weight, classes, err_msg):
# Raise error when y does not contain all class labels
y = (
np.asarray([0, 0, 0, 1, 1, 2])
if y_type == "numeric"
else np.asarray(["dog", "cat", "dog"])
)
print(y)
with pytest.raises(ValueError, match=err_msg):
compute_class_weight(class_weight, classes=classes, y=y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes=classes, y=y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, the weight is ignored
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
cw = compute_class_weight(class_weights, classes=classes, y=y)
assert_allclose([1.0, 2.0, 3.0], cw)
class_weights = {-1: 5.0, 0: 4.0, 1: 2.0, 2: 3.0}
cw = compute_class_weight(class_weights, classes=classes, y=y)
assert_allclose([4.0, 2.0, 3.0], cw)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_balanced_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = compute_class_weight("balanced", classes=classes, y=y)
assert len(cw) == len(classes)
assert_array_almost_equal(cw, np.array([1.0, 1.0, 1.0]))
def test_compute_class_weight_balanced_sample_weight_equivalence():
# Test with unbalanced and negative class labels for
# equivalence between repeated and weighted samples
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
sw = np.asarray([1, 0, 1, 1, 1, 2])
y_rep = np.repeat(y, sw, axis=0)
class_weights_weighted = compute_class_weight(
"balanced", classes=classes, y=y, sample_weight=sw
)
class_weights_repeated = compute_class_weight("balanced", classes=classes, y=y_rep)
assert len(class_weights_weighted) == len(classes)
assert len(class_weights_repeated) == len(classes)
class_counts_weighted = np.bincount(y + 2, weights=sw)
class_counts_repeated = np.bincount(y_rep + 2)
assert np.dot(class_weights_weighted, class_counts_weighted) == pytest.approx(
np.dot(class_weights_repeated, class_counts_repeated)
)
assert_allclose(class_weights_weighted, class_weights_repeated)
def test_compute_class_weight_balanced_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = compute_class_weight("balanced", classes=classes, y=y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2.0, 1.0, 2.0 / 3])
def test_compute_class_weight_default():
# Test for the case where no weight is given for a present class.
# Current behaviour is to assign the unweighted classes a weight of 1.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
classes_len = len(classes)
# Test for non specified weights
cw = compute_class_weight(None, classes=classes, y=y)
assert len(cw) == classes_len
assert_array_almost_equal(cw, np.ones(3))
# Tests for partly specified weights
cw = compute_class_weight({2: 1.5}, classes=classes, y=y)
assert len(cw) == classes_len
assert_array_almost_equal(cw, [1.5, 1.0, 1.0])
cw = compute_class_weight({2: 1.5, 4: 0.5}, classes=classes, y=y)
assert len(cw) == classes_len
assert_array_almost_equal(cw, [1.5, 1.0, 0.5])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2.0, 2.0, 2.0, 1.0, 1.0, 1.0])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array(
[0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333]
)
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2.0, 2.0, 2.0, 2.0, 2.0, 2.0])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced**2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, indices=range(6))
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y, indices=range(6))
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, indices=range(4))
assert_array_almost_equal(sample_weight, [2.0 / 3, 2.0 / 3, 2.0 / 3, 2.0, 2.0, 2.0])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, indices=[0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3.0, 3.0, 3.0])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y, indices=[0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced**2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y, indices=range(6))
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = compute_sample_weight("balanced", y, indices=range(6))
assert_array_almost_equal(sample_weight, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0])
@pytest.mark.parametrize(
"y_type, class_weight, indices, err_msg",
[
(
"single-output",
{1: 2, 2: 1},
range(4),
"The only valid class_weight for subsampling is 'balanced'.",
),
(
"multi-output",
{1: 2, 2: 1},
None,
"For multi-output, class_weight should be a list of dicts, or the string",
),
(
"multi-output",
[{1: 2, 2: 1}],
None,
r"Got 1 element\(s\) while having 2 outputs",
),
],
)
def test_compute_sample_weight_errors(y_type, class_weight, indices, err_msg):
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y_single_output = np.asarray([1, 1, 1, 2, 2, 2])
y_multi_output = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
y = y_single_output if y_type == "single-output" else y_multi_output
with pytest.raises(ValueError, match=err_msg):
compute_sample_weight(class_weight, y, indices=indices)
def test_compute_sample_weight_more_than_32():
    """Smoke test with many distinct classes and subsampling.

    Non-regression test for #12146 (more than 32 distinct classes).
    """
    n_classes = 50
    y = np.arange(n_classes)  # one sample per class
    weights = compute_sample_weight("balanced", y, indices=np.arange(n_classes))
    # With a uniform class distribution, every balanced weight is 1.
    assert_array_almost_equal(weights, np.ones(n_classes))
def test_class_weight_does_not_contains_more_classes():
    """Check that class_weight can contain more labels than in y.

    Non-regression test for #22413
    """
    clf = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20})
    # y only contains classes {0, 1}; the extra entry for class 2 in
    # class_weight must be tolerated (fit must not raise).
    clf.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_compute_sample_weight_sparse(csc_container):
    """Check that we can compute weight for sparse `y`."""
    y_sparse = csc_container(np.asarray([[0], [1], [1]]))
    weights = compute_sample_weight("balanced", y_sparse)
    # Two classes with counts (1, 2) -> balanced weights (1.5, 0.75).
    assert_allclose(weights, [1.5, 0.75, 0.75])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_optimize.py | sklearn/utils/tests/test_optimize.py | import warnings
import numpy as np
import pytest
from scipy.optimize import fmin_ncg
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._bunch import Bunch
from sklearn.utils._testing import assert_allclose
from sklearn.utils.optimize import _check_optimize_result, _newton_cg
def test_newton_cg(global_random_seed):
    """Check that our Newton-CG solver matches scipy's fmin_ncg.

    The objective is the positive semi-definite quadratic form
    f(x) = 0.5 * ||A x||^2 whose gradient is A.T @ A @ x and whose (constant)
    Hessian is H = A.T @ A; for a non-singular random A the minimum is x = 0.
    """
    rng = np.random.RandomState(global_random_seed)
    A = rng.normal(size=(10, 10))
    x0 = np.ones(10)

    def func(x):
        Ax = A.dot(x)
        return 0.5 * (Ax).dot(Ax)

    def grad(x):
        return A.T.dot(A.dot(x))

    def hess(x, p):
        # Hessian-vector product H @ p with H = A.T @ A (independent of x).
        # Bug fix: this previously computed p.dot(A.T.dot(A.dot(x.all()))),
        # i.e. it scaled H by the boolean reduction x.all() and relied on H's
        # symmetry — wrong (returns 0) whenever any component of x is exactly 0.
        return A.T.dot(A.dot(p))

    def grad_hess(x):
        return grad(x), lambda x: A.T.dot(A.dot(x))

    # func is a definite positive quadratic form, so the minimum is at x = 0
    # hence the use of absolute tolerance.
    assert np.all(np.abs(_newton_cg(grad_hess, func, grad, x0, tol=1e-10)[0]) <= 1e-7)
    assert_allclose(
        _newton_cg(grad_hess, func, grad, x0, tol=1e-7)[0],
        fmin_ncg(f=func, x0=x0, fprime=grad, fhess_p=hess),
        atol=1e-5,
    )
@pytest.mark.parametrize("verbose", [0, 1, 2])
def test_newton_cg_verbosity(capsys, verbose):
    """Test the std output of verbose newton_cg solver."""
    # Well-conditioned 2x2 problem: A = I, so the solution is x = b.
    A = np.eye(2)
    b = np.array([1, 2], dtype=float)
    _newton_cg(
        grad_hess=lambda x: (A @ x - b, lambda z: A @ z),
        func=lambda x: 0.5 * x @ A @ x - b @ x,
        grad=lambda x: A @ x - b,
        x0=np.zeros(A.shape[0]),
        verbose=verbose,
    )  # returns array([1., 2])
    captured = capsys.readouterr()
    if verbose == 0:
        # Silent mode: nothing may be printed.
        assert captured.out == ""
    else:
        # verbose >= 1: outer iteration and convergence summary lines.
        msg = [
            "Newton-CG iter = 1",
            "Check Convergence",
            "max |gradient|",
            "Solver did converge at loss = ",
        ]
        for m in msg:
            assert m in captured.out
        if verbose >= 2:
            # verbose >= 2 additionally reports inner CG iterations and the
            # line search steps.
            msg = [
                "Inner CG solver iteration 1 stopped with",
                "sum(|residuals|) <= tol",
                "Line Search",
                "try line search wolfe1",
                "wolfe1 line search was successful",
            ]
            for m in msg:
                assert m in captured.out
    if verbose >= 2:
        # Set up a badly scaled singular Hessian with a completely wrong starting
        # position. This should trigger 2nd line search check
        A = np.array([[1.0, 2], [2, 4]]) * 1e30  # collinear columns
        b = np.array([1.0, 2.0])
        # Note that scipy.optimize._linesearch LineSearchWarning inherits from
        # RuntimeWarning, but we do not want to import from non public APIs.
        with pytest.warns(RuntimeWarning):
            _newton_cg(
                grad_hess=lambda x: (A @ x - b, lambda z: A @ z),
                func=lambda x: 0.5 * x @ A @ x - b @ x,
                grad=lambda x: A @ x - b,
                x0=np.array([-2.0, 1]),  # null space of hessian
                verbose=verbose,
            )
        captured = capsys.readouterr()
        msg = [
            "wolfe1 line search was not successful",
            "check loss |improvement| <= eps * |loss_old|:",
            "check sum(|gradient|) < sum(|gradient_old|):",
            "last resort: try line search wolfe2",
        ]
        for m in msg:
            assert m in captured.out

        # Set up a badly conditioned Hessian that leads to tiny curvature.
        # X.T @ X have singular values array([1.00000400e+01, 1.00008192e-11])
        A = np.array([[1.0, 2], [1, 2 + 1e-15]])
        b = np.array([-2.0, 1])
        with pytest.warns(ConvergenceWarning):
            _newton_cg(
                grad_hess=lambda x: (A @ x - b, lambda z: A @ z),
                func=lambda x: 0.5 * x @ A @ x - b @ x,
                grad=lambda x: A @ x - b,
                x0=b,
                verbose=verbose,
                maxiter=2,
            )
        captured = capsys.readouterr()
        msg = [
            "tiny_|p| = eps * ||p||^2",
        ]
        for m in msg:
            assert m in captured.out

        # Test for a case with negative Hessian.
        # We do not trigger "Inner CG solver iteration {i} stopped with negative
        # curvature", but that is very hard to trigger.
        A = np.eye(2)
        b = np.array([-2.0, 1])
        with pytest.warns(RuntimeWarning):
            _newton_cg(
                # Note the wrong sign in the hessian product.
                grad_hess=lambda x: (A @ x - b, lambda z: -A @ z),
                func=lambda x: 0.5 * x @ A @ x - b @ x,
                grad=lambda x: A @ x - b,
                x0=np.array([1.0, 1.0]),
                verbose=verbose,
                maxiter=3,
            )
        captured = capsys.readouterr()
        msg = [
            "Inner CG solver iteration 0 fell back to steepest descent",
        ]
        for m in msg:
            assert m in captured.out

        # Restrict the inner CG budget to force an early stop of the inner
        # solver on an ill-scaled diagonal Hessian.
        A = np.diag([1e-3, 1, 1e3])
        b = np.array([-2.0, 1, 2.0])
        with pytest.warns(ConvergenceWarning):
            _newton_cg(
                grad_hess=lambda x: (A @ x - b, lambda z: A @ z),
                func=lambda x: 0.5 * x @ A @ x - b @ x,
                grad=lambda x: A @ x - b,
                x0=np.ones_like(b),
                verbose=verbose,
                maxiter=2,
                maxinner=1,
            )
        captured = capsys.readouterr()
        msg = [
            "Inner CG solver stopped reaching maxiter=1",
        ]
        for m in msg:
            assert m in captured.out
def test_check_optimize():
    """Check the warnings emitted by _check_optimize_result for lbfgs."""
    # Mock some lbfgs output using a Bunch instance:
    result = Bunch()

    # Case 1: clean convergence -> no warning at all.
    result.nit = 1
    result.status = 0
    result.message = "OK"
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        _check_optimize_result("lbfgs", result)

    # Case 2: iteration limit reached with an implicit `max_iter` -> warn, but
    # do not recommend increasing `max_iter` (not a user-settable parameter).
    result.status = 1
    result.message = "STOP: TOTAL NO. OF ITERATIONS REACHED LIMIT"
    with pytest.warns(ConvergenceWarning) as record:
        _check_optimize_result("lbfgs", result)
    assert len(record) == 1
    message = record[0].message.args[0]
    assert "lbfgs failed to converge after 1 iteration(s)" in message
    assert result.message in message
    assert "Increase the number of iterations" not in message
    assert "scale the data" in message

    # Case 3: iteration limit reached with an explicit `max_iter` -> recommend
    # increasing it.
    with pytest.warns(ConvergenceWarning) as record:
        _check_optimize_result("lbfgs", result, max_iter=1)
    assert len(record) == 1
    message = record[0].message.args[0]
    assert "lbfgs failed to converge after 1 iteration(s)" in message
    assert result.message in message
    assert "Increase the number of iterations" in message
    assert "scale the data" in message

    # Case 4: abnormal termination before reaching `max_iter` -> do not
    # recommend increasing `max_iter`.
    result.nit = 2
    result.status = 2
    result.message = "ABNORMAL"
    with pytest.warns(ConvergenceWarning) as record:
        _check_optimize_result("lbfgs", result, max_iter=10)
    assert len(record) == 1
    message = record[0].message.args[0]
    assert "lbfgs failed to converge after 2 iteration(s)" in message
    assert result.message in message
    assert "Increase the number of iterations" not in message
    assert "scale the data" in message
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_user_interface.py | sklearn/utils/tests/test_user_interface.py | import string
import timeit
import pytest
from sklearn.utils._user_interface import _message_with_time, _print_elapsed_time
@pytest.mark.parametrize(
    ["source", "message", "is_long"],
    [
        ("ABC", string.ascii_lowercase, False),
        ("ABCDEF", string.ascii_lowercase, False),
        ("ABC", string.ascii_lowercase * 3, True),
        ("ABC" * 10, string.ascii_lowercase, True),
        ("ABC", string.ascii_lowercase + "\u1048", False),
    ],
)
@pytest.mark.parametrize(
    ["time", "time_str"],
    [
        (0.2, " 0.2s"),
        (20, " 20.0s"),
        (2000, "33.3min"),
        (20000, "333.3min"),
    ],
)
def test_message_with_time(source, message, is_long, time, time_str):
    out = _message_with_time(source, message, time)

    # Short messages are padded (with dots) to a fixed width of 70 characters;
    # long ones exceed it.
    if is_long:
        assert len(out) > 70
    else:
        assert len(out) == 70

    # Peel the output apart piece by piece from both ends:
    # "[source] <padding> <message>, total=<time>"
    prefix = "[" + source + "] "
    assert out.startswith(prefix)
    remainder = out[len(prefix) :]

    assert remainder.endswith(time_str)
    remainder = remainder[: -len(time_str)]
    assert remainder.endswith(", total=")
    remainder = remainder[: -len(", total=")]
    assert remainder.endswith(message)
    remainder = remainder[: -len(message)]
    assert remainder.endswith(" ")
    remainder = remainder[:-1]

    # Whatever is left must be dot padding (nothing at all for long messages).
    if is_long:
        assert not remainder
    else:
        assert list(set(remainder)) == ["."]
@pytest.mark.parametrize(
    ["message", "expected"],
    [
        ("hello", _message_with_time("ABC", "hello", 0.1) + "\n"),
        ("", _message_with_time("ABC", "", 0.1) + "\n"),
        (None, ""),
    ],
)
def test_print_elapsed_time(message, expected, capsys, monkeypatch):
    """Check _print_elapsed_time's output; message=None must print nothing."""
    # Freeze the clock at 0 on entry and at 0.1 inside the context manager so
    # the reported elapsed time is a deterministic 0.1s.
    monkeypatch.setattr(timeit, "default_timer", lambda: 0)
    with _print_elapsed_time("ABC", message):
        monkeypatch.setattr(timeit, "default_timer", lambda: 0.1)
    assert capsys.readouterr().out == expected
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_weight_vector.py | sklearn/utils/tests/test_weight_vector.py | import numpy as np
import pytest
from sklearn.utils._weight_vector import (
WeightVector32,
WeightVector64,
)
@pytest.mark.parametrize(
    "dtype, WeightVector",
    [
        (np.float32, WeightVector32),
        (np.float64, WeightVector64),
    ],
)
def test_type_invariance(dtype, WeightVector):
    """Check the `dtype` consistency of `WeightVector`."""
    w = np.random.rand(100).astype(dtype)
    aw = np.random.rand(100).astype(dtype)
    vector = WeightVector(w, aw)
    # Both the weights and the averaged weights must keep the input dtype.
    expected_dtype = np.dtype(dtype)
    assert np.asarray(vector.w).dtype is expected_dtype
    assert np.asarray(vector.aw).dtype is expected_dtype
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/tests/test_sparsefuncs.py | sklearn/utils/tests/test_sparsefuncs.py | import numpy as np
import pytest
import scipy.sparse as sp
from numpy.random import RandomState
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import linalg
from sklearn.datasets import make_classification
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
from sklearn.utils.sparsefuncs import (
_implicit_column_offset,
count_nonzero,
csc_median_axis_0,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_column,
inplace_swap_row,
mean_variance_axis,
min_max_axis,
sparse_matmul_to_dense,
)
from sklearn.utils.sparsefuncs_fast import (
assign_rows_csr,
csr_row_norms,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2,
)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_mean_variance_axis0(csc_container, csr_container, lil_container):
    """Check mean_variance_axis(axis=0) on CSR/CSC against dense numpy."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = lil_container(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    # LIL input is not supported.
    with pytest.raises(TypeError):
        mean_variance_axis(X_lil, axis=0)
    X_csr = csr_container(X_lil)
    X_csc = csc_container(X_lil)
    # (input dtype, expected output dtype): float dtypes are preserved,
    # integer inputs are promoted to float64.
    expected_dtypes = [
        (np.float32, np.float32),
        (np.float64, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
    ]
    for input_dtype, output_dtype in expected_dtypes:
        X_test = X.astype(input_dtype)
        for X_sparse in (X_csr, X_csc):
            X_sparse = X_sparse.astype(input_dtype)
            X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
            assert X_means.dtype == output_dtype
            assert X_vars.dtype == output_dtype
            # Must agree with the dense numpy reference.
            assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
def test_mean_variance_axis0_precision(dtype, sparse_constructor):
    # Check that there's no big loss of precision when the real variance is
    # exactly 0. (#19766)
    rng = np.random.RandomState(0)
    n_samples = 1000
    # A single constant column -> true variance is exactly zero.
    X = np.full(fill_value=100.0, shape=(n_samples, 1), dtype=dtype)
    # Add some missing records which should be ignored:
    missing_indices = rng.choice(np.arange(n_samples), 10, replace=False)
    X[missing_indices, 0] = np.nan
    X = sparse_constructor(X)
    # Random positive weights:
    sample_weight = rng.rand(n_samples).astype(dtype)
    _, var = mean_variance_axis(X, weights=sample_weight, axis=0)
    assert var < np.finfo(dtype).eps
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_mean_variance_axis1(csc_container, csr_container, lil_container):
    """Check mean_variance_axis(axis=1) on CSR/CSC against dense numpy."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = lil_container(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    # LIL input is not supported.
    with pytest.raises(TypeError):
        mean_variance_axis(X_lil, axis=1)
    X_csr = csr_container(X_lil)
    X_csc = csc_container(X_lil)
    # (input dtype, expected output dtype): float dtypes are preserved,
    # integer inputs are promoted to float64.
    expected_dtypes = [
        (np.float32, np.float32),
        (np.float64, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
    ]
    for input_dtype, output_dtype in expected_dtypes:
        X_test = X.astype(input_dtype)
        for X_sparse in (X_csr, X_csc):
            X_sparse = X_sparse.astype(input_dtype)
            # Bug fix: this test is about axis=1 but the loop previously
            # exercised axis=0 (copy/paste from test_mean_variance_axis0).
            X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
            assert X_means.dtype == output_dtype
            assert X_vars.dtype == output_dtype
            assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
@pytest.mark.parametrize(
    ["Xw", "X", "weights"],
    [
        ([[0, 0, 1], [0, 2, 3]], [[0, 0, 1], [0, 2, 3]], [1, 1, 1]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 0, 1], [0, 1, 1, 1]], [1, 2, 1]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1]], None),
        (
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [1.0, 1.0, 1.0],
        ),
        (
            [[0, 0], [1, np.nan], [2, 0], [0, 3], [np.nan, np.nan], [np.nan, 2]],
            [
                [0, 0, 0],
                [1, 1, np.nan],
                [2, 2, 0],
                [0, 0, 3],
                [np.nan, np.nan, np.nan],
                [np.nan, np.nan, 2],
            ],
            [2.0, 1.0],
        ),
        (
            [[1, 0, 1], [0, 3, 1]],
            [[1, 0, 0, 0, 1], [0, 3, 3, 3, 1]],
            np.array([1, 3, 1]),
        ),
    ],
)
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incr_mean_variance_axis_weighted_axis1(
    Xw, X, weights, sparse_constructor, dtype
):
    """Check weighted incremental stats along axis=1.

    In each parametrized case, X repeats each column of Xw according to its
    integer weight, so the weighted statistics of Xw must equal the
    unweighted statistics of X.
    """
    axis = 1
    Xw_sparse = sparse_constructor(Xw).astype(dtype)
    X_sparse = sparse_constructor(X).astype(dtype)

    # Start the incremental computation from empty statistics (one entry per
    # row since we reduce along axis=1).
    last_mean = np.zeros(np.shape(Xw)[0], dtype=dtype)
    last_var = np.zeros_like(last_mean, dtype=dtype)
    last_n = np.zeros_like(last_mean, dtype=np.int64)
    means0, vars0, n_incr0 = incr_mean_variance_axis(
        X=X_sparse,
        axis=axis,
        last_mean=last_mean,
        last_var=last_var,
        last_n=last_n,
        weights=None,
    )
    means_w0, vars_w0, n_incr_w0 = incr_mean_variance_axis(
        X=Xw_sparse,
        axis=axis,
        last_mean=last_mean,
        last_var=last_var,
        last_n=last_n,
        weights=weights,
    )
    # Output arrays (including the counts) keep the floating dtype.
    assert means_w0.dtype == dtype
    assert vars_w0.dtype == dtype
    assert n_incr_w0.dtype == dtype
    # Weighted incremental, unweighted incremental and one-shot statistics
    # must all agree.
    means_simple, vars_simple = mean_variance_axis(X=X_sparse, axis=axis)
    assert_array_almost_equal(means0, means_w0)
    assert_array_almost_equal(means0, means_simple)
    assert_array_almost_equal(vars0, vars_w0)
    assert_array_almost_equal(vars0, vars_simple)
    assert_array_almost_equal(n_incr0, n_incr_w0)
    # check second round for incremental
    means1, vars1, n_incr1 = incr_mean_variance_axis(
        X=X_sparse,
        axis=axis,
        last_mean=means0,
        last_var=vars0,
        last_n=n_incr0,
        weights=None,
    )
    means_w1, vars_w1, n_incr_w1 = incr_mean_variance_axis(
        X=Xw_sparse,
        axis=axis,
        last_mean=means_w0,
        last_var=vars_w0,
        last_n=n_incr_w0,
        weights=weights,
    )
    assert_array_almost_equal(means1, means_w1)
    assert_array_almost_equal(vars1, vars_w1)
    assert_array_almost_equal(n_incr1, n_incr_w1)
    assert means_w1.dtype == dtype
    assert vars_w1.dtype == dtype
    assert n_incr_w1.dtype == dtype
@pytest.mark.parametrize(
    ["Xw", "X", "weights"],
    [
        ([[0, 0, 1], [0, 2, 3]], [[0, 0, 1], [0, 2, 3]], [1, 1]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1], [0, 1, 1]], [1, 2]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1]], None),
        (
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [1.0, 1.0],
        ),
        (
            [[0, 0, 1, np.nan, 2, 0], [0, 3, np.nan, np.nan, np.nan, 2]],
            [
                [0, 0, 1, np.nan, 2, 0],
                [0, 0, 1, np.nan, 2, 0],
                [0, 3, np.nan, np.nan, np.nan, 2],
            ],
            [2.0, 1.0],
        ),
        (
            [[1, 0, 1], [0, 0, 1]],
            [[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]],
            np.array([1, 3]),
        ),
    ],
)
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incr_mean_variance_axis_weighted_axis0(
    Xw, X, weights, sparse_constructor, dtype
):
    """Check weighted incremental stats along axis=0.

    In each parametrized case, X repeats each row of Xw according to its
    integer weight, so the weighted statistics of Xw must equal the
    unweighted statistics of X.
    """
    axis = 0
    Xw_sparse = sparse_constructor(Xw).astype(dtype)
    X_sparse = sparse_constructor(X).astype(dtype)

    # Start the incremental computation from empty statistics (one entry per
    # column since we reduce along axis=0).
    last_mean = np.zeros(np.size(Xw, 1), dtype=dtype)
    last_var = np.zeros_like(last_mean)
    last_n = np.zeros_like(last_mean, dtype=np.int64)
    means0, vars0, n_incr0 = incr_mean_variance_axis(
        X=X_sparse,
        axis=axis,
        last_mean=last_mean,
        last_var=last_var,
        last_n=last_n,
        weights=None,
    )
    means_w0, vars_w0, n_incr_w0 = incr_mean_variance_axis(
        X=Xw_sparse,
        axis=axis,
        last_mean=last_mean,
        last_var=last_var,
        last_n=last_n,
        weights=weights,
    )
    # Output arrays (including the counts) keep the floating dtype.
    assert means_w0.dtype == dtype
    assert vars_w0.dtype == dtype
    assert n_incr_w0.dtype == dtype
    # Weighted incremental, unweighted incremental and one-shot statistics
    # must all agree.
    means_simple, vars_simple = mean_variance_axis(X=X_sparse, axis=axis)
    assert_array_almost_equal(means0, means_w0)
    assert_array_almost_equal(means0, means_simple)
    assert_array_almost_equal(vars0, vars_w0)
    assert_array_almost_equal(vars0, vars_simple)
    assert_array_almost_equal(n_incr0, n_incr_w0)
    # check second round for incremental
    means1, vars1, n_incr1 = incr_mean_variance_axis(
        X=X_sparse,
        axis=axis,
        last_mean=means0,
        last_var=vars0,
        last_n=n_incr0,
        weights=None,
    )
    means_w1, vars_w1, n_incr_w1 = incr_mean_variance_axis(
        X=Xw_sparse,
        axis=axis,
        last_mean=means_w0,
        last_var=vars_w0,
        last_n=n_incr_w0,
        weights=weights,
    )
    assert_array_almost_equal(means1, means_w1)
    assert_array_almost_equal(vars1, vars_w1)
    assert_array_almost_equal(n_incr1, n_incr_w1)
    assert means_w1.dtype == dtype
    assert vars_w1.dtype == dtype
    assert n_incr_w1.dtype == dtype
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_incr_mean_variance_axis(csc_container, csr_container, lil_container):
    """Check incr_mean_variance_axis against the one-shot mean_variance_axis."""
    for axis in [0, 1]:
        rng = np.random.RandomState(0)
        n_features = 50
        n_samples = 10
        # Build the data so that the reduced dimension matches the axis under
        # test.
        if axis == 0:
            data_chunks = [rng.randint(0, 2, size=n_features) for i in range(n_samples)]
        else:
            data_chunks = [rng.randint(0, 2, size=n_samples) for i in range(n_features)]
        # default params for incr_mean_variance
        last_mean = np.zeros(n_features) if axis == 0 else np.zeros(n_samples)
        last_var = np.zeros_like(last_mean)
        last_n = np.zeros_like(last_mean, dtype=np.int64)
        # Test errors
        X = np.array(data_chunks[0])
        X = np.atleast_2d(X)
        X = X.T if axis == 1 else X
        X_lil = lil_container(X)
        X_csr = csr_container(X_lil)
        # Deliberately scrambled arguments (X=axis, ...) must raise TypeError.
        with pytest.raises(TypeError):
            incr_mean_variance_axis(
                X=axis, axis=last_mean, last_mean=last_var, last_var=last_n
            )
        # LIL input is not supported.
        with pytest.raises(TypeError):
            incr_mean_variance_axis(
                X_lil, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
            )
        # Test _incr_mean_and_var with a 1 row input
        X_means, X_vars = mean_variance_axis(X_csr, axis)
        X_means_incr, X_vars_incr, n_incr = incr_mean_variance_axis(
            X_csr, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
        )
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        # X.shape[axis] picks # samples
        assert_array_equal(X.shape[axis], n_incr)
        # Same check for the CSC layout.
        X_csc = csc_container(X_lil)
        X_means, X_vars = mean_variance_axis(X_csc, axis)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        assert_array_equal(X.shape[axis], n_incr)
        # Test _incremental_mean_and_var with whole data
        X = np.vstack(data_chunks)
        X = X.T if axis == 1 else X
        X_lil = lil_container(X)
        X_csr = csr_container(X_lil)
        X_csc = csc_container(X_lil)
        # (input dtype, expected output dtype): float dtypes are preserved,
        # integer inputs are promoted to float64.
        expected_dtypes = [
            (np.float32, np.float32),
            (np.float64, np.float64),
            (np.int32, np.float64),
            (np.int64, np.float64),
        ]
        for input_dtype, output_dtype in expected_dtypes:
            for X_sparse in (X_csr, X_csc):
                X_sparse = X_sparse.astype(input_dtype)
                last_mean = last_mean.astype(output_dtype)
                last_var = last_var.astype(output_dtype)
                X_means, X_vars = mean_variance_axis(X_sparse, axis)
                X_means_incr, X_vars_incr, n_incr = incr_mean_variance_axis(
                    X_sparse,
                    axis=axis,
                    last_mean=last_mean,
                    last_var=last_var,
                    last_n=last_n,
                )
                assert X_means_incr.dtype == output_dtype
                assert X_vars_incr.dtype == output_dtype
                assert_array_almost_equal(X_means, X_means_incr)
                assert_array_almost_equal(X_vars, X_vars_incr)
                assert_array_equal(X.shape[axis], n_incr)
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
def test_incr_mean_variance_axis_dim_mismatch(sparse_constructor):
    """Check that we raise proper error when axis=1 and the dimension mismatch.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/18655
    """
    n_samples, n_features = 60, 4
    rng = np.random.RandomState(42)
    X = sparse_constructor(rng.rand(n_samples, n_features))

    last_mean = np.zeros(n_features)
    last_var = np.zeros_like(last_mean)
    last_n = np.zeros(last_mean.shape, dtype=np.int64)

    # Statistics sized n_features are valid for axis=0 and must match numpy.
    mean0, var0, _ = incr_mean_variance_axis(
        X, axis=0, last_mean=last_mean, last_var=last_var, last_n=last_n
    )
    assert_allclose(np.mean(X.toarray(), axis=0), mean0)
    assert_allclose(np.var(X.toarray(), axis=0), var0)

    # The same n_features-sized statistics are the wrong size for axis=1.
    with pytest.raises(ValueError):
        incr_mean_variance_axis(
            X, axis=1, last_mean=last_mean, last_var=last_var, last_n=last_n
        )

    # Inconsistent shapes among last_mean, last_var and last_n also raise.
    with pytest.raises(ValueError):
        incr_mean_variance_axis(
            X, axis=0, last_mean=last_mean[:-1], last_var=last_var, last_n=last_n
        )
@pytest.mark.parametrize(
    "X1, X2",
    [
        (
            sp.random(5, 2, density=0.8, format="csr", random_state=0),
            sp.random(13, 2, density=0.8, format="csr", random_state=0),
        ),
        (
            sp.random(5, 2, density=0.8, format="csr", random_state=0),
            # Second batch with an all-NaN column, to check NaN handling.
            sp.hstack(
                [
                    np.full((13, 1), fill_value=np.nan),
                    sp.random(13, 1, density=0.8, random_state=42),
                ],
                format="csr",
            ),
        ),
    ],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_incr_mean_variance_axis_equivalence_mean_variance(X1, X2, csr_container):
    # non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/16448
    # check that computing the incremental mean and variance is equivalent to
    # computing the mean and variance on the stacked dataset.
    X1 = csr_container(X1)
    X2 = csr_container(X2)
    axis = 0
    last_mean, last_var = np.zeros(X1.shape[1]), np.zeros(X1.shape[1])
    last_n = np.zeros(X1.shape[1], dtype=np.int64)
    # Two incremental updates: first with X1, then with X2 starting from X1's
    # statistics.
    updated_mean, updated_var, updated_n = incr_mean_variance_axis(
        X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
    )
    updated_mean, updated_var, updated_n = incr_mean_variance_axis(
        X2, axis=axis, last_mean=updated_mean, last_var=updated_var, last_n=updated_n
    )
    X = sp.vstack([X1, X2])
    # Dense NaN-aware references: NaN entries do not contribute to the
    # statistics nor to the counts.
    assert_allclose(updated_mean, np.nanmean(X.toarray(), axis=axis))
    assert_allclose(updated_var, np.nanvar(X.toarray(), axis=axis))
    assert_allclose(updated_n, np.count_nonzero(~np.isnan(X.toarray()), axis=0))
def test_incr_mean_variance_no_new_n():
    # Updating the statistics with an empty matrix (zero rows) must leave
    # them all unchanged.
    axis = 0
    X1 = sp.random(5, 1, density=0.8, random_state=0).tocsr()
    X2 = sp.random(0, 1, density=0.8, random_state=0).tocsr()
    n_features = X1.shape[1]
    last_mean, last_var = np.zeros(n_features), np.zeros(n_features)
    last_n = np.zeros(n_features, dtype=np.int64)
    # Seed the statistics with the non-empty batch.
    last_mean, last_var, last_n = incr_mean_variance_axis(
        X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
    )
    # Update with the empty batch, which should be ignored.
    updated_mean, updated_var, updated_n = incr_mean_variance_axis(
        X2, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
    )
    assert_allclose(updated_mean, last_mean)
    assert_allclose(updated_var, last_var)
    assert_allclose(updated_n, last_n)
def test_incr_mean_variance_n_float():
    # `last_n` given as a plain scalar must be accepted and broadcast to one
    # count per reduced entry.
    X = sp.random(5, 2, density=0.8, random_state=0).tocsr()
    n_features = X.shape[1]
    _, _, new_n = incr_mean_variance_axis(
        X,
        axis=0,
        last_mean=np.zeros(n_features),
        last_var=np.zeros(n_features),
        last_n=0,
    )
    assert_allclose(new_n, np.full(n_features, X.shape[0]))
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
def test_incr_mean_variance_axis_ignore_nan(axis, sparse_constructor):
    """Check NaN entries are ignored: X_nan must give the same stats as X."""
    old_means = np.array([535.0, 535.0, 535.0, 535.0])
    old_variances = np.array([4225.0, 4225.0, 4225.0, 4225.0])
    old_sample_count = np.array([2, 2, 2, 2], dtype=np.int64)
    # NaN-free reference data.
    X = sparse_constructor(
        np.array([[170, 170, 170, 170], [430, 430, 430, 430], [300, 300, 300, 300]])
    )
    # Same column values as X plus NaN entries scattered across rows/columns.
    X_nan = sparse_constructor(
        np.array(
            [
                [170, np.nan, 170, 170],
                [np.nan, 170, 430, 430],
                [430, 430, np.nan, 300],
                [300, 300, 300, np.nan],
            ]
        )
    )
    # we avoid creating specific data for axis 0 and 1: translating the data is
    # enough.
    if axis:
        X = X.T
        X_nan = X_nan.T
    # take a copy of the old statistics since they are modified in place.
    X_means, X_vars, X_sample_count = incr_mean_variance_axis(
        X,
        axis=axis,
        last_mean=old_means.copy(),
        last_var=old_variances.copy(),
        last_n=old_sample_count.copy(),
    )
    X_nan_means, X_nan_vars, X_nan_sample_count = incr_mean_variance_axis(
        X_nan,
        axis=axis,
        last_mean=old_means.copy(),
        last_var=old_variances.copy(),
        last_n=old_sample_count.copy(),
    )
    assert_allclose(X_nan_means, X_means)
    assert_allclose(X_nan_vars, X_vars)
    assert_allclose(X_nan_sample_count, X_sample_count)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_mean_variance_illegal_axis(csr_container):
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_csr = csr_container(X)

    # Any axis other than 0 or 1 must be rejected by both functions.
    for bad_axis in (-3, 2, -1):
        with pytest.raises(ValueError):
            mean_variance_axis(X_csr, axis=bad_axis)
        with pytest.raises(ValueError):
            incr_mean_variance_axis(
                X_csr, axis=bad_axis, last_mean=None, last_var=None, last_n=None
            )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_densify_rows(csr_container):
    # assign_rows_csr must copy the selected CSR rows into the selected rows
    # of a dense output array, for both floating dtypes.
    for dtype in (np.float32, np.float64):
        X = csr_container(
            [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=dtype
        )
        src_rows = np.array([0, 2, 3], dtype=np.intp)
        dst_rows = np.array([1, 3, 4], dtype=np.intp)
        out = np.ones((6, X.shape[1]), dtype=dtype)

        # Expected result: rows dst_rows of `out` replaced by rows src_rows
        # of X, everything else untouched.
        expected = np.ones_like(out)
        expected[dst_rows] = X[src_rows, :].toarray()

        assign_rows_csr(X, src_rows, dst_rows, out)
        assert_array_equal(out, expected)
def test_inplace_column_scale():
    # In-place column scaling of CSR/CSC must match dense scaling, for both
    # floating dtypes; LIL input must be rejected.
    rng = np.random.RandomState(0)
    X = sp.random(100, 200, density=0.05)
    scale = rng.rand(200)

    for dtype in (np.float64, np.float32):
        Xt = X.astype(dtype)
        scale_t = scale.astype(dtype)
        Xr = Xt.tocsr()
        Xc = Xt.tocsc()
        XA = Xt.toarray()
        # Dense reference: broadcast multiply over columns.
        XA *= scale_t
        inplace_column_scale(Xc, scale_t)
        inplace_column_scale(Xr, scale_t)
        # CSR and CSC must agree with each other and with the dense result.
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        # LIL is not supported for in-place scaling.
        with pytest.raises(TypeError):
            inplace_column_scale(Xt.tolil(), scale_t)
def test_inplace_row_scale():
    """Check in-place row scaling of CSR/CSC against dense scaling.

    Bug fix: the TypeError checks previously called ``inplace_column_scale``
    on the LIL matrix (copy/paste from ``test_inplace_column_scale``), so
    ``inplace_row_scale``'s rejection of LIL input was never exercised.
    """
    rng = np.random.RandomState(0)
    X = sp.random(100, 200, density=0.05)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    scale = rng.rand(100)
    # Dense reference: broadcast multiply over rows.
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    # CSR and CSC must agree with each other and with the dense result.
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # LIL is not supported for in-place scaling.
    with pytest.raises(TypeError):
        inplace_row_scale(X.tolil(), scale)

    # Same checks with float32 data.
    X = X.astype(np.float32)
    scale = scale.astype(np.float32)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    with pytest.raises(TypeError):
        inplace_row_scale(X.tolil(), scale)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_inplace_swap_row(csc_container, csr_container):
    """Check in-place row swapping of CSR/CSC against dense BLAS swap.

    Bug fix: the TypeError check previously called
    ``inplace_swap_row(X_csr.tolil())`` without row indices, so the TypeError
    came from the missing positional arguments rather than from the LIL-input
    validation. The duplicated float64/float32 halves are folded into a loop.
    """
    for dtype in (np.float64, np.float32):
        X = np.array(
            [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=dtype
        )
        X_csr = csr_container(X)
        X_csc = csc_container(X)

        # Dense reference: BLAS swap of the corresponding rows.
        swap = linalg.get_blas_funcs(("swap",), (X,))[0]
        X[0], X[-1] = swap(X[0], X[-1])
        inplace_swap_row(X_csr, 0, -1)
        inplace_swap_row(X_csc, 0, -1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())

        X[2], X[3] = swap(X[2], X[3])
        inplace_swap_row(X_csr, 2, 3)
        inplace_swap_row(X_csc, 2, 3)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())

        # LIL input is not supported for in-place swapping.
        with pytest.raises(TypeError):
            inplace_swap_row(X_csr.tolil(), 0, -1)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_inplace_swap_column(csc_container, csr_container):
    """Check in-place column swapping of CSR/CSC against dense BLAS swap.

    Bug fix: the TypeError check previously called
    ``inplace_swap_column(X_csr.tolil())`` without column indices, so the
    TypeError came from the missing positional arguments rather than from the
    LIL-input validation. The duplicated float64/float32 halves are folded
    into a loop.
    """
    for dtype in (np.float64, np.float32):
        X = np.array(
            [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=dtype
        )
        X_csr = csr_container(X)
        X_csc = csc_container(X)

        # Dense reference: BLAS swap of the corresponding columns.
        swap = linalg.get_blas_funcs(("swap",), (X,))[0]
        X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
        inplace_swap_column(X_csr, 0, -1)
        inplace_swap_column(X_csc, 0, -1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())

        X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
        inplace_swap_column(X_csr, 0, 1)
        inplace_swap_column(X_csc, 0, 1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())

        # LIL input is not supported for in-place swapping.
        with pytest.raises(TypeError):
            inplace_swap_column(X_csr.tolil(), 0, -1)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("axis", [0, 1, None])
@pytest.mark.parametrize("sparse_format", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize(
    "missing_values, min_func, max_func, ignore_nan",
    [(0, np.min, np.max, False), (np.nan, np.nanmin, np.nanmax, True)],
)
@pytest.mark.parametrize("large_indices", [True, False])
def test_min_max(
    dtype,
    axis,
    sparse_format,
    missing_values,
    min_func,
    max_func,
    ignore_nan,
    large_indices,
):
    """min_max_axis must agree with the dense (nan)min/(nan)max reference."""
    dense = np.array(
        [
            [0, 3, 0],
            [2, -1, missing_values],
            [0, 0, 0],
            [9, missing_values, 7],
            [4, 0, 5],
        ],
        dtype=dtype,
    )
    sparse = sparse_format(dense)
    if large_indices:
        # Force 64-bit index arrays to also cover the large-index code path.
        sparse.indices = sparse.indices.astype("int64")
        sparse.indptr = sparse.indptr.astype("int64")
    mins, maxs = min_max_axis(sparse, axis=axis, ignore_nan=ignore_nan)
    assert_array_equal(mins, min_func(dense, axis=axis))
    assert_array_equal(maxs, max_func(dense, axis=axis))
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_min_max_axis_errors(csc_container, csr_container):
    """Invalid inputs to min_max_axis raise the documented exceptions."""
    data = np.array(
        [[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64
    )
    as_csr = csr_container(data)
    as_csc = csc_container(data)
    # Unsupported sparse format.
    with pytest.raises(TypeError):
        min_max_axis(as_csr.tolil(), axis=0)
    # Out-of-range axis values, positive and negative.
    with pytest.raises(ValueError):
        min_max_axis(as_csr, axis=2)
    with pytest.raises(ValueError):
        min_max_axis(as_csc, axis=-3)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_count_nonzero(csc_container, csr_container):
    """Check count_nonzero against a dense reference, with and without weights."""
    X = np.array(
        [[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64
    )
    X_csr = csr_container(X)
    X_csc = csc_container(X)
    X_nonzero = X != 0
    sample_weight = [0.5, 0.2, 0.3, 0.1, 0.1]
    # Weighted reference: each row's nonzero mask scaled by its sample weight.
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
    for axis in [0, 1, -1, -2, None]:
        assert_array_almost_equal(
            count_nonzero(X_csr, axis=axis), X_nonzero.sum(axis=axis)
        )
        assert_array_almost_equal(
            count_nonzero(X_csr, axis=axis, sample_weight=sample_weight),
            X_nonzero_weighted.sum(axis=axis),
        )
    # Only CSR input is supported.
    with pytest.raises(TypeError):
        count_nonzero(X_csc)
    # Out-of-range axis.
    with pytest.raises(ValueError):
        count_nonzero(X_csr, axis=2)
    # The result dtype must not depend on the reduction axis.
    assert count_nonzero(X_csr, axis=0).dtype == count_nonzero(X_csr, axis=1).dtype
    assert (
        count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype
        == count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype
    )
    # Check dtypes with large sparse matrices too
    # XXX: test fails on 32bit (Windows/Linux)
    try:
        X_csr.indices = X_csr.indices.astype(np.int64)
        X_csr.indptr = X_csr.indptr.astype(np.int64)
        assert count_nonzero(X_csr, axis=0).dtype == count_nonzero(X_csr, axis=1).dtype
        assert (
            count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype
            == count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype
        )
    except TypeError as e:
        # On 32-bit platforms the int64 upcast is rejected with a 'safe'
        # casting error; accept that only when np.intp confirms 32-bit.
        assert "according to the rule 'safe'" in e.args[0] and np.intp().nbytes < 8, e
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_csc_row_median(csc_container, csr_container):
    """csc_median_axis_0 matches np.median along axis 0 and rejects non-CSC input."""

    def check_matches_dense(dense):
        # The dense median is the reference for the sparse computation.
        assert_array_equal(
            csc_median_axis_0(csc_container(dense)), np.median(dense, axis=0)
        )

    rng = np.random.RandomState(0)
    # Fully dense random data.
    check_matches_dense(rng.rand(100, 50))
    # Mostly-zero data with some negated rows.
    data = rng.rand(51, 100)
    data[data < 0.7] = 0.0
    rows = rng.randint(0, 50, 10)
    data[rows] = -data[rows]
    check_matches_dense(data)
    # Toy data with known medians.
    toy = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    assert_array_equal(csc_median_axis_0(csc_container(toy)), np.array([0.5, -0.5]))
    toy = [[0, -2], [-1, -5], [1, -3]]
    assert_array_equal(csc_median_axis_0(csc_container(toy)), np.array([0.0, -3]))
    # Only CSC matrices are accepted.
    with pytest.raises(TypeError):
        csc_median_axis_0(csr_container(toy))
@pytest.mark.parametrize(
    "inplace_csr_row_normalize",
    (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2),
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_inplace_normalize(csr_container, inplace_csr_row_normalize):
    """Check that every row has unit L1/L2 norm after in-place normalization."""
    # `sum(axis=1)` on a scipy sparse *matrix* returns a (10, 1) np.matrix,
    # while sparse *arrays* return a 1-d array: pick the matching expectation.
    if csr_container is sp.csr_matrix:
        ones = np.ones((10, 1))
    else:
        ones = np.ones(10)
    rs = RandomState(10)
    for dtype in (np.float64, np.float32):
        X = rs.randn(10, 5).astype(dtype)
        X_csr = csr_container(X)
        for index_dtype in [np.int32, np.int64]:
            # csr_matrix will use int32 indices by default,
            # up-casting those to int64 when necessary
            if index_dtype is np.int64:
                X_csr.indptr = X_csr.indptr.astype(index_dtype)
                X_csr.indices = X_csr.indices.astype(index_dtype)
            assert X_csr.indices.dtype == index_dtype
            assert X_csr.indptr.dtype == index_dtype
            inplace_csr_row_normalize(X_csr)
            # Normalization must not change the value dtype.
            assert X_csr.dtype == dtype
            # For L2, squaring the data turns the unit-L2 check into a row sum.
            if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
                X_csr.data **= 2
            assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_csr_row_norms(dtype):
# checks that csr_row_norms returns the same output as
# scipy.sparse.linalg.norm, and that the dype is the same as X.dtype.
X = sp.random(100, 10, format="csr", dtype=dtype, random_state=42)
scipy_norms = sp.linalg.norm(X, axis=1) ** 2
norms = csr_row_norms(X)
assert norms.dtype == dtype
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/params.py | sklearn/utils/_repr_html/params.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import html
import inspect
import re
import reprlib
from collections import UserDict
from functools import lru_cache
from urllib.parse import quote
from sklearn.externals._numpydoc import docscrape
from sklearn.utils._repr_html.base import ReprHTMLMixin
def _generate_link_to_param_doc(estimator_class, param_name, doc_link):
"""URL to the relevant section of the docstring using a Text Fragment
https://developer.mozilla.org/en-US/docs/Web/URI/Reference/Fragment/Text_fragments
"""
docstring = estimator_class.__doc__
m = re.search(f"{param_name} : (.+)\\n", docstring or "")
if m is None:
# No match found in the docstring, return None to indicate that we
# cannot link.
return None
# Extract the whole line of the type information, up to the line break as
# disambiguation suffix to build the fragment
param_type = m.group(1)
text_fragment = f"{quote(param_name)},-{quote(param_type)}"
return f"{doc_link}#:~:text={text_fragment}"
def _read_params(name, value, non_default_params):
"""Categorizes parameters as 'default' or 'user-set' and formats their values.
Escapes or truncates parameter values for display safety and readability.
"""
name = html.escape(name)
r = reprlib.Repr()
r.maxlist = 2 # Show only first 2 items of lists
r.maxtuple = 1 # Show only first item of tuples
r.maxstring = 50 # Limit string length
cleaned_value = html.escape(r.repr(value))
param_type = "user-set" if name in non_default_params else "default"
return {"param_type": param_type, "param_name": name, "param_value": cleaned_value}
@lru_cache
def _scrape_estimator_docstring(docstring):
    # Parsing a numpydoc docstring is comparatively expensive and the same
    # estimator docstring is re-parsed for every rendered repr, hence the
    # cache (keyed on the docstring text itself).
    return docscrape.NumpyDocString(docstring)
def _params_html_repr(params):
    """Generate HTML representation of estimator parameters.
    Creates an HTML table with parameter names and values, wrapped in a
    collapsible details element. Parameters are styled differently based
    on whether they are default or user-set values.
    """
    PARAMS_TABLE_TEMPLATE = """
      <div class="estimator-table">
          <details>
              <summary>Parameters</summary>
              <table class="parameters-table">
                <tbody>
                {rows}
                </tbody>
              </table>
          </details>
      </div>
    """
    PARAM_ROW_TEMPLATE = """
        <tr class="{param_type}">
            <td><i class="copy-paste-icon"
                 onclick="copyToClipboard('{param_name}',
                          this.parentElement.nextElementSibling)"
            ></i></td>
            <td class="param">{param_display}</td>
            <td class="value">{param_value}</td>
        </tr>
    """
    PARAM_AVAILABLE_DOC_LINK_TEMPLATE = """
        <a class="param-doc-link"
           style="anchor-name: --doc-link-{param_name};"
           rel="noreferrer" target="_blank" href="{link}">
            {param_name}
            <span class="param-doc-description"
                  style="position-anchor: --doc-link-{param_name};">
                  {param_description}</span>
        </a>
    """
    # Scrape the numpydoc parameter descriptions once per estimator class so
    # each row can show a hover tooltip with the parameter documentation.
    estimator_class_docs = inspect.getdoc(params.estimator_class)
    if estimator_class_docs and (
        structured_docstring := _scrape_estimator_docstring(estimator_class_docs)
    ):
        param_map = {
            param_docstring.name: param_docstring
            for param_docstring in structured_docstring["Parameters"]
        }
    else:
        param_map = {}
    rows = []
    for row in params:
        # Escape/truncate the value and classify as "default"/"user-set".
        param = _read_params(row, params[row], params.non_default)
        link = _generate_link_to_param_doc(params.estimator_class, row, params.doc_link)
        if param_numpydoc := param_map.get(row, None):
            param_description = (
                f"{html.escape(param_numpydoc.name)}: "
                f"{html.escape(param_numpydoc.type)}<br><br>"
                f"{'<br>'.join(html.escape(line) for line in param_numpydoc.desc)}"
            )
        else:
            param_description = None
        # A documentation link is rendered only when all three ingredients
        # (base doc link, text-fragment link, scraped description) exist.
        if params.doc_link and link and param_description:
            # Create clickable parameter name with documentation link
            param_display = PARAM_AVAILABLE_DOC_LINK_TEMPLATE.format(
                link=link,
                param_name=param["param_name"],
                param_description=param_description,
            )
        else:
            # Just show the parameter name without link
            param_display = param["param_name"]
        rows.append(PARAM_ROW_TEMPLATE.format(**param, param_display=param_display))
    return PARAMS_TABLE_TEMPLATE.format(rows="\n".join(rows))
class ParamsDict(ReprHTMLMixin, UserDict):
    """Dictionary-like class to store and provide an HTML representation.
    It builds an HTML structure to be used with Jupyter notebooks or similar
    environments. It allows storing metadata to track non-default parameters.
    Parameters
    ----------
    params : dict, default=None
        The original dictionary of parameters and their values.
    non_default : tuple, default=()
        The list of non-default parameters.
    estimator_class : type, default=None
        The class of the estimator. It allows to find the online documentation
        link for each parameter.
    doc_link : str, default=""
        The base URL to the online documentation for the estimator class.
        Used to generate parameter-specific documentation links in the HTML
        representation. If empty, documentation links will not be generated.
    """
    # Callable used by ReprHTMLMixin to build the HTML representation.
    _html_repr = _params_html_repr
    def __init__(
        self, *, params=None, non_default=tuple(), estimator_class=None, doc_link=""
    ):
        # UserDict stores a copy of `params`; fall back to an empty mapping.
        super().__init__(params or {})
        self.non_default = non_default
        self.estimator_class = estimator_class
        self.doc_link = doc_link
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/estimator.py | sklearn/utils/_repr_html/estimator.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import html
from contextlib import closing
from inspect import isclass
from io import StringIO
from pathlib import Path
from string import Template
from sklearn import config_context
class _IDCounter:
"""Generate sequential ids with a prefix."""
def __init__(self, prefix):
self.prefix = prefix
self.count = 0
def get_id(self):
self.count += 1
return f"{self.prefix}-{self.count}"
def _get_css_style():
    """Concatenate the estimator and params CSS shipped next to this module."""
    here = Path(__file__).parent
    styles = [
        (here / css_name).read_text(encoding="utf-8")
        for css_name in ("estimator.css", "params.css")
    ]
    return "\n".join(styles)
# Module-level counters: ids must stay unique across every repr rendered in
# the same Python session.
_CONTAINER_ID_COUNTER = _IDCounter("sk-container-id")
_ESTIMATOR_ID_COUNTER = _IDCounter("sk-estimator-id")
# Read and concatenate the CSS once at import time; reused by every render.
_CSS_STYLE = _get_css_style()
class _VisualBlock:
"""HTML Representation of Estimator
Parameters
----------
kind : {'serial', 'parallel', 'single'}
kind of HTML block
estimators : list of estimators or `_VisualBlock`s or a single estimator
If kind != 'single', then `estimators` is a list of
estimators.
If kind == 'single', then `estimators` is a single estimator.
names : list of str, default=None
If kind != 'single', then `names` corresponds to estimators.
If kind == 'single', then `names` is a single string corresponding to
the single estimator.
name_details : list of str, str, or None, default=None
If kind != 'single', then `name_details` corresponds to `names`.
If kind == 'single', then `name_details` is a single string
corresponding to the single estimator.
name_caption : str, default=None
The caption below the name. `None` stands for no caption.
Only active when kind == 'single'.
doc_link_label : str, default=None
The label for the documentation link. If provided, the label would be
"Documentation for {doc_link_label}". Otherwise it will look for `names`.
Only active when kind == 'single'.
dash_wrapped : bool, default=True
If true, wrapped HTML element will be wrapped with a dashed border.
Only active when kind != 'single'.
"""
def __init__(
self,
kind,
estimators,
*,
names=None,
name_details=None,
name_caption=None,
doc_link_label=None,
dash_wrapped=True,
):
self.kind = kind
self.estimators = estimators
self.dash_wrapped = dash_wrapped
self.name_caption = name_caption
self.doc_link_label = doc_link_label
if self.kind in ("parallel", "serial"):
if names is None:
names = (None,) * len(estimators)
if name_details is None:
name_details = (None,) * len(estimators)
self.names = names
self.name_details = name_details
def _sk_visual_block_(self):
return self
def _write_label_html(
    out,
    params,
    name,
    name_details,
    name_caption=None,
    doc_link_label=None,
    outer_class="sk-label-container",
    inner_class="sk-label",
    checked=False,
    doc_link="",
    is_fitted_css_class="",
    is_fitted_icon="",
    param_prefix="",
):
    """Write labeled html with or without a dropdown with named details.
    Parameters
    ----------
    out : file-like object
        The file to write the HTML representation to.
    params: str
        If estimator has `get_params` method, this is the HTML representation
        of the estimator's parameters and their values. When the estimator
        does not have `get_params`, it is an empty string.
    name : str
        The label for the estimator. It corresponds either to the estimator class name
        for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
        it corresponds to the name of the step.
    name_details : str
        The details to show as content in the dropdown part of the toggleable label. It
        can contain information such as non-default parameters or column information for
        `ColumnTransformer`.
    name_caption : str, default=None
        The caption below the name. If `None`, no caption will be created.
    doc_link_label : str, default=None
        The label for the documentation link. If provided, the label would be
        "Documentation for {doc_link_label}". Otherwise it will look for `name`.
    outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
        The CSS class for the outer container.
    inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
        The CSS class for the inner container.
    checked : bool, default=False
        Whether the dropdown is folded or not. With a single estimator, we intend to
        unfold the content.
    doc_link : str, default=""
        The link to the documentation for the estimator. If an empty string, no link is
        added to the diagram. This can be generated for an estimator if it uses the
        `_HTMLDocumentationLinkMixin`.
    is_fitted_css_class : {"", "fitted"}
        The CSS class to indicate whether or not the estimator is fitted. The
        empty string means that the estimator is not fitted and "fitted" means that the
        estimator is fitted.
    is_fitted_icon : str, default=""
        The HTML representation to show the fitted information in the diagram. An empty
        string means that no information is shown.
    param_prefix : str, default=""
        The prefix to prepend to parameter names for nested estimators.
    """
    out.write(
        f'<div class="{outer_class}"><div'
        f' class="{inner_class} {is_fitted_css_class} sk-toggleable">'
    )
    name = html.escape(name)
    if name_details is not None:
        # Toggleable label: a hidden checkbox drives the expand/collapse state.
        name_details = html.escape(str(name_details))
        checked_str = "checked" if checked else ""
        est_id = _ESTIMATOR_ID_COUNTER.get_id()
        if doc_link:
            # Pick the most specific label available for the "?" doc link.
            doc_label = "<span>Online documentation</span>"
            if doc_link_label is not None:
                doc_label = f"<span>Documentation for {doc_link_label}</span>"
            elif name is not None:
                doc_label = f"<span>Documentation for {name}</span>"
            doc_link = (
                f'<a class="sk-estimator-doc-link {is_fitted_css_class}"'
                f' rel="noreferrer" target="_blank" href="{doc_link}">?{doc_label}</a>'
            )
        # "passthrough" steps and empty selections get no caption.
        if name == "passthrough" or name_details == "[]":
            name_caption = ""
        name_caption_div = (
            ""
            if name_caption is None or name_caption == ""
            else f'<div class="caption">{html.escape(name_caption)}</div>'
        )
        name_caption_div = f"<div><div>{name}</div>{name_caption_div}</div>"
        links_div = (
            f"<div>{doc_link}{is_fitted_icon}</div>"
            if doc_link or is_fitted_icon
            else ""
        )
        label_arrow_class = (
            "" if name == "passthrough" else "sk-toggleable__label-arrow"
        )
        label_html = (
            f'<label for="{est_id}" class="sk-toggleable__label {is_fitted_css_class} '
            f'{label_arrow_class}">{name_caption_div}{links_div}</label>'
        )
        fmt_str = (
            f'<input class="sk-toggleable__control sk-hidden--visually" id="{est_id}" '
            f'type="checkbox" {checked_str}>{label_html}<div '
            f'class="sk-toggleable__content {is_fitted_css_class}" '
            f'data-param-prefix="{html.escape(param_prefix)}">'
        )
        # Dropdown content: prefer the parameter table; otherwise fall back to
        # the raw name details (except for Pipelines, whose details would be
        # redundant with the nested diagram).
        if params:
            fmt_str = "".join([fmt_str, f"{params}</div>"])
        elif name_details and ("Pipeline" not in name):
            if name == "passthrough" or name_details == "[]":
                name_details = ""
            fmt_str = "".join([fmt_str, f"<pre>{name_details}</pre></div>"])
        out.write(fmt_str)
    else:
        # No details: render a plain, non-toggleable label.
        out.write(f"<label>{name}</label>")
    out.write("</div></div>")  # outer_class inner_class
def _get_visual_block(estimator):
    """Generate information about how to display an estimator."""
    # Estimators can fully specify their own display block.
    if hasattr(estimator, "_sk_visual_block_"):
        try:
            return estimator._sk_visual_block_()
        except Exception:
            # Fall back to a plain single block when the custom hook fails.
            return _VisualBlock(
                "single",
                estimator,
                names=estimator.__class__.__name__,
                name_details=str(estimator),
            )

    if isinstance(estimator, str):
        return _VisualBlock(
            "single", estimator, names=estimator, name_details=estimator
        )
    if estimator is None:
        return _VisualBlock("single", estimator, names="None", name_details="None")

    # check if estimator looks like a meta estimator (wraps estimators)
    if hasattr(estimator, "get_params") and not isclass(estimator):
        wrapped = [
            (param_name, value)
            for param_name, value in estimator.get_params(deep=False).items()
            if hasattr(value, "get_params")
            and hasattr(value, "fit")
            and not isclass(value)
        ]
        if wrapped:
            return _VisualBlock(
                "parallel",
                [value for _, value in wrapped],
                names=[
                    f"{param_name}: {value.__class__.__name__}"
                    for param_name, value in wrapped
                ],
                name_details=[str(value) for _, value in wrapped],
            )

    return _VisualBlock(
        "single",
        estimator,
        names=estimator.__class__.__name__,
        name_details=str(estimator),
    )
def _write_estimator_html(
    out,
    estimator,
    estimator_label,
    estimator_label_details,
    is_fitted_css_class,
    is_fitted_icon="",
    first_call=False,
    param_prefix="",
):
    """Write estimator to html in serial, parallel, or by itself (single).
    For multiple estimators, this function is called recursively.
    Parameters
    ----------
    out : file-like object
        The file to write the HTML representation to.
    estimator : estimator object
        The estimator to visualize.
    estimator_label : str
        The label for the estimator. It corresponds either to the estimator class name
        for simple estimator or in the case of `Pipeline` and `ColumnTransformer`, it
        corresponds to the name of the step.
    estimator_label_details : str
        The details to show as content in the dropdown part of the toggleable label.
        It can contain information as non-default parameters or column information for
        `ColumnTransformer`.
    is_fitted_css_class : {"", "fitted"}
        The CSS class to indicate whether or not the estimator is fitted or not. The
        empty string means that the estimator is not fitted and "fitted" means that the
        estimator is fitted.
    is_fitted_icon : str, default=""
        The HTML representation to show the fitted information in the diagram. An empty
        string means that no information is shown. If the estimator to be shown is not
        the first estimator (i.e. `first_call=False`), `is_fitted_icon` is always an
        empty string.
    first_call : bool, default=False
        Whether this is the first time this function is called.
    param_prefix : str, default=""
        The prefix to prepend to parameter names for nested estimators.
        For example, in a pipeline this might be "pipeline__stepname__".
    """
    if first_call:
        est_block = _get_visual_block(estimator)
    else:
        # Nested estimators only display the fitted icon at the top level, and
        # their repr shows only non-default parameters.
        is_fitted_icon = ""
        with config_context(print_changed_only=True):
            est_block = _get_visual_block(estimator)
    # `estimator` can also be an instance of `_VisualBlock`
    if hasattr(estimator, "_get_doc_link"):
        doc_link = estimator._get_doc_link()
    else:
        doc_link = ""
    if est_block.kind in ("serial", "parallel"):
        dashed_wrapped = first_call or est_block.dash_wrapped
        dash_cls = " sk-dashed-wrapped" if dashed_wrapped else ""
        out.write(f'<div class="sk-item{dash_cls}">')
        if estimator_label:
            if hasattr(estimator, "get_params") and hasattr(
                estimator, "_get_params_html"
            ):
                params = estimator._get_params_html(False, doc_link)._repr_html_inner()
            else:
                params = ""
            _write_label_html(
                out,
                params,
                estimator_label,
                estimator_label_details,
                doc_link=doc_link,
                is_fitted_css_class=is_fitted_css_class,
                is_fitted_icon=is_fitted_icon,
                param_prefix=param_prefix,
            )
        kind = est_block.kind
        out.write(f'<div class="sk-{kind}">')
        est_infos = zip(est_block.estimators, est_block.names, est_block.name_details)
        for est, name, name_details in est_infos:
            # Build the parameter prefix for nested estimators
            if param_prefix and hasattr(name, "split"):
                # If we already have a prefix, append the new component
                new_prefix = f"{param_prefix}{name.split(':')[0]}__"
            elif hasattr(name, "split"):
                # If this is the first level, start the prefix
                new_prefix = f"{name.split(':')[0]}__" if name else ""
            else:
                new_prefix = param_prefix
            if kind == "serial":
                _write_estimator_html(
                    out,
                    est,
                    name,
                    name_details,
                    is_fitted_css_class=is_fitted_css_class,
                    param_prefix=new_prefix,
                )
            else:  # parallel
                out.write('<div class="sk-parallel-item">')
                # wrap element in a serial visualblock
                serial_block = _VisualBlock("serial", [est], dash_wrapped=False)
                _write_estimator_html(
                    out,
                    serial_block,
                    name,
                    name_details,
                    is_fitted_css_class=is_fitted_css_class,
                    param_prefix=new_prefix,
                )
                out.write("</div>")  # sk-parallel-item
        out.write("</div></div>")
    elif est_block.kind == "single":
        if (
            hasattr(estimator, "_get_params_html")
            and not est_block.names == "passthrough"
        ):
            params = estimator._get_params_html(doc_link=doc_link)._repr_html_inner()
        else:
            params = ""
        _write_label_html(
            out,
            params,
            est_block.names,
            est_block.name_details,
            est_block.name_caption,
            est_block.doc_link_label,
            outer_class="sk-item",
            inner_class="sk-estimator",
            checked=first_call,
            doc_link=doc_link,
            is_fitted_css_class=is_fitted_css_class,
            is_fitted_icon=is_fitted_icon,
            param_prefix=param_prefix,
        )
def estimator_html_repr(estimator):
    """Build a HTML representation of an estimator.
    Read more in the :ref:`User Guide <visualizing_composite_estimators>`.
    Parameters
    ----------
    estimator : estimator object
        The estimator to visualize.
    Returns
    -------
    html: str
        HTML representation of estimator.
    Examples
    --------
    >>> from sklearn.utils._repr_html.estimator import estimator_html_repr
    >>> from sklearn.linear_model import LogisticRegression
    >>> estimator_html_repr(LogisticRegression())
    '<style>#sk-container-id...'
    """
    # Imported locally, presumably to avoid an import cycle at module load
    # time -- TODO confirm.
    from sklearn.exceptions import NotFittedError
    from sklearn.utils.validation import check_is_fitted
    if not hasattr(estimator, "fit"):
        status_label = "<span>Not fitted</span>"
        is_fitted_css_class = ""
    else:
        try:
            check_is_fitted(estimator)
            status_label = "<span>Fitted</span>"
            is_fitted_css_class = "fitted"
        except NotFittedError:
            status_label = "<span>Not fitted</span>"
            is_fitted_css_class = ""
    is_fitted_icon = (
        f'<span class="sk-estimator-doc-link {is_fitted_css_class}">'
        f"i{status_label}</span>"
    )
    with closing(StringIO()) as out:
        container_id = _CONTAINER_ID_COUNTER.get_id()
        # Scope the CSS rules to this container id so multiple diagrams on
        # the same page do not interfere.
        style_template = Template(_CSS_STYLE)
        style_with_id = style_template.substitute(id=container_id)
        estimator_str = str(estimator)
        # The fallback message is shown by default and loading the CSS sets
        # div.sk-text-repr-fallback to display: none to hide the fallback message.
        #
        # If the notebook is trusted, the CSS is loaded which hides the fallback
        # message. If the notebook is not trusted, then the CSS is not loaded and the
        # fallback message is shown by default.
        #
        # The reverse logic applies to HTML repr div.sk-container.
        # div.sk-container is hidden by default and the loading the CSS displays it.
        fallback_msg = (
            "In a Jupyter environment, please rerun this cell to show the HTML"
            " representation or trust the notebook. <br />On GitHub, the"
            " HTML representation is unable to render, please try loading this page"
            " with nbviewer.org."
        )
        html_template = (
            f"<style>{style_with_id}</style>"
            f"<body>"
            f'<div id="{container_id}" class="sk-top-container">'
            '<div class="sk-text-repr-fallback">'
            f"<pre>{html.escape(estimator_str)}</pre><b>{fallback_msg}</b>"
            "</div>"
            '<div class="sk-container" hidden>'
        )
        out.write(html_template)
        _write_estimator_html(
            out,
            estimator,
            estimator.__class__.__name__,
            estimator_str,
            first_call=True,
            is_fitted_css_class=is_fitted_css_class,
            is_fitted_icon=is_fitted_icon,
        )
        # Inline the JS helpers (copy-to-clipboard, theming) shipped next to
        # this module.
        with open(str(Path(__file__).parent / "estimator.js"), "r") as f:
            script = f.read()
        html_end = (
            f"</div></div><script>{script}"
            f"\nforceTheme('{container_id}');</script></body>"
        )
        out.write(html_end)
        html_output = out.getvalue()
    return html_output
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/__init__.py | sklearn/utils/_repr_html/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/base.py | sklearn/utils/_repr_html/base.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
from sklearn import __version__
from sklearn._config import get_config
from sklearn.utils.fixes import parse_version
class _HTMLDocumentationLinkMixin:
    """Mixin class allowing to generate a link to the API documentation.
    This mixin relies on three attributes:
    - `_doc_link_module`: it corresponds to the root module (e.g. `sklearn`). Using this
      mixin, the default value is `sklearn`.
    - `_doc_link_template`: it corresponds to the template used to generate the
      link to the API documentation. Using this mixin, the default value is
      `"https://scikit-learn.org/{version_url}/modules/generated/
      {estimator_module}.{estimator_name}.html"`.
    - `_doc_link_url_param_generator`: it corresponds to a function that generates the
      parameters to be used in the template when the estimator module and name are not
      sufficient.
    The method :meth:`_get_doc_link` generates the link to the API documentation for a
    given estimator.
    This mixin provides all the necessary states for
    :func:`sklearn.utils.estimator_html_repr` to generate a link to the API
    documentation for the estimator HTML diagram.
    Examples
    --------
    If the default values for `_doc_link_module`, `_doc_link_template` are not suitable,
    then you can override them and provide a method to generate the URL parameters:
    >>> from sklearn.base import BaseEstimator
    >>> doc_link_template = "https://address.local/{single_param}.html"
    >>> def url_param_generator(estimator):
    ...     return {"single_param": estimator.__class__.__name__}
    >>> class MyEstimator(BaseEstimator):
    ...     # use "builtins" since it is the associated module when declaring
    ...     # the class in a docstring
    ...     _doc_link_module = "builtins"
    ...     _doc_link_template = doc_link_template
    ...     _doc_link_url_param_generator = url_param_generator
    >>> estimator = MyEstimator()
    >>> estimator._get_doc_link()
    'https://address.local/MyEstimator.html'
    If instead of overriding the attributes inside the class definition, you want to
    override a class instance, you can use `types.MethodType` to bind the method to the
    instance:
    >>> import types
    >>> estimator = BaseEstimator()
    >>> estimator._doc_link_template = doc_link_template
    >>> estimator._doc_link_url_param_generator = types.MethodType(
    ...     url_param_generator, estimator)
    >>> estimator._get_doc_link()
    'https://address.local/BaseEstimator.html'
    """
    _doc_link_module = "sklearn"
    _doc_link_url_param_generator = None
    @property
    def _doc_link_template(self):
        # Link to the versioned stable docs for releases, or /dev/ for
        # development builds.
        sklearn_version = parse_version(__version__)
        if sklearn_version.dev is None:
            version_url = f"{sklearn_version.major}.{sklearn_version.minor}"
        else:
            version_url = "dev"
        # The override is stored under the literal attribute name
        # "__doc_link_template" via getattr/setattr on strings, so Python's
        # class-level name mangling does not apply here.
        return getattr(
            self,
            "__doc_link_template",
            (
                f"https://scikit-learn.org/{version_url}/modules/generated/"
                "{estimator_module}.{estimator_name}.html"
            ),
        )
    @_doc_link_template.setter
    def _doc_link_template(self, value):
        # Store the per-instance override read back by the property above.
        setattr(self, "__doc_link_template", value)
    def _get_doc_link(self):
        """Generates a link to the API documentation for a given estimator.
        This method generates the link to the estimator's documentation page
        by using the template defined by the attribute `_doc_link_template`.
        Returns
        -------
        url : str
            The URL to the API documentation for this estimator. If the estimator does
            not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
            returned.
        """
        if self.__class__.__module__.split(".")[0] != self._doc_link_module:
            return ""
        if self._doc_link_url_param_generator is None:
            estimator_name = self.__class__.__name__
            # Construct the estimator's module name, up to the first private submodule.
            # This works because in scikit-learn all public estimators are exposed at
            # that level, even if they actually live in a private sub-module.
            estimator_module = ".".join(
                itertools.takewhile(
                    lambda part: not part.startswith("_"),
                    self.__class__.__module__.split("."),
                )
            )
            return self._doc_link_template.format(
                estimator_module=estimator_module, estimator_name=estimator_name
            )
        return self._doc_link_template.format(**self._doc_link_url_param_generator())
class ReprHTMLMixin:
    """Mixin to handle consistently the HTML representation.

    When inheriting from this class, you need to define an attribute
    `_html_repr` which is a callable that returns the HTML representation to
    be shown.
    """

    @property
    def _repr_html_(self):
        """HTML representation of estimator.

        This is redundant with the logic of `_repr_mimebundle_`. The latter
        should be favored in the long term, `_repr_html_` is only
        implemented for consumers who do not interpret `_repr_mimbundle_`.
        """
        if get_config()["display"] != "diagram":
            # Raising makes hasattr(obj, "_repr_html_") report False, so
            # front-ends fall back to the plain text repr.
            raise AttributeError(
                "_repr_html_ is only defined when the 'display' "
                "configuration option is set to 'diagram'"
            )
        return self._repr_html_inner

    def _repr_html_inner(self):
        """This function is returned by the @property `_repr_html_` to make
        `hasattr(estimator, "_repr_html_") return `True` or `False` depending
        on `get_config()["display"]`.
        """
        return self._html_repr()

    def _repr_mimebundle_(self, **kwargs):
        """Mime bundle used by jupyter kernels to display estimator"""
        bundle = {"text/plain": repr(self)}
        if get_config()["display"] == "diagram":
            bundle["text/html"] = self._html_repr()
        return bundle
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/tests/test_js.py | sklearn/utils/_repr_html/tests/test_js.py | import socket
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def check_playwright():
"""Skip tests if playwright is not installed.
This fixture is used by the next fixture (which is autouse) to skip all tests
if playwright is not installed."""
return pytest.importorskip("playwright")
@pytest.fixture
def local_server(request):
"""Start a simple HTTP server that serves custom HTML per test.
Usage :
```python
def test_something(page, local_server):
url, set_html_response = local_server
set_html_response("<html>...</html>")
page.goto(url)
...
```
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", 0))
PORT = s.getsockname()[1]
html_content = "<html><body>Default</body></html>"
def set_html_response(content):
nonlocal html_content
html_content = content
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html_content.encode("utf-8"))
# suppress logging
def log_message(self, format, *args):
return
httpd = HTTPServer(("127.0.0.1", PORT), Handler)
thread = threading.Thread(target=httpd.serve_forever, daemon=True)
thread.start()
yield f"http://127.0.0.1:{PORT}", set_html_response
httpd.shutdown()
def _make_page(body):
"""Helper to create a HTML page that includes `estimator.js` and the given body."""
js_path = Path(__file__).parent.parent / "estimator.js"
with open(js_path, "r", encoding="utf-8") as f:
script = f.read()
return f"""
<html>
<head>
<script>{script}</script>
</head>
<body>
{body}
</body>
</html>
"""
def test_copy_paste(page, local_server):
"""Test that copyToClipboard copies the right text to the clipboard.
Test requires clipboard permissions, which are granted through page's context.
Assertion is done by reading back the clipboard content from the browser.
This is easier than writing a cross platform clipboard reader.
"""
url, set_html_response = local_server
copy_paste_html = _make_page(
'<div class="sk-toggleable__content" data-param-prefix="prefix"/>'
)
set_html_response(copy_paste_html)
page.context.grant_permissions(["clipboard-read", "clipboard-write"])
page.goto(url)
page.evaluate(
"copyToClipboard('test', document.querySelector('.sk-toggleable__content'))"
)
clipboard_content = page.evaluate("navigator.clipboard.readText()")
# `copyToClipboard` function concatenates the `data-param-prefix` attribute
# with the first argument. Hence we expect "prefixtest" and not just test.
assert clipboard_content == "prefixtest"
@pytest.mark.parametrize(
"color,expected_theme",
[
(
"black",
"light",
),
(
"white",
"dark",
),
(
"#828282",
"light",
),
],
)
def test_force_theme(page, local_server, color, expected_theme):
"""Test that forceTheme applies the right theme class to the element.
A light color must lead to a dark theme and vice-versa.
"""
url, set_html_response = local_server
html = _make_page('<div style="color: ${color};"><div id="test"></div></div>')
set_html_response(html.replace("${color}", color))
page.goto(url)
page.evaluate("forceTheme('test')")
assert page.locator("#test").evaluate(
f"el => el.classList.contains('{expected_theme}')"
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/tests/test_params.py | sklearn/utils/_repr_html/tests/test_params.py | import re
import pytest
from sklearn import config_context
from sklearn.utils._repr_html.params import (
ParamsDict,
_generate_link_to_param_doc,
_params_html_repr,
_read_params,
)
def test_params_dict_content():
"""Check the behavior of the ParamsDict class."""
params = ParamsDict(params={"a": 1, "b": 2})
assert params["a"] == 1
assert params["b"] == 2
assert params.non_default == ()
params = ParamsDict(params={"a": 1, "b": 2}, non_default=("a",))
assert params["a"] == 1
assert params["b"] == 2
assert params.non_default == ("a",)
def test_params_dict_repr_html_():
params = ParamsDict(params={"a": 1, "b": 2}, non_default=("a",), estimator_class="")
out = params._repr_html_()
assert "<summary>Parameters</summary>" in out
with config_context(display="text"):
msg = "_repr_html_ is only defined when"
with pytest.raises(AttributeError, match=msg):
params._repr_html_()
def test_params_dict_repr_mimebundle():
params = ParamsDict(params={"a": 1, "b": 2}, non_default=("a",), estimator_class="")
out = params._repr_mimebundle_()
assert "text/plain" in out
assert "text/html" in out
assert "<summary>Parameters</summary>" in out["text/html"]
assert out["text/plain"] == "{'a': 1, 'b': 2}"
with config_context(display="text"):
out = params._repr_mimebundle_()
assert "text/plain" in out
assert "text/html" not in out
def test_read_params():
"""Check the behavior of the `_read_params` function."""
out = _read_params("a", 1, tuple())
assert out["param_type"] == "default"
assert out["param_name"] == "a"
assert out["param_value"] == "1"
# check non-default parameters
out = _read_params("a", 1, ("a",))
assert out["param_type"] == "user-set"
assert out["param_name"] == "a"
assert out["param_value"] == "1"
# check that we escape html tags
tag_injection = "<script>alert('xss')</script>"
out = _read_params("a", tag_injection, tuple())
assert (
out["param_value"]
== ""<script>alert('xss')</script>""
)
assert out["param_name"] == "a"
assert out["param_type"] == "default"
def test_params_html_repr():
"""Check returned HTML template"""
params = ParamsDict(params={"a": 1, "b": 2}, estimator_class="")
assert "parameters-table" in _params_html_repr(params)
assert "estimator-table" in _params_html_repr(params)
def test_params_html_repr_with_doc_links():
"""Test `_params_html_repr` with valid and invalid doc links."""
class MockEstimator:
"""A fake estimator class with a docstring used for testing.
Parameters
----------
a : int
Description of a which can include `<formatted text
https://example.com>`_ that should not be confused with HTML tags.
b : str
"""
__module__ = "sklearn.mock_module"
__qualname__ = "MockEstimator"
params = ParamsDict(
params={"a": 1, "b": "value"},
non_default=("a",),
estimator_class=MockEstimator,
doc_link="mock_module.MockEstimator.html",
)
html_output = _params_html_repr(params)
html_param_a = (
r'<td class="param">'
r'\s*<a class="param-doc-link"'
r'\s*style="anchor-name: --doc-link-a;"'
r'\s*rel="noreferrer" target="_blank"'
r'\shref="mock_module\.MockEstimator\.html#:~:text=a,-int">'
r"\s*a"
r'\s*<span class="param-doc-description"'
r'\s*style="position-anchor: --doc-link-a;">\s*a:'
r"\sint<br><br>"
r"Description of a which can include `<formatted text<br>"
r"https://example.com>`_ that should not be confused with HTML tags.</span>"
r"\s*</a>"
r"\s*</td>"
)
assert re.search(html_param_a, html_output, flags=re.DOTALL)
html_param_b = (
r'<td class="param">'
r'.*<a class="param-doc-link"'
r'\s*style="anchor-name: --doc-link-b;"'
r'\s*rel="noreferrer" target="_blank"'
r'\shref="mock_module\.MockEstimator\.html#:~:text=b,-str">'
r"\s*b"
r'\s*<span class="param-doc-description"'
r'\s*style="position-anchor: --doc-link-b;">\s*b:'
r"\sstr<br><br></span>"
r"\s*</a>"
r"\s*</td>"
)
assert re.search(html_param_b, html_output, flags=re.DOTALL)
def test_params_html_repr_without_doc_links():
"""Test `_params_html_repr` when `link_to_param_doc` returns None."""
class MockEstimatorWithoutDoc:
__module__ = "sklearn.mock_module"
__qualname__ = "MockEstimatorWithoutDoc"
# No docstring defined on this test class.
params = ParamsDict(
params={"a": 1, "b": "value"},
non_default=("a",),
estimator_class=MockEstimatorWithoutDoc,
)
html_output = _params_html_repr(params)
# Check that no doc links are generated
assert "?" not in html_output
assert "Click to access" not in html_output
html_param_a = (
r'<td class="param">a</td>'
r'\s*<td class="value">1</td>'
)
assert re.search(html_param_a, html_output, flags=re.DOTALL)
html_param_b = (
r'<td class="param">b</td>'
r'\s*<td class="value">'value'</td>'
)
assert re.search(html_param_b, html_output, flags=re.DOTALL)
def test_generate_link_to_param_doc_basic():
"""Return anchor URLs for documented parameters in the estimator."""
class MockEstimator:
"""Mock class.
Parameters
----------
alpha : float
Regularization strength.
beta : int
Some integer parameter.
"""
doc_link = "mock_module.MockEstimator.html"
url = _generate_link_to_param_doc(MockEstimator, "alpha", doc_link)
assert url == "mock_module.MockEstimator.html#:~:text=alpha,-float"
url = _generate_link_to_param_doc(MockEstimator, "beta", doc_link)
assert url == "mock_module.MockEstimator.html#:~:text=beta,-int"
def test_generate_link_to_param_doc_param_not_found():
"""Ensure None is returned when the parameter is not documented."""
class MockEstimator:
"""Mock class
Parameters
----------
alpha : float
Regularization strength.
"""
doc_link = "mock_module.MockEstimator.html"
url = _generate_link_to_param_doc(MockEstimator, "gamma", doc_link)
assert url is None
def test_generate_link_to_param_doc_empty_docstring():
"""Ensure None is returned when the estimator has no docstring."""
class MockEstimator:
pass
doc_link = "mock_module.MockEstimator.html"
url = _generate_link_to_param_doc(MockEstimator, "alpha", doc_link)
assert url is None
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/tests/__init__.py | sklearn/utils/_repr_html/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_repr_html/tests/test_estimator.py | sklearn/utils/_repr_html/tests/test_estimator.py | import html
import locale
import re
import types
from contextlib import closing
from functools import partial
from io import StringIO
from unittest.mock import patch
import numpy as np
import pytest
from sklearn import config_context
from sklearn.base import BaseEstimator, clone
from sklearn.cluster import AgglomerativeClustering, Birch
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.ensemble import StackingClassifier, StackingRegressor, VotingClassifier
from sklearn.feature_selection import SelectPercentile
from sklearn.gaussian_process.kernels import ExpSineSquared
from sklearn.impute import SimpleImputer
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
from sklearn.multiclass import OneVsOneClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import FeatureUnion, Pipeline, make_pipeline
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, StandardScaler
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils._repr_html.base import _HTMLDocumentationLinkMixin
from sklearn.utils._repr_html.estimator import (
_get_css_style,
_get_visual_block,
_write_label_html,
estimator_html_repr,
)
from sklearn.utils.fixes import parse_version
def dummy_function(x, y):
return x + y # pragma: nocover
@pytest.mark.parametrize("checked", [True, False])
def test_write_label_html(checked):
# Test checking logic and labeling
name = "LogisticRegression"
params = ""
tool_tip = "hello-world"
with closing(StringIO()) as out:
_write_label_html(out, params, name, tool_tip, checked=checked)
html_label = out.getvalue()
p = (
r'<label for="sk-estimator-id-[0-9]*"'
r' class="sk-toggleable__label (fitted)? sk-toggleable__label-arrow">'
r"<div><div>LogisticRegression</div></div>"
)
re_compiled = re.compile(p)
assert re_compiled.search(html_label)
assert html_label.startswith('<div class="sk-label-container">')
assert "<pre>hello-world</pre>" in html_label
if checked:
assert "checked>" in html_label
@pytest.mark.parametrize("est", ["passthrough", "drop", None])
def test_get_visual_block_single_str_none(est):
# Test estimators that are represented by strings
est_html_info = _get_visual_block(est)
assert est_html_info.kind == "single"
assert est_html_info.estimators == est
assert est_html_info.names == str(est)
assert est_html_info.name_details == str(est)
def test_get_visual_block_single_estimator():
est = LogisticRegression(C=10.0)
est_html_info = _get_visual_block(est)
assert est_html_info.kind == "single"
assert est_html_info.estimators == est
assert est_html_info.names == est.__class__.__name__
assert est_html_info.name_details == str(est)
def test_get_visual_block_pipeline():
pipe = Pipeline(
[
("imputer", SimpleImputer()),
("do_nothing", "passthrough"),
("do_nothing_more", None),
("classifier", LogisticRegression()),
]
)
est_html_info = _get_visual_block(pipe)
assert est_html_info.kind == "serial"
assert est_html_info.estimators == tuple(step[1] for step in pipe.steps)
assert est_html_info.names == (
"imputer: SimpleImputer",
"do_nothing: passthrough",
"do_nothing_more: passthrough",
"classifier: LogisticRegression",
)
assert est_html_info.name_details == [str(est) for _, est in pipe.steps]
def test_get_visual_block_feature_union():
f_union = FeatureUnion([("pca", PCA()), ("svd", TruncatedSVD())])
est_html_info = _get_visual_block(f_union)
assert est_html_info.kind == "parallel"
assert est_html_info.names == ("pca", "svd")
assert est_html_info.estimators == tuple(
trans[1] for trans in f_union.transformer_list
)
assert est_html_info.name_details == (None, None)
def test_get_visual_block_voting():
clf = VotingClassifier(
[("log_reg", LogisticRegression()), ("mlp", MLPClassifier())]
)
est_html_info = _get_visual_block(clf)
assert est_html_info.kind == "parallel"
assert est_html_info.estimators == tuple(trans[1] for trans in clf.estimators)
assert est_html_info.names == ("log_reg", "mlp")
assert est_html_info.name_details == (None, None)
def test_get_visual_block_column_transformer():
ct = ColumnTransformer(
[("pca", PCA(), ["num1", "num2"]), ("svd", TruncatedSVD, [0, 3])]
)
est_html_info = _get_visual_block(ct)
assert est_html_info.kind == "parallel"
assert est_html_info.estimators == tuple(trans[1] for trans in ct.transformers)
assert est_html_info.names == ("pca", "svd")
assert est_html_info.name_details == (["num1", "num2"], [0, 3])
def test_estimator_html_repr_an_empty_pipeline():
"""Check that the representation of an empty Pipeline does not fail.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/30197
"""
empty_pipeline = Pipeline([])
estimator_html_repr(empty_pipeline)
def test_estimator_html_repr_pipeline():
num_trans = Pipeline(
steps=[("pass", "passthrough"), ("imputer", SimpleImputer(strategy="median"))]
)
cat_trans = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="constant", missing_values="empty")),
("one-hot", OneHotEncoder(drop="first")),
]
)
preprocess = ColumnTransformer(
[
("num", num_trans, ["a", "b", "c", "d", "e"]),
("cat", cat_trans, [0, 1, 2, 3]),
]
)
feat_u = FeatureUnion(
[
("pca", PCA(n_components=1)),
(
"tsvd",
Pipeline(
[
("first", TruncatedSVD(n_components=3)),
("select", SelectPercentile()),
]
),
),
]
)
clf = VotingClassifier(
[
("lr", LogisticRegression(solver="lbfgs", random_state=1)),
("mlp", MLPClassifier(alpha=0.001)),
]
)
pipe = Pipeline(
[("preprocessor", preprocess), ("feat_u", feat_u), ("classifier", clf)]
)
html_output = estimator_html_repr(pipe)
# top level estimators show estimator with changes
assert html.escape(str(pipe)) in html_output
for _, est in pipe.steps:
assert html.escape(str(est))[:44] in html_output
# low level estimators do not show changes
with config_context(print_changed_only=True):
assert html.escape(str(num_trans["pass"])) in html_output
assert "<div><div>passthrough</div></div></label>" in html_output
assert html.escape(str(num_trans["imputer"])) in html_output
for _, _, cols in preprocess.transformers:
assert f"<pre>{html.escape(str(cols))}</pre>" in html_output
# feature union
for name, _ in feat_u.transformer_list:
assert f"<label>{html.escape(name)}</label>" in html_output
pca = feat_u.transformer_list[0][1]
assert html.escape(str(pca)) in html_output
tsvd = feat_u.transformer_list[1][1]
first = tsvd["first"]
select = tsvd["select"]
assert html.escape(str(first)) in html_output
assert html.escape(str(select)) in html_output
# voting classifier
for name, est in clf.estimators:
assert html.escape(name) in html_output
assert html.escape(str(est)) in html_output
# verify that prefers-color-scheme is implemented
assert "prefers-color-scheme" in html_output
@pytest.mark.parametrize("final_estimator", [None, LinearSVC()])
def test_stacking_classifier(final_estimator):
estimators = [
("mlp", MLPClassifier(alpha=0.001)),
("tree", DecisionTreeClassifier()),
]
clf = StackingClassifier(estimators=estimators, final_estimator=final_estimator)
html_output = estimator_html_repr(clf)
assert html.escape(str(clf)) in html_output
# If final_estimator's default changes from LogisticRegression
# this should be updated
if final_estimator is None:
assert "LogisticRegression" in html_output
else:
assert final_estimator.__class__.__name__ in html_output
@pytest.mark.parametrize("final_estimator", [None, LinearSVR()])
def test_stacking_regressor(final_estimator):
reg = StackingRegressor(
estimators=[("svr", LinearSVR())], final_estimator=final_estimator
)
html_output = estimator_html_repr(reg)
assert html.escape(str(reg.estimators[0][0])) in html_output
p = (
r'<label for="sk-estimator-id-[0-9]*"'
r' class="sk-toggleable__label (fitted)? sk-toggleable__label-arrow">'
r"<div><div>LinearSVR</div></div>"
)
re_compiled = re.compile(p)
assert re_compiled.search(html_output)
if final_estimator is None:
p = (
r'<label for="sk-estimator-id-[0-9]*"'
r' class="sk-toggleable__label (fitted)? sk-toggleable__label-arrow">'
r"<div><div>RidgeCV</div></div>"
)
re_compiled = re.compile(p)
assert re_compiled.search(html_output)
else:
assert html.escape(final_estimator.__class__.__name__) in html_output
def test_birch_duck_typing_meta():
# Test duck typing meta estimators with Birch
birch = Birch(n_clusters=AgglomerativeClustering(n_clusters=3))
html_output = estimator_html_repr(birch)
# inner estimators do not show changes
with config_context(print_changed_only=True):
assert f"<pre>{html.escape(str(birch.n_clusters))}" in html_output
p = r"<div><div>AgglomerativeClustering</div></div><div>.+</div></label>"
re_compiled = re.compile(p)
assert re_compiled.search(html_output)
# outer estimator contains all changes
assert f"<pre>{html.escape(str(birch))}" in html_output
def test_ovo_classifier_duck_typing_meta():
# Test duck typing metaestimators with OVO
ovo = OneVsOneClassifier(LinearSVC(penalty="l1"))
html_output = estimator_html_repr(ovo)
# inner estimators do not show changes
with config_context(print_changed_only=True):
assert f"<pre>{html.escape(str(ovo.estimator))}" in html_output
# regex to match the start of the tag
p = (
r'<label for="sk-estimator-id-[0-9]*" '
r'class="sk-toggleable__label sk-toggleable__label-arrow">'
r"<div><div>LinearSVC</div></div>"
)
re_compiled = re.compile(p)
assert re_compiled.search(html_output)
# outer estimator
assert f"<pre>{html.escape(str(ovo))}" in html_output
def test_duck_typing_nested_estimator():
# Test duck typing metaestimators with random search
kernel_ridge = KernelRidge(kernel=ExpSineSquared())
param_distributions = {"alpha": [1, 2]}
kernel_ridge_tuned = RandomizedSearchCV(
kernel_ridge,
param_distributions=param_distributions,
)
html_output = estimator_html_repr(kernel_ridge_tuned)
assert "<div><div>estimator: KernelRidge</div></div></label>" in html_output
@pytest.mark.parametrize("print_changed_only", [True, False])
def test_one_estimator_print_change_only(print_changed_only):
pca = PCA(n_components=10)
with config_context(print_changed_only=print_changed_only):
pca_repr = html.escape(str(pca))
html_output = estimator_html_repr(pca)
assert pca_repr in html_output
def test_fallback_exists():
"""Check that repr fallback is in the HTML."""
pca = PCA(n_components=10)
html_output = estimator_html_repr(pca)
assert (
f'<div class="sk-text-repr-fallback"><pre>{html.escape(str(pca))}'
in html_output
)
def test_show_arrow_pipeline():
"""Show arrow in pipeline for top level in pipeline"""
pipe = Pipeline([("scale", StandardScaler()), ("log_Reg", LogisticRegression())])
html_output = estimator_html_repr(pipe)
assert (
'class="sk-toggleable__label sk-toggleable__label-arrow">'
"<div><div>Pipeline</div></div>" in html_output
)
def test_invalid_parameters_in_stacking():
"""Invalidate stacking configuration uses default repr.
Non-regression test for #24009.
"""
stacker = StackingClassifier(estimators=[])
html_output = estimator_html_repr(stacker)
assert html.escape(str(stacker)) in html_output
def test_estimator_get_params_return_cls():
"""Check HTML repr works where a value in get_params is a class."""
class MyEstimator:
def get_params(self, deep=False):
return {"inner_cls": LogisticRegression}
est = MyEstimator()
assert "MyEstimator" in estimator_html_repr(est)
def test_estimator_html_repr_unfitted_vs_fitted():
"""Check that we have the information that the estimator is fitted or not in the
HTML representation.
"""
class MyEstimator(BaseEstimator):
def fit(self, X, y):
self.fitted_ = True
return self
X, y = load_iris(return_X_y=True)
estimator = MyEstimator()
assert "<span>Not fitted</span>" in estimator_html_repr(estimator)
estimator.fit(X, y)
assert "<span>Fitted</span>" in estimator_html_repr(estimator)
@pytest.mark.parametrize(
"estimator",
[
LogisticRegression(),
make_pipeline(StandardScaler(), LogisticRegression()),
make_pipeline(
make_column_transformer((StandardScaler(), slice(0, 3))),
LogisticRegression(),
),
],
)
def test_estimator_html_repr_fitted_icon(estimator):
estimator = clone(estimator) # Avoid side effects from previous tests.
"""Check that we are showing the fitted status icon only once."""
pattern = '<span class="sk-estimator-doc-link ">i<span>Not fitted</span></span>'
assert estimator_html_repr(estimator).count(pattern) == 1
X, y = load_iris(return_X_y=True)
estimator.fit(X, y)
pattern = '<span class="sk-estimator-doc-link fitted">i<span>Fitted</span></span>'
assert estimator_html_repr(estimator).count(pattern) == 1
@pytest.mark.parametrize("mock_version", ["1.3.0.dev0", "1.3.0"])
def test_html_documentation_link_mixin_sklearn(mock_version):
"""Check the behaviour of the `_HTMLDocumentationLinkMixin` class for scikit-learn
default.
"""
# mock the `__version__` where the mixin is located
with patch("sklearn.utils._repr_html.base.__version__", mock_version):
mixin = _HTMLDocumentationLinkMixin()
assert mixin._doc_link_module == "sklearn"
sklearn_version = parse_version(mock_version)
# we need to parse the version manually to be sure that this test is passing in
# other branches than `main` (that is "dev").
if sklearn_version.dev is None:
version = f"{sklearn_version.major}.{sklearn_version.minor}"
else:
version = "dev"
assert (
mixin._doc_link_template
== f"https://scikit-learn.org/{version}/modules/generated/"
"{estimator_module}.{estimator_name}.html"
)
assert (
mixin._get_doc_link()
== f"https://scikit-learn.org/{version}/modules/generated/"
"sklearn.utils._HTMLDocumentationLinkMixin.html"
)
@pytest.mark.parametrize(
"module_path,expected_module",
[
("prefix.mymodule", "prefix.mymodule"),
("prefix._mymodule", "prefix"),
("prefix.mypackage._mymodule", "prefix.mypackage"),
("prefix.mypackage._mymodule.submodule", "prefix.mypackage"),
("prefix.mypackage.mymodule.submodule", "prefix.mypackage.mymodule.submodule"),
],
)
def test_html_documentation_link_mixin_get_doc_link_instance(
module_path, expected_module
):
"""Check the behaviour of the `_get_doc_link` with various parameter."""
class FooBar(_HTMLDocumentationLinkMixin):
pass
FooBar.__module__ = module_path
est = FooBar()
# if we set `_doc_link`, then we expect to infer a module and name for the estimator
est._doc_link_module = "prefix"
est._doc_link_template = (
"https://website.com/{estimator_module}.{estimator_name}.html"
)
assert est._get_doc_link() == f"https://website.com/{expected_module}.FooBar.html"
@pytest.mark.parametrize(
"module_path,expected_module",
[
("prefix.mymodule", "prefix.mymodule"),
("prefix._mymodule", "prefix"),
("prefix.mypackage._mymodule", "prefix.mypackage"),
("prefix.mypackage._mymodule.submodule", "prefix.mypackage"),
("prefix.mypackage.mymodule.submodule", "prefix.mypackage.mymodule.submodule"),
],
)
def test_html_documentation_link_mixin_get_doc_link_class(module_path, expected_module):
"""Check the behaviour of the `_get_doc_link` when `_doc_link_module` and
`_doc_link_template` are defined at the class level and not at the instance
level."""
class FooBar(_HTMLDocumentationLinkMixin):
_doc_link_module = "prefix"
_doc_link_template = (
"https://website.com/{estimator_module}.{estimator_name}.html"
)
FooBar.__module__ = module_path
est = FooBar()
assert est._get_doc_link() == f"https://website.com/{expected_module}.FooBar.html"
def test_html_documentation_link_mixin_get_doc_link_out_of_library():
"""Check the behaviour of the `_get_doc_link` with various parameter."""
mixin = _HTMLDocumentationLinkMixin()
# if the `_doc_link_module` does not refer to the root module of the estimator
# (here the mixin), then we should return an empty string.
mixin._doc_link_module = "xxx"
assert mixin._get_doc_link() == ""
def test_html_documentation_link_mixin_doc_link_url_param_generator_instance():
mixin = _HTMLDocumentationLinkMixin()
# we can bypass the generation by providing our own callable
mixin._doc_link_template = (
"https://website.com/{my_own_variable}.{another_variable}.html"
)
def url_param_generator(estimator):
return {
"my_own_variable": "value_1",
"another_variable": "value_2",
}
mixin._doc_link_url_param_generator = types.MethodType(url_param_generator, mixin)
assert mixin._get_doc_link() == "https://website.com/value_1.value_2.html"
def test_html_documentation_link_mixin_doc_link_url_param_generator_class():
# we can bypass the generation by providing our own callable
def url_param_generator(estimator):
return {
"my_own_variable": "value_1",
"another_variable": "value_2",
}
class FooBar(_HTMLDocumentationLinkMixin):
_doc_link_template = (
"https://website.com/{my_own_variable}.{another_variable}.html"
)
_doc_link_url_param_generator = url_param_generator
estimator = FooBar()
assert estimator._get_doc_link() == "https://website.com/value_1.value_2.html"
@pytest.fixture
def set_non_utf8_locale():
"""Pytest fixture to set non utf-8 locale during the test.
The locale is set to the original one after the test has run.
"""
try:
locale.setlocale(locale.LC_CTYPE, "C")
except locale.Error:
pytest.skip("'C' locale is not available on this OS")
yield
# Resets the locale to the original one. Python calls setlocale(LC_TYPE, "")
# at startup according to
# https://docs.python.org/3/library/locale.html#background-details-hints-tips-and-caveats.
# This assumes that no other locale changes have been made. For some reason,
# on some platforms, trying to restore locale with something like
# locale.setlocale(locale.LC_CTYPE, locale.getlocale()) raises a
# locale.Error: unsupported locale setting
locale.setlocale(locale.LC_CTYPE, "")
def test_non_utf8_locale(set_non_utf8_locale):
"""Checks that utf8 encoding is used when reading the CSS file.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/27725
"""
_get_css_style()
@pytest.mark.parametrize(
"func, expected_name",
[
(lambda x: x + 1, html.escape("<lambda>")),
(dummy_function, "dummy_function"),
(partial(dummy_function, y=1), "dummy_function"),
(np.vectorize(partial(dummy_function, y=1)), re.escape("vectorize(...)")),
],
)
def test_function_transformer_show_caption(func, expected_name):
# Test that function name is shown as the name and "FunctionTransformer" is shown
# in the caption
ft = FunctionTransformer(func)
html_output = estimator_html_repr(ft)
p = (
r'<label for="sk-estimator-id-[0-9]*" class="sk-toggleable__label fitted '
rf'sk-toggleable__label-arrow"><div><div>{expected_name}</div>'
r'<div class="caption">FunctionTransformer</div></div>'
)
re_compiled = re.compile(p)
assert re_compiled.search(html_output)
def test_estimator_html_repr_table():
"""Check that we add the table of parameters in the HTML representation."""
est = LogisticRegression(C=10.0, fit_intercept=False)
assert "parameters-table" in estimator_html_repr(est)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cross_decomposition/_pls.py | sklearn/cross_decomposition/_pls.py | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
from scipy.linalg import pinv, svd
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
MultiOutputMixin,
RegressorMixin,
TransformerMixin,
_fit_context,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array, check_consistent_length
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import svd_flip
from sklearn.utils.validation import FLOAT_DTYPES, check_is_fitted, validate_data
__all__ = ["PLSSVD", "PLSCanonical", "PLSRegression"]
def _pinv2_old(a):
# Used previous scipy pinv2 that was updated in:
# https://github.com/scipy/scipy/pull/10067
# We can not set `cond` or `rcond` for pinv2 in scipy >= 1.3 to keep the
# same behavior of pinv2 for scipy < 1.3, because the condition used to
# determine the rank is dependent on the output of svd.
u, s, vh = svd(a, full_matrices=False, check_finite=False)
t = u.dtype.char.lower()
factor = {"f": 1e3, "d": 1e6}
cond = np.max(s) * factor[t] * np.finfo(t).eps
rank = np.sum(s > cond)
u = u[:, :rank]
u /= s[:rank]
return np.transpose(np.conjugate(np.dot(u, vh[:rank])))
def _get_first_singular_vectors_power_method(
X, y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False
):
"""Return the first left and right singular vectors of X'y.
Provides an alternative to the svd(X'y) and uses the power method instead.
With norm_y_weights to True and in mode A, this corresponds to the
algorithm section 11.3 of the Wegelin's review, except this starts at the
"update saliences" part.
"""
eps = np.finfo(X.dtype).eps
try:
y_score = next(col for col in y.T if np.any(np.abs(col) > eps))
except StopIteration as e:
raise StopIteration("y residual is constant") from e
x_weights_old = 100 # init to big value for first convergence check
if mode == "B":
# Precompute pseudo inverse matrices
# Basically: X_pinv = (X.T X)^-1 X.T
# Which requires inverting a (n_features, n_features) matrix.
# As a result, and as detailed in the Wegelin's review, CCA (i.e. mode
# B) will be unstable if n_features > n_samples or n_targets >
# n_samples
X_pinv, y_pinv = _pinv2_old(X), _pinv2_old(y)
for i in range(max_iter):
if mode == "B":
x_weights = np.dot(X_pinv, y_score)
else:
x_weights = np.dot(X.T, y_score) / np.dot(y_score, y_score)
x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps
x_score = np.dot(X, x_weights)
if mode == "B":
y_weights = np.dot(y_pinv, x_score)
else:
y_weights = np.dot(y.T, x_score) / np.dot(x_score.T, x_score)
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps
y_score = np.dot(y, y_weights) / (np.dot(y_weights, y_weights) + eps)
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff, x_weights_diff) < tol or y.shape[1] == 1:
break
x_weights_old = x_weights
n_iter = i + 1
if n_iter == max_iter:
warnings.warn("Maximum number of iterations reached", ConvergenceWarning)
return x_weights, y_weights, n_iter
def _get_first_singular_vectors_svd(X, y):
"""Return the first left and right singular vectors of X'y.
Here the whole SVD is computed.
"""
C = np.dot(X.T, y)
U, _, Vt = svd(C, full_matrices=False)
return U[:, 0], Vt[0, :]
def _center_scale_xy(X, y, scale=True):
"""Center X, y and scale if the scale parameter==True
Returns
-------
X, y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = y.mean(axis=0)
y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(y.shape[1])
return X, y, x_mean, y_mean, x_std, y_std
def _svd_flip_1d(u, v):
"""Same as svd_flip but works on 1d arrays, and is inplace"""
# svd_flip would force us to convert to 2d array and would also return 2d
# arrays. We don't want that.
biggest_abs_val_idx = np.argmax(np.abs(u))
sign = np.sign(u[biggest_abs_val_idx])
u *= sign
v *= sign
class _PLS(
    ClassNamePrefixFeaturesOutMixin,
    TransformerMixin,
    RegressorMixin,
    MultiOutputMixin,
    BaseEstimator,
    metaclass=ABCMeta,
):
    """Partial Least Squares (PLS).

    This abstract base class implements the generic PLS algorithm; concrete
    subclasses fix the deflation mode, the mode (A or B) and the algorithm
    used to extract the first singular vectors.

    Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods,
    with emphasis on the two-block case
    https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf
    """
    # Constructor-parameter constraints, enforced by `_fit_context` when
    # `fit` is entered.
    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "scale": ["boolean"],
        "deflation_mode": [StrOptions({"regression", "canonical"})],
        "mode": [StrOptions({"A", "B"})],
        "algorithm": [StrOptions({"svd", "nipals"})],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left")],
        "copy": ["boolean"],
    }
    @abstractmethod
    def __init__(
        self,
        n_components=2,
        *,
        scale=True,
        deflation_mode="regression",
        mode="A",
        algorithm="nipals",
        max_iter=500,
        tol=1e-06,
        copy=True,
    ):
        self.n_components = n_components
        self.deflation_mode = deflation_mode
        self.mode = mode
        self.scale = scale
        self.algorithm = algorithm
        self.max_iter = max_iter
        self.tol = tol
        self.copy = copy
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        """
        check_consistent_length(X, y)
        X = validate_data(
            self,
            X,
            dtype=np.float64,
            force_writeable=True,
            copy=self.copy,
            ensure_min_samples=2,
        )
        y = check_array(
            y,
            input_name="y",
            dtype=np.float64,
            force_writeable=True,
            copy=self.copy,
            ensure_2d=False,
        )
        # Remember whether y was 1d so `predict` can mirror that shape.
        if y.ndim == 1:
            self._predict_1d = True
            y = y.reshape(-1, 1)
        else:
            self._predict_1d = False
        n = X.shape[0]
        p = X.shape[1]
        q = y.shape[1]
        n_components = self.n_components
        # With PLSRegression n_components is bounded by the rank of (X.T X) see
        # Wegelin page 25. With CCA and PLSCanonical, n_components is bounded
        # by the rank of X and the rank of y: see Wegelin page 12
        rank_upper_bound = (
            min(n, p) if self.deflation_mode == "regression" else min(n, p, q)
        )
        if n_components > rank_upper_bound:
            raise ValueError(
                f"`n_components` upper bound is {rank_upper_bound}. "
                f"Got {n_components} instead. Reduce `n_components`."
            )
        self._norm_y_weights = self.deflation_mode == "canonical"  # 1.1
        norm_y_weights = self._norm_y_weights
        # Scale (in place)
        Xk, yk, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
            X, y, self.scale
        )
        # Allocate the per-component results; one column is filled per
        # iteration of the deflation loop below.
        self.x_weights_ = np.zeros((p, n_components))  # U
        self.y_weights_ = np.zeros((q, n_components))  # V
        self._x_scores = np.zeros((n, n_components))  # Xi
        self._y_scores = np.zeros((n, n_components))  # Omega
        self.x_loadings_ = np.zeros((p, n_components))  # Gamma
        self.y_loadings_ = np.zeros((q, n_components))  # Delta
        self.n_iter_ = []
        # This whole thing corresponds to the algorithm in section 4.1 of the
        # review from Wegelin. See above for a notation mapping from code to
        # paper.
        y_eps = np.finfo(yk.dtype).eps
        for k in range(n_components):
            # Find first left and right singular vectors of the X.T.dot(y)
            # cross-covariance matrix.
            if self.algorithm == "nipals":
                # Replace columns that are all close to zero with zeros
                yk_mask = np.all(np.abs(yk) < 10 * y_eps, axis=0)
                yk[:, yk_mask] = 0.0
                try:
                    (
                        x_weights,
                        y_weights,
                        n_iter_,
                    ) = _get_first_singular_vectors_power_method(
                        Xk,
                        yk,
                        mode=self.mode,
                        max_iter=self.max_iter,
                        tol=self.tol,
                        norm_y_weights=norm_y_weights,
                    )
                except StopIteration as e:
                    # Fewer informative components than requested: stop early
                    # and keep the components extracted so far.
                    if str(e) != "y residual is constant":
                        raise
                    warnings.warn(f"y residual is constant at iteration {k}")
                    break
                self.n_iter_.append(n_iter_)
            elif self.algorithm == "svd":
                x_weights, y_weights = _get_first_singular_vectors_svd(Xk, yk)
            # inplace sign flip for consistency across solvers and archs
            _svd_flip_1d(x_weights, y_weights)
            # compute scores, i.e. the projections of X and y
            x_scores = np.dot(Xk, x_weights)
            if norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights, y_weights)
            y_scores = np.dot(yk, y_weights) / y_ss
            # Deflation: subtract rank-one approx to obtain Xk+1 and yk+1
            x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores)
            Xk -= np.outer(x_scores, x_loadings)
            if self.deflation_mode == "canonical":
                # regress yk on y_score
                y_loadings = np.dot(y_scores, yk) / np.dot(y_scores, y_scores)
                yk -= np.outer(y_scores, y_loadings)
            if self.deflation_mode == "regression":
                # regress yk on x_score
                y_loadings = np.dot(x_scores, yk) / np.dot(x_scores, x_scores)
                yk -= np.outer(x_scores, y_loadings)
            self.x_weights_[:, k] = x_weights
            self.y_weights_[:, k] = y_weights
            self._x_scores[:, k] = x_scores
            self._y_scores[:, k] = y_scores
            self.x_loadings_[:, k] = x_loadings
            self.y_loadings_[:, k] = y_loadings
        # X was approximated as Xi . Gamma.T + X_(R+1)
        # Xi . Gamma.T is a sum of n_components rank-1 matrices. X_(R+1) is
        # whatever is left to fully reconstruct X, and can be 0 if X is of rank
        # n_components.
        # Similarly, y was approximated as Omega . Delta.T + y_(R+1)
        # Compute transformation matrices (rotations_). See User Guide.
        self.x_rotations_ = np.dot(
            self.x_weights_,
            pinv(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False),
        )
        self.y_rotations_ = np.dot(
            self.y_weights_,
            pinv(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False),
        )
        self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
        # Express the coefficients on the original (unscaled) units so that
        # `predict` only needs to center X, not scale it.
        self.coef_ = (self.coef_ * self._y_std).T / self._x_std
        self.intercept_ = self._y_mean
        self._n_features_out = self.x_rotations_.shape[1]
        return self
    def transform(self, X, y=None, copy=True):
        """Apply the dimension reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to transform.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors.

        copy : bool, default=True
            Whether to copy `X` and `y`, or perform in-place normalization.

        Returns
        -------
        x_scores, y_scores : array-like or tuple of array-like
            Return `x_scores` if `y` is not given, `(x_scores, y_scores)` otherwise.
        """
        check_is_fitted(self)
        X = validate_data(self, X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
        # Normalize
        X -= self._x_mean
        X /= self._x_std
        # Apply rotation
        x_scores = np.dot(X, self.x_rotations_)
        if y is not None:
            y = check_array(
                y, input_name="y", ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES
            )
            if y.ndim == 1:
                y = y.reshape(-1, 1)
            y -= self._y_mean
            y /= self._y_std
            y_scores = np.dot(y, self.y_rotations_)
            return x_scores, y_scores
        return x_scores
    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        y : array-like of shape (n_samples,) or (n_samples, n_components)
            New target, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Returns
        -------
        X_original : ndarray of shape (n_samples, n_features)
            Return the reconstructed `X` data.

        y_original : ndarray of shape (n_samples, n_targets)
            Return the reconstructed `y` target. Only returned when `y` is given.

        Notes
        -----
        This transformation will only be exact if `n_components=n_features`.
        """
        check_is_fitted(self)
        X = check_array(X, input_name="X", dtype=FLOAT_DTYPES)
        # From pls space to original space
        X_reconstructed = np.matmul(X, self.x_loadings_.T)
        # Denormalize
        X_reconstructed *= self._x_std
        X_reconstructed += self._x_mean
        if y is not None:
            y = check_array(y, input_name="y", dtype=FLOAT_DTYPES)
            # From pls space to original space
            y_reconstructed = np.matmul(y, self.y_loadings_.T)
            # Denormalize
            y_reconstructed *= self._y_std
            y_reconstructed += self._y_mean
            return X_reconstructed, y_reconstructed
        return X_reconstructed
    def predict(self, X, copy=True):
        """Predict targets of given samples.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples.

        copy : bool, default=True
            Whether to copy `X` or perform in-place normalization.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.

        Notes
        -----
        This call requires the estimation of a matrix of shape
        `(n_features, n_targets)`, which may be an issue in high dimensional
        space.
        """
        check_is_fitted(self)
        X = validate_data(self, X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
        # Only center X but do not scale it since the coefficients are already scaled
        X -= self._x_mean
        y_pred = X @ self.coef_.T + self.intercept_
        # Mirror the shape of the y passed to fit: 1d in, 1d out.
        return y_pred.ravel() if self._predict_1d else y_pred
    def fit_transform(self, X, y=None):
        """Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        out : ndarray of shape (n_samples, n_components)
            Return `x_scores` if `y` is not given, `(x_scores, y_scores)` otherwise.
        """
        return self.fit(X, y).transform(X, y)
    def __sklearn_tags__(self):
        # PLS estimators are not expected to reach a high default score on
        # generic checks, and `y` is optional at transform time.
        tags = super().__sklearn_tags__()
        tags.regressor_tags.poor_score = True
        tags.target_tags.required = False
        return tags
class PLSRegression(_PLS):
    """PLS regression.

    PLSRegression is also known as PLS2 or PLS1, depending on the number of
    targets.

    For a comparison between other cross decomposition algorithms, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, n_features]`.

    scale : bool, default=True
        Whether to scale `X` and `y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `y` in :term:`fit` before applying centering,
        and potentially scaling. If `False`, these operations will be done
        inplace, modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `y`.

    x_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training samples.

    y_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training targets.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `y`.

    coef_ : ndarray of shape (n_target, n_features)
        The coefficients of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, y)
    PLSRegression()
    >>> y_pred = pls2.predict(X)

    For a comparison between PLS Regression and :class:`~sklearn.decomposition.PCA`, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_pcr_vs_pls.py`.
    """

    # This subclass hard-codes `deflation_mode`, `mode` and `algorithm`, so
    # they are not user-settable and must not be validated as parameters.
    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode", "algorithm"):
        _parameter_constraints.pop(param)
    # The class body is an executable namespace: without this `del`, the loop
    # variable would leak as a spurious `PLSRegression.param` class attribute.
    del param

    # This implementation provides the same results that 3 PLS packages
    # provided in the R language (R-project):
    # - "mixOmics" with function pls(X, y, mode = "regression")
    # - "plspm " with function plsreg2(X, y)
    # - "pls" with function oscorespls.fit(X, y)
    def __init__(
        self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="regression",
            mode="A",
            algorithm="nipals",
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )

    def fit(self, X, y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        """
        super().fit(X, y)
        # expose the fitted attributes `x_scores_` and `y_scores_`
        self.x_scores_ = self._x_scores
        self.y_scores_ = self._y_scores
        return self
class PLSCanonical(_PLS):
    """Partial Least Squares transformer and regressor.

    For a comparison between other cross decomposition algorithms, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `y`.

    algorithm : {'nipals', 'svd'}, default='nipals'
        The algorithm used to estimate the first singular vectors of the
        cross-covariance matrix. 'nipals' uses the power method while 'svd'
        will compute the whole SVD.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `y`.

    coef_ : ndarray of shape (n_targets, n_features)
        The coefficients of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component. Empty if `algorithm='svd'`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    CCA : Canonical Correlation Analysis.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, y)
    PLSCanonical()
    >>> X_c, y_c = plsca.transform(X, y)
    """

    # This subclass hard-codes `deflation_mode` and `mode`, so they are not
    # user-settable and must not be validated as parameters.
    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode"):
        _parameter_constraints.pop(param)
    # The class body is an executable namespace: without this `del`, the loop
    # variable would leak as a spurious `PLSCanonical.param` class attribute.
    del param

    # This implementation provides the same results that the "plspm" package
    # provided in the R language (R-project), using the function plsca(X, y).
    # Results are equal or collinear with the function
    # ``pls(..., mode = "canonical")`` of the "mixOmics" package. The
    # difference relies in the fact that mixOmics implementation does not
    # exactly implement the Wold algorithm since it does not normalize
    # y_weights to one.
    def __init__(
        self,
        n_components=2,
        *,
        scale=True,
        algorithm="nipals",
        max_iter=500,
        tol=1e-06,
        copy=True,
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="canonical",
            mode="A",
            algorithm=algorithm,
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )
class CCA(_PLS):
    """Canonical Correlation Analysis, also known as "Mode B" PLS.

    For a comparison between other cross decomposition algorithms, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `y`.

    coef_ : ndarray of shape (n_targets, n_features)
        The coefficients of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, y)
    CCA(n_components=1)
    >>> X_c, y_c = cca.transform(X, y)
    """

    # This subclass hard-codes `deflation_mode`, `mode` and `algorithm`, so
    # they are not user-settable and must not be validated as parameters.
    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode", "algorithm"):
        _parameter_constraints.pop(param)
    # The class body is an executable namespace: without this `del`, the loop
    # variable would leak as a spurious `CCA.param` class attribute.
    del param

    def __init__(
        self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="canonical",
            mode="B",
            algorithm="nipals",
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )
class PLSSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Partial Least Square SVD.
This transformer simply performs an SVD on the cross-covariance matrix
`X'y`. It is able to project both the training data `X` and the targets
`y`. The training data `X` is projected on the left singular vectors, while
the targets are projected on the right singular vectors.
Read more in the :ref:`User Guide <cross_decomposition>`.
.. versionadded:: 0.8
Parameters
----------
n_components : int, default=2
The number of components to keep. Should be in `[1,
min(n_samples, n_features, n_targets)]`.
scale : bool, default=True
Whether to scale `X` and `y`.
copy : bool, default=True
Whether to copy `X` and `y` in fit before applying centering, and
potentially scaling. If `False`, these operations will be done inplace,
modifying both arrays.
Attributes
----------
x_weights_ : ndarray of shape (n_features, n_components)
The left singular vectors of the SVD of the cross-covariance matrix.
Used to project `X` in :meth:`transform`.
y_weights_ : ndarray of (n_targets, n_components)
The right singular vectors of the SVD of the cross-covariance matrix.
Used to project `X` in :meth:`transform`.
n_features_in_ : int
Number of features seen during :term:`fit`.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
PLSCanonical : Partial Least Squares transformer and regressor.
CCA : Canonical Correlation Analysis.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_decomposition import PLSSVD
>>> X = np.array([[0., 0., 1.],
... [1., 0., 0.],
... [2., 2., 2.],
... [2., 5., 4.]])
>>> y = np.array([[0.1, -0.2],
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cross_decomposition/__init__.py | sklearn/cross_decomposition/__init__.py | """Algorithms for cross decomposition."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.cross_decomposition._pls import CCA, PLSSVD, PLSCanonical, PLSRegression
__all__ = ["CCA", "PLSSVD", "PLSCanonical", "PLSRegression"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cross_decomposition/tests/test_pls.py | sklearn/cross_decomposition/tests/test_pls.py | import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from sklearn.cross_decomposition import CCA, PLSSVD, PLSCanonical, PLSRegression
from sklearn.cross_decomposition._pls import (
_center_scale_xy,
_get_first_singular_vectors_power_method,
_get_first_singular_vectors_svd,
_svd_flip_1d,
)
from sklearn.datasets import load_linnerud, make_regression
from sklearn.ensemble import VotingRegressor
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression
from sklearn.utils import check_random_state
from sklearn.utils.extmath import svd_flip
def assert_matrix_orthogonal(M):
    # Columns are mutually orthogonal iff the Gram matrix M'M is diagonal.
    gram = np.dot(M.T, M)
    diagonal_part = np.diag(np.diag(gram))
    assert_array_almost_equal(gram, diagonal_part)
def test_pls_canonical_basics():
    # Elementary invariants of a fitted PLSCanonical model.
    dataset = load_linnerud()
    X, y = dataset.data, dataset.target
    model = PLSCanonical(n_components=X.shape[1])
    model.fit(X, y)
    # Weights and scores must each form an orthogonal set of columns.
    for mat in (
        model.x_weights_,
        model.y_weights_,
        model._x_scores,
        model._y_scores,
    ):
        assert_matrix_orthogonal(mat)
    # The scores/loadings factorization reconstructs the centered+scaled
    # data: X = T P' and y = U Q'.
    x_scores, x_loadings = model._x_scores, model.x_loadings_
    y_scores, y_loadings = model._y_scores, model.y_loadings_
    Xc, yc, *_ = _center_scale_xy(X.copy(), y.copy(), scale=True)
    assert_array_almost_equal(Xc, np.dot(x_scores, x_loadings.T))
    assert_array_almost_equal(yc, np.dot(y_scores, y_loadings.T))
    # Applying the fitted rotations to the training data yields the scores.
    X_trans = model.transform(X)
    assert_array_almost_equal(X_trans, model._x_scores)
    X_trans, y_trans = model.transform(X, y)
    assert_array_almost_equal(X_trans, model._x_scores)
    assert_array_almost_equal(y_trans, model._y_scores)
    # A full round trip through inverse_transform recovers the input.
    assert_array_almost_equal(model.inverse_transform(X_trans), X)
    _, y_back = model.inverse_transform(X_trans, y_trans)
    assert_array_almost_equal(y_back, y)
def test_sanity_check_pls_regression():
    # Sanity check for PLSRegression
    # The results were checked against the R-packages plspm, misOmics and pls
    d = load_linnerud()
    X = d.data
    y = d.target
    pls = PLSRegression(n_components=X.shape[1])
    X_trans, _ = pls.fit_transform(X, y)
    # FIXME: one would expect y_trans == pls.y_scores_ but this is not
    # the case.
    # xref: https://github.com/scikit-learn/scikit-learn/issues/22420
    assert_allclose(X_trans, pls.x_scores_)
    # Reference values obtained from the R implementations listed above.
    expected_x_weights = np.array(
        [
            [-0.61330704, -0.00443647, 0.78983213],
            [-0.74697144, -0.32172099, -0.58183269],
            [-0.25668686, 0.94682413, -0.19399983],
        ]
    )
    expected_x_loadings = np.array(
        [
            [-0.61470416, -0.24574278, 0.78983213],
            [-0.65625755, -0.14396183, -0.58183269],
            [-0.51733059, 1.00609417, -0.19399983],
        ]
    )
    expected_y_weights = np.array(
        [
            [+0.32456184, 0.29892183, 0.20316322],
            [+0.42439636, 0.61970543, 0.19320542],
            [-0.13143144, -0.26348971, -0.17092916],
        ]
    )
    expected_y_loadings = np.array(
        [
            [+0.32456184, 0.29892183, 0.20316322],
            [+0.42439636, 0.61970543, 0.19320542],
            [-0.13143144, -0.26348971, -0.17092916],
        ]
    )
    # Signs may legitimately differ between implementations, so compare the
    # absolute values first and check sign consistency separately below.
    assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))
    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
    # The R / Python difference in the signs should be consistent across
    # loadings, weights, etc.
    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
def test_sanity_check_pls_regression_constant_column_y():
    # Check behavior when the first column of y is constant
    # The results are checked against a modified version of plsreg2
    # from the R-package plsdepot
    d = load_linnerud()
    X = d.data
    y = d.target
    y[:, 0] = 1
    pls = PLSRegression(n_components=X.shape[1])
    pls.fit(X, y)
    expected_x_weights = np.array(
        [
            [-0.6273573, 0.007081799, 0.7786994],
            [-0.7493417, -0.277612681, -0.6011807],
            [-0.2119194, 0.960666981, -0.1794690],
        ]
    )
    expected_x_loadings = np.array(
        [
            [-0.6273512, -0.22464538, 0.7786994],
            [-0.6643156, -0.09871193, -0.6011807],
            [-0.5125877, 1.01407380, -0.1794690],
        ]
    )
    # The constant target column must produce an all-zero row of y loadings.
    expected_y_loadings = np.array(
        [
            [0.0000000, 0.0000000, 0.0000000],
            [0.4357300, 0.5828479, 0.2174802],
            [-0.1353739, -0.2486423, -0.1810386],
        ]
    )
    assert_array_almost_equal(np.abs(expected_x_weights), np.abs(pls.x_weights_))
    assert_array_almost_equal(np.abs(expected_x_loadings), np.abs(pls.x_loadings_))
    # For the PLSRegression with default parameters, y_loadings == y_weights
    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_loadings))
    x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)
    x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)
    # we ignore the first full-zeros row for y
    y_loadings_sign_flip = np.sign(expected_y_loadings[1:] / pls.y_loadings_[1:])
    assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip)
def test_sanity_check_pls_canonical():
    # Sanity check for PLSCanonical
    # The results were checked against the R-package plspm
    d = load_linnerud()
    X = d.data
    y = d.target
    pls = PLSCanonical(n_components=X.shape[1])
    pls.fit(X, y)
    # Reference values obtained from the R plspm package.
    expected_x_weights = np.array(
        [
            [-0.61330704, 0.25616119, -0.74715187],
            [-0.74697144, 0.11930791, 0.65406368],
            [-0.25668686, -0.95924297, -0.11817271],
        ]
    )
    expected_x_rotations = np.array(
        [
            [-0.61330704, 0.41591889, -0.62297525],
            [-0.74697144, 0.31388326, 0.77368233],
            [-0.25668686, -0.89237972, -0.24121788],
        ]
    )
    expected_y_weights = np.array(
        [
            [+0.58989127, 0.7890047, 0.1717553],
            [+0.77134053, -0.61351791, 0.16920272],
            [-0.23887670, -0.03267062, 0.97050016],
        ]
    )
    expected_y_rotations = np.array(
        [
            [+0.58989127, 0.7168115, 0.30665872],
            [+0.77134053, -0.70791757, 0.19786539],
            [-0.23887670, -0.00343595, 0.94162826],
        ]
    )
    # Signs may legitimately differ between implementations; compare the
    # absolute values and then check sign consistency separately.
    assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))
    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
    x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
    # Canonical-mode weights and scores must be orthogonal sets.
    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)
    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)
def test_sanity_check_pls_canonical_random():
    # Sanity check for PLSCanonical on random data.
    # The reference values below were checked against the R-package plspm.
    n = 500
    p_noise = 10
    q_noise = 5
    # 2 latents vars: each latent drives two informative columns in X and y,
    # the remaining columns are pure noise.
    rng = check_random_state(11)
    l1 = rng.normal(size=n)
    l2 = rng.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    X = latents + rng.normal(size=4 * n).reshape((n, 4))
    y = latents + rng.normal(size=4 * n).reshape((n, 4))
    X = np.concatenate((X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
    y = np.concatenate((y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
    pls = PLSCanonical(n_components=3)
    pls.fit(X, y)
    expected_x_weights = np.array(
        [
            [0.65803719, 0.19197924, 0.21769083],
            [0.7009113, 0.13303969, -0.15376699],
            [0.13528197, -0.68636408, 0.13856546],
            [0.16854574, -0.66788088, -0.12485304],
            [-0.03232333, -0.04189855, 0.40690153],
            [0.1148816, -0.09643158, 0.1613305],
            [0.04792138, -0.02384992, 0.17175319],
            [-0.06781, -0.01666137, -0.18556747],
            [-0.00266945, -0.00160224, 0.11893098],
            [-0.00849528, -0.07706095, 0.1570547],
            [-0.00949471, -0.02964127, 0.34657036],
            [-0.03572177, 0.0945091, 0.3414855],
            [0.05584937, -0.02028961, -0.57682568],
            [0.05744254, -0.01482333, -0.17431274],
        ]
    )
    expected_x_loadings = np.array(
        [
            [0.65649254, 0.1847647, 0.15270699],
            [0.67554234, 0.15237508, -0.09182247],
            [0.19219925, -0.67750975, 0.08673128],
            [0.2133631, -0.67034809, -0.08835483],
            [-0.03178912, -0.06668336, 0.43395268],
            [0.15684588, -0.13350241, 0.20578984],
            [0.03337736, -0.03807306, 0.09871553],
            [-0.06199844, 0.01559854, -0.1881785],
            [0.00406146, -0.00587025, 0.16413253],
            [-0.00374239, -0.05848466, 0.19140336],
            [0.00139214, -0.01033161, 0.32239136],
            [-0.05292828, 0.0953533, 0.31916881],
            [0.04031924, -0.01961045, -0.65174036],
            [0.06172484, -0.06597366, -0.1244497],
        ]
    )
    expected_y_weights = np.array(
        [
            [0.66101097, 0.18672553, 0.22826092],
            [0.69347861, 0.18463471, -0.23995597],
            [0.14462724, -0.66504085, 0.17082434],
            [0.22247955, -0.6932605, -0.09832993],
            [0.07035859, 0.00714283, 0.67810124],
            [0.07765351, -0.0105204, -0.44108074],
            [-0.00917056, 0.04322147, 0.10062478],
            [-0.01909512, 0.06182718, 0.28830475],
            [0.01756709, 0.04797666, 0.32225745],
        ]
    )
    expected_y_loadings = np.array(
        [
            [0.68568625, 0.1674376, 0.0969508],
            [0.68782064, 0.20375837, -0.1164448],
            [0.11712173, -0.68046903, 0.12001505],
            [0.17860457, -0.6798319, -0.05089681],
            [0.06265739, -0.0277703, 0.74729584],
            [0.0914178, 0.00403751, -0.5135078],
            [-0.02196918, -0.01377169, 0.09564505],
            [-0.03288952, 0.09039729, 0.31858973],
            [0.04287624, 0.05254676, 0.27836841],
        ]
    )
    # Components are only defined up to a sign flip, so compare magnitudes...
    assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))
    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
    # ...then check that any sign flip is applied consistently to the weights
    # and loadings of the same view.
    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
    # PLSCanonical must produce orthogonal weights and scores.
    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)
    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)
def test_convergence_fail():
    """An insufficient iteration budget must raise a ConvergenceWarning."""
    X, y = load_linnerud(return_X_y=True)
    estimator = PLSCanonical(n_components=X.shape[1], max_iter=2)
    with pytest.warns(ConvergenceWarning):
        estimator.fit(X, y)
@pytest.mark.parametrize("Est", (PLSSVD, PLSRegression, PLSCanonical))
def test_attibutes_shapes(Est):
    # NOTE(review): the function name has a typo ("attibutes"); kept as-is so
    # the collected test id does not change.
    # The second dimension of the weight matrices must equal n_components.
    X, y = load_linnerud(return_X_y=True)
    n_components = 2
    model = Est(n_components=n_components)
    model.fit(X, y)
    assert all(
        weights.shape[1] == n_components
        for weights in (model.x_weights_, model.y_weights_)
    )
@pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA))
def test_univariate_equivalence(Est):
    """Fitting on 1d `y` must match fitting on the same `y` as a (n, 1) array."""
    X, y = load_linnerud(return_X_y=True)
    model = Est(n_components=1)
    coef_from_1d = model.fit(X, y[:, 0]).coef_
    coef_from_2d = model.fit(X, y[:, :1]).coef_
    assert coef_from_1d.shape == coef_from_2d.shape
    assert_array_almost_equal(coef_from_1d, coef_from_2d)
@pytest.mark.parametrize("Est", (PLSRegression, PLSCanonical, CCA, PLSSVD))
def test_copy(Est):
    """The ``copy`` keyword must control in-place modification of the inputs."""
    X, y = load_linnerud(return_X_y=True)
    X_backup = X.copy()

    # copy=True: fitting leaves X untouched.
    fitted = Est(copy=True).fit(X, y)
    assert_array_equal(X, X_backup)

    # copy=False: fitting centers/scales X in place, so the equality check
    # inside the context must fail.
    with pytest.raises(AssertionError):
        Est(copy=False).fit(X, y)
        assert_array_almost_equal(X, X_backup)

    if Est is PLSSVD:
        # PLSSVD's transform/predict do not expose a copy parameter.
        return

    X_backup = X.copy()
    with pytest.raises(AssertionError):
        fitted.transform(X, y, copy=False)
        assert_array_almost_equal(X, X_backup)

    X_backup = X.copy()
    with pytest.raises(AssertionError):
        fitted.predict(X, copy=False)
        assert_array_almost_equal(X, X_backup)

    # copy=True and copy=False (run on defensive copies) must agree.
    assert_array_almost_equal(
        fitted.transform(X, y, copy=True),
        fitted.transform(X.copy(), y.copy(), copy=False),
    )
    assert_array_almost_equal(
        fitted.predict(X, copy=True), fitted.predict(X.copy(), copy=False)
    )
def _generate_test_scale_and_stability_datasets():
    """Generate dataset for test_scale_and_stability.

    Yields (X, y) pairs covering: a large noisy linear dataset, a dataset
    with a constant feature, a tiny handcrafted dataset, and random seeds
    that reproduce a past CCA failure.
    """
    # dataset for non-regression 7818
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_targets = 5
    n_features = 10
    Q = rng.randn(n_targets, n_features)
    y = rng.randn(n_samples, n_targets)
    X = np.dot(y, Q) + 2 * rng.randn(n_samples, n_features) + 1
    X *= 1000
    yield X, y
    # Data set where one of the features is constant
    X, y = load_linnerud(return_X_y=True)
    # causes X[:, -1].std() to be zero
    X[:, -1] = 1.0
    yield X, y
    # Tiny handcrafted dataset.
    X = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [3.0, 5.0, 4.0]])
    y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    yield X, y
    # Seeds that provide a non-regression test for #18746, where CCA fails
    seeds = [530, 741]
    for seed in seeds:
        rng = np.random.RandomState(seed)
        X = rng.randn(4, 3)
        y = rng.randn(4, 2)
        yield X, y
@pytest.mark.parametrize("Est", (CCA, PLSCanonical, PLSRegression, PLSSVD))
@pytest.mark.parametrize("X, y", _generate_test_scale_and_stability_datasets())
def test_scale_and_stability(Est, X, y):
    """scale=True must be equivalent to scale=False on centered/scaled data.

    This doubles as a numerical-stability check across platforms.
    """
    # Work on copies: the generator's arrays are shared across param cases.
    X, y = X.copy(), y.copy()
    X_scaled, y_scaled, *_ = _center_scale_xy(X, y)
    scores_auto_x, scores_auto_y = Est(scale=True).fit_transform(X, y)
    scores_pre_x, scores_pre_y = Est(scale=False).fit_transform(X_scaled, y_scaled)
    assert_allclose(scores_pre_x, scores_auto_x, atol=1e-4)
    assert_allclose(scores_pre_y, scores_auto_y, atol=1e-4)
@pytest.mark.parametrize("Estimator", (PLSSVD, PLSRegression, PLSCanonical, CCA))
def test_n_components_upper_bounds(Estimator):
    """`n_components` above the data-dependent upper bound must raise."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    y = rng.randn(10, 3)
    err_msg = "`n_components` upper bound is .*. Got 10 instead. Reduce `n_components`."
    with pytest.raises(ValueError, match=err_msg):
        Estimator(n_components=10).fit(X, y)
def test_n_components_upper_PLSRegression():
    """PLSRegression must reject `n_components` above its upper bound."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 64)
    y = rng.randn(20, 3)
    err_msg = "`n_components` upper bound is 20. Got 30 instead. Reduce `n_components`."
    with pytest.raises(ValueError, match=err_msg):
        PLSRegression(n_components=30).fit(X, y)
@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (100, 200)])
def test_singular_value_helpers(n_samples, n_features, global_random_seed):
    """Power iteration and direct SVD must agree on the first singular vectors."""
    X, y = make_regression(
        n_samples, n_features, n_targets=5, random_state=global_random_seed
    )
    u_power, v_power, _ = _get_first_singular_vectors_power_method(
        X, y, norm_y_weights=True
    )
    u_svd, v_svd = _get_first_singular_vectors_svd(X, y)

    # Resolve the sign ambiguity before comparing.
    _svd_flip_1d(u_power, v_power)
    _svd_flip_1d(u_svd, v_svd)

    rtol = 1e-3
    # atol is scaled because some coordinates are very close to zero.
    assert_allclose(u_power, u_svd, atol=u_svd.max() * rtol)
    assert_allclose(v_power, v_svd, atol=v_svd.max() * rtol)
def test_one_component_equivalence(global_random_seed):
    """With n_components=1, PLSSVD, PLSRegression and PLSCanonical agree."""
    X, y = make_regression(100, 10, n_targets=5, random_state=global_random_seed)

    scores_svd = PLSSVD(n_components=1).fit(X, y).transform(X)
    scores_reg = PLSRegression(n_components=1).fit(X, y).transform(X)
    scores_can = PLSCanonical(n_components=1).fit(X, y).transform(X)

    rtol = 1e-3
    # atol is scaled because some entries are very close to zero.
    assert_allclose(scores_svd, scores_reg, atol=scores_reg.max() * rtol)
    assert_allclose(scores_svd, scores_can, atol=scores_can.max() * rtol)
def test_svd_flip_1d():
    """_svd_flip_1d must behave like svd_flip on 1d vectors."""
    u = np.array([1, -4, 2])
    v = np.array([1, 2, 3])
    u_ref, v_ref = svd_flip(u.reshape(-1, 1), v.reshape(1, -1))

    _svd_flip_1d(u, v)  # modifies u and v in place

    assert_allclose(u, u_ref.ravel())
    assert_allclose(u, [-1, 4, -2])
    assert_allclose(v, v_ref.ravel())
    assert_allclose(v, [-1, -2, -3])
def test_loadings_converges(global_random_seed):
    """CCA must converge without warnings. Non-regression test for #19549."""
    X, y = make_regression(
        n_samples=200, n_features=20, n_targets=20, random_state=global_random_seed
    )
    cca = CCA(n_components=10, max_iter=500)
    with warnings.catch_warnings():
        # Promote ConvergenceWarning to an error so non-convergence fails loudly.
        warnings.simplefilter("error", ConvergenceWarning)
        cca.fit(X, y)
    # Converged loadings stay within a reasonable range.
    assert np.all(np.abs(cca.x_loadings_) < 1)
def test_pls_constant_y():
    """A constant target must warn, not crash. Non-regression test for #19831."""
    rng = np.random.RandomState(42)
    X = rng.rand(100, 3)
    y = np.zeros(100)

    pls = PLSRegression()
    with pytest.warns(UserWarning, match="y residual is constant at iteration"):
        pls.fit(X, y)

    # With no signal in y, the rotations collapse to zero.
    assert_allclose(pls.x_rotations_, 0)
@pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA])
def test_pls_coef_shape(PLSEstimator):
    """`coef_` must have shape (n_targets, n_features).

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/12410
    """
    X, y = load_linnerud(return_X_y=True)
    model = PLSEstimator(copy=True).fit(X, y)
    assert model.coef_.shape == (y.shape[1], X.shape[1])
@pytest.mark.parametrize("scale", [True, False])
@pytest.mark.parametrize("PLSEstimator", [PLSRegression, PLSCanonical, CCA])
def test_pls_prediction(PLSEstimator, scale):
    """predict must equal the linear model X_centered @ coef_.T + intercept_."""
    X, y = load_linnerud(return_X_y=True)
    model = PLSEstimator(copy=True, scale=scale).fit(X, y)

    y_pred = model.predict(X, copy=True)

    X_centered = X - X.mean(axis=0)  # predict centers X internally
    assert_allclose(model.intercept_, y.mean(axis=0))
    assert_allclose(y_pred, X_centered @ model.coef_.T + model.intercept_)
@pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical])
def test_pls_feature_names_out(Klass):
    """Check `get_feature_names_out` for the cross_decomposition estimators."""
    X, y = load_linnerud(return_X_y=True)
    model = Klass().fit(X, y)

    prefix = Klass.__name__.lower()
    expected_names_out = np.array(
        [f"{prefix}{i}" for i in range(model.x_weights_.shape[1])],
        dtype=object,
    )
    assert_array_equal(model.get_feature_names_out(), expected_names_out)
@pytest.mark.parametrize("Klass", [CCA, PLSSVD, PLSRegression, PLSCanonical])
def test_pls_set_output(Klass):
    """`set_output(transform="pandas")` must wrap the X scores in a DataFrame."""
    pd = pytest.importorskip("pandas")
    X, y = load_linnerud(return_X_y=True, as_frame=True)

    model = Klass().set_output(transform="pandas").fit(X, y)
    X_scores, y_scores = model.transform(X, y)

    # Only the X scores follow set_output; the y scores stay a plain ndarray.
    assert isinstance(y_scores, np.ndarray)
    assert isinstance(X_scores, pd.DataFrame)
    assert_array_equal(X_scores.columns, model.get_feature_names_out())
def test_pls_regression_fit_1d_y():
    """Fitting with 1d `y` must produce 1d predictions.

    Non-regression test for Issue #26549.
    """
    X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])
    y = np.array([2, 6, 12, 20, 30, 42])
    expected = y.copy()

    plsr = PLSRegression().fit(X, y)
    assert plsr.predict(X).shape == expected.shape

    # Must also compose with VotingRegressor, which averages 1d predictions.
    lr = LinearRegression().fit(X, y)
    ensemble = VotingRegressor([("lr", lr), ("plsr", plsr)])
    y_pred = ensemble.fit(X, y).predict(X)
    assert y_pred.shape == expected.shape
    assert_allclose(y_pred, expected)
def test_pls_regression_scaling_coef():
    """With scale=True, `coef_` must account for the std. dev. of both X and y.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27964
    """
    # Handcrafted data: y is an exact linear map of X, with X on a std-10 scale.
    rng = np.random.RandomState(0)
    true_coef = rng.uniform(size=(3, 5))
    X = rng.normal(scale=10, size=(30, 5))  # add a std of 10
    y = X @ true_coef.T

    # The latent space must be large enough to represent y without loss, so
    # that the recovered coefficients match the generating ones exactly.
    model = PLSRegression(n_components=5, scale=True).fit(X, y)
    assert_allclose(model.coef_, true_coef)
    # Consequently, predictions must reproduce y.
    assert_allclose(model.predict(X), y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cross_decomposition/tests/__init__.py | sklearn/cross_decomposition/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_passive_aggressive.py | sklearn/linear_model/_passive_aggressive.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
from sklearn.base import _fit_context
from sklearn.linear_model._stochastic_gradient import (
DEFAULT_EPSILON,
BaseSGDClassifier,
BaseSGDRegressor,
)
from sklearn.utils import deprecated
from sklearn.utils._param_validation import Interval, StrOptions
# TODO(1.10): Remove
@deprecated(
    "this is deprecated in version 1.8 and will be removed in 1.10. "
    "Use `SGDClassifier(loss='hinge', penalty=None, learning_rate='pa1', eta0=1.0)` "
    "instead."
)
class PassiveAggressiveClassifier(BaseSGDClassifier):
    """Passive Aggressive Classifier.

    .. deprecated:: 1.8
        The whole class `PassiveAggressiveClassifier` was deprecated in version 1.8
        and will be removed in 1.10. Instead use:

        .. code-block:: python

            clf = SGDClassifier(
                loss="hinge",
                penalty=None,
                learning_rate="pa1",  # or "pa2"
                eta0=1.0,  # for parameter C
            )

    Read more in the :ref:`User Guide <passive_aggressive>`.

    Parameters
    ----------
    C : float, default=1.0
        Aggressiveness parameter for the passive-aggressive algorithm, see [1].
        For PA-I it is the maximum step size. For PA-II it regularizes the
        step size (the smaller `C` the more it regularizes).
        As a general rule-of-thumb, `C` should be small when the data is noisy.

    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.

    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`~sklearn.linear_model.PassiveAggressiveClassifier.partial_fit` method.

        .. versionadded:: 0.19

    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).

        .. versionadded:: 0.19

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a stratified fraction of training data as validation and terminate
        training when validation score is not improving by at least `tol` for
        `n_iter_no_change` consecutive epochs.

        .. versionadded:: 0.20

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.

        .. versionadded:: 0.20

    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.

        .. versionadded:: 0.20

    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.

    verbose : int, default=0
        The verbosity level.

    loss : str, default="hinge"
        The loss function to be used:
        hinge: equivalent to PA-I in the reference paper.
        squared_hinge: equivalent to PA-II in the reference paper.

    n_jobs : int or None, default=None
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance, default=None
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.

        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.

    class_weight : dict, {class_label: weight} or "balanced" or None, \
            default=None
        Preset for the class_weight fit parameter.

        Weights associated with classes. If not given, all classes
        are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        .. versionadded:: 0.17
           parameter *class_weight* to automatically weight samples.

    average : bool or int, default=False
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.

        .. versionadded:: 0.19
           parameter *average* to use weights averaging in SGD.

    Attributes
    ----------
    coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
            (n_classes, n_features)
        Weights assigned to the features.

    intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.

    classes_ : ndarray of shape (n_classes,)
        The unique classes labels.

    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples + 1)``.

    See Also
    --------
    SGDClassifier : Incrementally trained logistic regression.
    Perceptron : Linear perceptron classifier.

    References
    ----------
    .. [1] Online Passive-Aggressive Algorithms
        <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
        K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)

    Examples
    --------
    >>> from sklearn.linear_model import PassiveAggressiveClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_features=4, random_state=0)
    >>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
    ... tol=1e-3)
    >>> clf.fit(X, y)
    PassiveAggressiveClassifier(random_state=0)
    >>> print(clf.coef_)
    [[0.26642044 0.45070924 0.67251877 0.64185414]]
    >>> print(clf.intercept_)
    [1.84127814]
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]
    """

    _parameter_constraints: dict = {
        **BaseSGDClassifier._parameter_constraints,
        "loss": [StrOptions({"hinge", "squared_hinge"})],
        "C": [Interval(Real, 0, None, closed="right")],
    }
    # `eta0` is not user-facing here: it is driven internally by `C`.
    _parameter_constraints.pop("eta0")

    def __init__(
        self,
        *,
        C=1.0,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        shuffle=True,
        verbose=0,
        loss="hinge",
        n_jobs=None,
        random_state=None,
        warm_start=False,
        class_weight=None,
        average=False,
    ):
        super().__init__(
            penalty=None,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            eta0=C,  # PA step size is controlled by C via the eta0 slot
            warm_start=warm_start,
            class_weight=class_weight,
            average=average,
            n_jobs=n_jobs,
        )
        self.C = C
        self.loss = loss

    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y, classes=None):
        """Fit linear model with Passive Aggressive algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Subset of the training data.

        y : array-like of shape (n_samples,)
            Subset of the target values.

        classes : ndarray of shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # First call (no classes_ yet): validate partial_fit-specific params.
        if not hasattr(self, "classes_"):
            self._more_validate_params(for_partial_fit=True)

            if self.class_weight == "balanced":
                raise ValueError(
                    "class_weight 'balanced' is not supported for "
                    "partial_fit. For 'balanced' weights, use "
                    "`sklearn.utils.compute_class_weight` with "
                    "`class_weight='balanced'`. In place of y you "
                    "can use a large enough subset of the full "
                    "training set target to properly estimate the "
                    "class frequency distributions. Pass the "
                    "resulting weights as the class_weight "
                    "parameter."
                )

        # For an explanation, see
        # https://github.com/scikit-learn/scikit-learn/pull/1259#issuecomment-9818044
        lr = "pa1" if self.loss == "hinge" else "pa2"
        return self._partial_fit(
            X,
            y,
            alpha=1.0,
            loss="hinge",
            learning_rate=lr,
            max_iter=1,
            classes=classes,
            sample_weight=None,
            coef_init=None,
            intercept_init=None,
        )

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, coef_init=None, intercept_init=None):
        """Fit linear model with Passive Aggressive algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        coef_init : ndarray of shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : ndarray of shape (n_classes,)
            The initial intercept to warm-start the optimization.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._more_validate_params()

        # PA-I ("hinge") maps to the "pa1" learning rate, PA-II to "pa2".
        lr = "pa1" if self.loss == "hinge" else "pa2"
        return self._fit(
            X,
            y,
            alpha=1.0,
            loss="hinge",
            learning_rate=lr,
            coef_init=coef_init,
            intercept_init=intercept_init,
        )
# TODO(1.10): Remove
@deprecated(
    "this is deprecated in version 1.8 and will be removed in 1.10. "
    "Use `SGDRegressor(loss='epsilon_insensitive', penalty=None, learning_rate='pa1', "
    "eta0 = 1.0)` instead."
)
class PassiveAggressiveRegressor(BaseSGDRegressor):
    """Passive Aggressive Regressor.

    .. deprecated:: 1.8
        The whole class `PassiveAggressiveRegressor` was deprecated in version 1.8
        and will be removed in 1.10. Instead use:

        .. code-block:: python

            reg = SGDRegressor(
                loss="epsilon_insensitive",
                penalty=None,
                learning_rate="pa1",  # or "pa2"
                eta0=1.0,  # for parameter C
            )

    Read more in the :ref:`User Guide <passive_aggressive>`.

    Parameters
    ----------
    C : float, default=1.0
        Aggressiveness parameter for the passive-aggressive algorithm, see [1].
        For PA-I it is the maximum step size. For PA-II it regularizes the
        step size (the smaller `C` the more it regularizes).
        As a general rule-of-thumb, `C` should be small when the data is noisy.

    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`~sklearn.linear_model.PassiveAggressiveRegressor.partial_fit` method.

        .. versionadded:: 0.19

    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).

        .. versionadded:: 0.19

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation.
        score is not improving. If set to True, it will automatically set aside
        a fraction of training data as validation and terminate
        training when validation score is not improving by at least tol for
        n_iter_no_change consecutive epochs.

        .. versionadded:: 0.20

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.

        .. versionadded:: 0.20

    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.

        .. versionadded:: 0.20

    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.

    verbose : int, default=0
        The verbosity level.

    loss : str, default="epsilon_insensitive"
        The loss function to be used:
        epsilon_insensitive: equivalent to PA-I in the reference paper.
        squared_epsilon_insensitive: equivalent to PA-II in the reference
        paper.

    epsilon : float, default=0.1
        If the difference between the current prediction and the correct label
        is below this threshold, the model is not updated.

    random_state : int, RandomState instance, default=None
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.

        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.

    average : bool or int, default=False
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.

        .. versionadded:: 0.19
           parameter *average* to use weights averaging in SGD.

    Attributes
    ----------
    coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
            n_features]
        Weights assigned to the features.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.

    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples + 1)``.

    See Also
    --------
    SGDRegressor : Linear model fitted by minimizing a regularized
        empirical loss with SGD.

    References
    ----------
    Online Passive-Aggressive Algorithms
    <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006).

    Examples
    --------
    >>> from sklearn.linear_model import PassiveAggressiveRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, random_state=0)
    >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0,
    ... tol=1e-3)
    >>> regr.fit(X, y)
    PassiveAggressiveRegressor(max_iter=100, random_state=0)
    >>> print(regr.coef_)
    [20.48736655 34.18818427 67.59122734 87.94731329]
    >>> print(regr.intercept_)
    [-0.02306214]
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-0.02306214]
    """

    _parameter_constraints: dict = {
        **BaseSGDRegressor._parameter_constraints,
        "loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})],
        "C": [Interval(Real, 0, None, closed="right")],
        "epsilon": [Interval(Real, 0, None, closed="left")],
    }
    # `eta0` is not user-facing here: it is driven internally by `C`.
    _parameter_constraints.pop("eta0")

    def __init__(
        self,
        *,
        C=1.0,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        shuffle=True,
        verbose=0,
        loss="epsilon_insensitive",
        epsilon=DEFAULT_EPSILON,
        random_state=None,
        warm_start=False,
        average=False,
    ):
        super().__init__(
            loss=loss,
            penalty=None,
            l1_ratio=0,
            epsilon=epsilon,
            eta0=C,  # PA step size is controlled by C via the eta0 slot
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            warm_start=warm_start,
            average=average,
        )
        self.C = C

    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y):
        """Fit linear model with Passive Aggressive algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Subset of training data.

        y : numpy array of shape [n_samples]
            Subset of target values.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # First call (no coef_ yet): validate partial_fit-specific params.
        if not hasattr(self, "coef_"):
            self._more_validate_params(for_partial_fit=True)

        # PA-I ("epsilon_insensitive") maps to "pa1", PA-II to "pa2".
        lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
        return self._partial_fit(
            X,
            y,
            alpha=1.0,
            loss="epsilon_insensitive",
            learning_rate=lr,
            max_iter=1,
            sample_weight=None,
            coef_init=None,
            intercept_init=None,
        )

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, coef_init=None, intercept_init=None):
        """Fit linear model with Passive Aggressive algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : numpy array of shape [n_samples]
            Target values.

        coef_init : array, shape = [n_features]
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape = [1]
            The initial intercept to warm-start the optimization.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._more_validate_params()

        # PA-I ("epsilon_insensitive") maps to "pa1", PA-II to "pa2".
        lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
        return self._fit(
            X,
            y,
            alpha=1.0,
            loss="epsilon_insensitive",
            learning_rate=lr,
            coef_init=coef_init,
            intercept_init=intercept_init,
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_base.py | sklearn/linear_model/_base.py | """
Generalized Linear Models.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from scipy.sparse.linalg import lsqr
from scipy.special import expit
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
MultiOutputMixin,
RegressorMixin,
_fit_context,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils._array_api import (
_asarray_with_order,
_average,
get_namespace,
get_namespace_and_device,
indexing_dtype,
supported_float_dtypes,
)
from sklearn.utils._param_validation import Interval
from sklearn.utils._seq_dataset import (
ArrayDataset32,
ArrayDataset64,
CSRDataset32,
CSRDataset64,
)
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
validate_data,
)
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
SPARSE_INTERCEPT_DECAY = 0.01
# For sparse training data, intercept updates are scaled by this decay factor
# to avoid oscillation of the intercept estimate (see make_dataset below,
# which returns it as `intercept_decay`; dense data uses 1.0 instead).
def make_dataset(X, y, sample_weight, random_state=None):
    """Wrap the training data in the ``Dataset`` abstraction.

    Builds either a CSR-backed or dense-array-backed sequential dataset,
    matched to the dtype of ``X``, and also returns the ``intercept_decay``
    appropriate for the input layout (sparse inputs use a damped intercept
    update).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data
    y : array-like, shape (n_samples, )
        Target values.
    sample_weight : numpy array of shape (n_samples,)
        The weight of each sample
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset random sampling. It is not
        used for dataset shuffling.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    dataset
        The ``Dataset`` abstraction
    intercept_decay
        The intercept decay
    """
    rng = check_random_state(random_state)
    # SequentialDataset64 requires a strictly positive seed.
    seed = rng.randint(1, np.iinfo(np.int32).max)

    if X.dtype == np.float32:
        csr_cls, dense_cls = CSRDataset32, ArrayDataset32
    else:
        csr_cls, dense_cls = CSRDataset64, ArrayDataset64

    if sp.issparse(X):
        dataset = csr_cls(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)
        return dataset, SPARSE_INTERCEPT_DECAY

    dataset = dense_cls(np.ascontiguousarray(X), y, sample_weight, seed=seed)
    return dataset, 1.0
def _preprocess_data(
    X,
    y,
    *,
    fit_intercept,
    copy=True,
    copy_y=True,
    sample_weight=None,
    check_input=True,
    rescale_with_sw=True,
):
    """Common data preprocessing for fitting linear models.

    This helper is in charge of the following steps:

    - `sample_weight` is assumed to be `None` or a validated array with same dtype as
      `X`.
    - If `check_input=True`, perform standard input validation of `X`, `y`.
    - Perform copies if requested to avoid side-effects in case of inplace
      modifications of the input.

    Then, if `fit_intercept=True` this preprocessing centers both `X` and `y` as
    follows:

    - if `X` is dense, center the data and store the mean vector in `X_offset`.
    - if `X` is sparse, store the mean in `X_offset` without centering `X`. The
      centering is expected to be handled by the linear solver where appropriate.
    - in either case, always center `y` and store the mean in `y_offset`.
    - both `X_offset` and `y_offset` are always weighted by `sample_weight`
      if not set to `None`.

    If `fit_intercept=False`, no centering is performed and `X_offset`, `y_offset`
    are set to zero.

    If `rescale_with_sw` is True, then X and y are rescaled with the square root of
    sample weights.

    Returns
    -------
    X_out : {ndarray, sparse matrix} of shape (n_samples, n_features)
        If copy=True a copy of the input X is triggered, otherwise operations are
        inplace.
        If input X is dense, then X_out is centered.
    y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
        Centered version of y. Possibly performed inplace on input y depending
        on the copy_y parameter.
    X_offset : ndarray of shape (n_features,)
        The mean per column of input X.
    y_offset : float or ndarray of shape (n_features,)
    X_scale : ndarray of shape (n_features,)
        Always an array of ones. TODO: refactor the code base to make it
        possible to remove this unused variable.
    sample_weight_sqrt : ndarray of shape (n_samples,) or None
        `np.sqrt(sample_weight)`
    """
    xp, _, device_ = get_namespace_and_device(X, y, sample_weight)
    n_samples, n_features = X.shape
    X_is_sparse = sp.issparse(X)
    if check_input:
        X = check_array(
            X, copy=copy, accept_sparse=["csr", "csc"], dtype=supported_float_dtypes(xp)
        )
        y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False)
    else:
        y = xp.astype(y, X.dtype, copy=copy_y)
        if copy:
            if X_is_sparse:
                X = X.copy()
            else:
                X = _asarray_with_order(X, order="K", copy=True, xp=xp)
    dtype_ = X.dtype
    if fit_intercept:
        if X_is_sparse:
            # Sparse X is not centered in place; only the weighted column
            # means are recorded (variance output of the helper is unused).
            X_offset, _ = mean_variance_axis(X, axis=0, weights=sample_weight)
        else:
            X_offset = _average(X, axis=0, weights=sample_weight, xp=xp)
            X_offset = xp.astype(X_offset, X.dtype, copy=False)
            X -= X_offset
        y_offset = _average(y, axis=0, weights=sample_weight, xp=xp)
        y -= y_offset
    else:
        X_offset = xp.zeros(n_features, dtype=X.dtype, device=device_)
        if y.ndim == 1:
            y_offset = xp.asarray(0.0, dtype=dtype_, device=device_)
        else:
            y_offset = xp.zeros(y.shape[1], dtype=dtype_, device=device_)
    # X_scale is no longer needed. It is a historic artifact from the
    # time where linear model exposed the normalize parameter.
    X_scale = xp.ones(n_features, dtype=X.dtype, device=device_)
    if sample_weight is not None and rescale_with_sw:
        # Sample weight can be implemented via a simple rescaling.
        # For sparse X and y, it triggers copies anyway.
        # For dense X and y that already have been copied, we safely do inplace
        # rescaling.
        X, y, sample_weight_sqrt = _rescale_data(X, y, sample_weight, inplace=copy)
    else:
        sample_weight_sqrt = None
    return X, y, X_offset, y_offset, X_scale, sample_weight_sqrt
def _rescale_data(X, y, sample_weight, inplace=False):
    """Rescale X and y row-wise by the square root of sample_weight.

    For many linear models, solving

        (y - X w)' S (y - X w),  with S = diag(sample_weight)

    is equivalent to the plain least-squares problem

        ||y_rescaled - X_rescaled w||_2^2

    with ``y_rescaled = sqrt(S) y`` and ``X_rescaled = sqrt(S) X``, which is
    what this helper produces.

    The parameter `inplace` only takes effect for dense X and dense y.

    Returns
    -------
    X_rescaled : {array-like, sparse matrix}
    y_rescaled : {array-like, sparse matrix}
    sample_weight_sqrt : array-like of shape (n_samples,)
    """
    # X, y and sample_weight are assumed to have been validated by the caller
    # (_validate_data / _check_sample_weight).
    xp, _ = get_namespace(X, y, sample_weight)
    n_samples = X.shape[0]
    sample_weight_sqrt = xp.sqrt(sample_weight)

    sw_matrix = None
    if sp.issparse(X) or sp.issparse(y):
        # Sparse inputs are rescaled through a diagonal matrix product.
        sw_matrix = sparse.dia_matrix(
            (sample_weight_sqrt, 0), shape=(n_samples, n_samples)
        )

    def _scale_dense(arr):
        factor = sample_weight_sqrt if arr.ndim == 1 else sample_weight_sqrt[:, None]
        if inplace:
            arr *= factor
            return arr
        return arr * factor

    X = safe_sparse_dot(sw_matrix, X) if sp.issparse(X) else _scale_dense(X)
    y = safe_sparse_dot(sw_matrix, y) if sp.issparse(y) else _scale_dense(y)
    return X, y, sample_weight_sqrt
class LinearModel(BaseEstimator, metaclass=ABCMeta):
    """Base class for Linear Models"""

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _decision_function(self, X):
        # Raw linear prediction X @ coef_ + intercept_, for fitted models only.
        check_is_fitted(self)
        X = validate_data(self, X, accept_sparse=["csr", "csc", "coo"], reset=False)
        weights = self.coef_
        if weights.ndim != 1:
            # Multi-target: coef_ is (n_targets, n_features).
            weights = weights.T
        return X @ weights + self.intercept_

    def predict(self, X):
        """
        Predict using the linear model.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.

        Returns
        -------
        C : array, shape (n_samples,)
            Returns predicted values.
        """
        return self._decision_function(X)

    def _set_intercept(self, X_offset, y_offset, X_scale=None):
        """Set the intercept_"""
        xp, _ = get_namespace(X_offset, y_offset, X_scale)
        if not self.fit_intercept:
            self.intercept_ = 0.0
            return
        # Keep coef_.dtype aligned with X.dtype; they can diverge e.g. when
        # warm_start=True.
        self.coef_ = xp.astype(self.coef_, X_offset.dtype, copy=False)
        if X_scale is not None:
            self.coef_ = xp.divide(self.coef_, X_scale)
        coef = self.coef_ if self.coef_.ndim == 1 else self.coef_.T
        self.intercept_ = y_offset - X_offset @ coef
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
    """Mixin for linear classifiers.

    Handles prediction for sparse and dense X.
    """

    def decision_function(self, X):
        """
        Predict confidence scores for samples.

        The confidence score for a sample is proportional to the signed
        distance of that sample to the hyperplane.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data matrix for which we want to get the confidence scores.

        Returns
        -------
        scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
            Confidence scores per `(n_samples, n_classes)` combination. In the
            binary case, confidence score for `self.classes_[1]` where >0 means
            this class would be predicted.
        """
        check_is_fitted(self)
        xp, _ = get_namespace(X)
        X = validate_data(self, X, accept_sparse="csr", reset=False)

        weights = self.coef_
        if weights.ndim == 2:
            weights = weights.T
        scores = safe_sparse_dot(X, weights, dense_output=True) + self.intercept_
        # A single-column score matrix is flattened to 1d (binary case).
        if scores.ndim > 1 and scores.shape[1] == 1:
            return xp.reshape(scores, (-1,))
        return scores

    def predict(self, X):
        """
        Predict class labels for samples in X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data matrix for which we want to get the predictions.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            Vector containing the class labels for each sample.
        """
        xp, _ = get_namespace(X)
        scores = self.decision_function(X)
        if len(scores.shape) == 1:
            # Binary: positive score -> classes_[1].
            class_indices = xp.astype(scores > 0, indexing_dtype(xp))
        else:
            class_indices = xp.argmax(scores, axis=1)
        return xp.take(self.classes_, class_indices, axis=0)

    def _predict_proba_lr(self, X):
        """Probability estimation for OvR logistic regression.

        Positive class probabilities are computed as
        1. / (1. + np.exp(-self.decision_function(X)));
        multiclass is handled by normalizing that over all classes.
        """
        prob = self.decision_function(X)
        expit(prob, out=prob)
        if prob.ndim != 1:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
        return np.vstack([1 - prob, prob]).T
class SparseCoefMixin:
    """Mixin for converting coef_ to and from CSR format.

    L1-regularizing estimators should inherit this.
    """

    def densify(self):
        """
        Convert coefficient matrix to dense array format.

        Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
        default format of ``coef_`` and is required for fitting, so calling
        this method is only required on models that have previously been
        sparsified; otherwise, it is a no-op.

        Returns
        -------
        self
            Fitted estimator.
        """
        check_is_fitted(
            self, msg="Estimator, %(name)s, must be fitted before densifying."
        )
        if sp.issparse(self.coef_):
            self.coef_ = self.coef_.toarray()
        return self

    def sparsify(self):
        """
        Convert coefficient matrix to sparse format.

        Converts the ``coef_`` member to a scipy.sparse matrix, which for
        L1-regularized models can be much more memory- and storage-efficient
        than the usual numpy.ndarray representation.

        The ``intercept_`` member is not converted.

        Returns
        -------
        self
            Fitted estimator.

        Notes
        -----
        For non-sparse models, i.e. when there are not many zeros in ``coef_``,
        this may actually *increase* memory usage, so use this method with
        care. A rule of thumb is that the number of zero elements, which can
        be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
        to provide significant benefits.

        After calling this method, further fitting with the partial_fit
        method (if any) will not work until you call densify.
        """
        check_is_fitted(
            self, msg="Estimator, %(name)s, must be fitted before sparsifying."
        )
        self.coef_ = sp.csr_matrix(self.coef_)
        return self
class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
    """
    Ordinary least squares Linear Regression.

    LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
    to minimize the residual sum of squares between the observed targets in
    the dataset, and the targets predicted by the linear approximation.

    Parameters
    ----------
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    copy_X : bool, default=True
        If True, X will be copied; else, it may be overwritten.

    tol : float, default=1e-6
        The precision of the solution (`coef_`) is determined by `tol` which
        specifies a different convergence criterion for the `lsqr` solver.
        `tol` is set as `atol` and `btol` of :func:`scipy.sparse.linalg.lsqr` when
        fitting on sparse training data. This parameter has no effect when fitting
        on dense data.

        .. versionadded:: 1.7

    n_jobs : int, default=None
        The number of jobs to use for the computation. This will only provide
        speedup in case of sufficiently large problems, that is if firstly
        `n_targets > 1` and secondly `X` is sparse or if `positive` is set
        to `True`. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.

    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive. This
        option is only supported for dense arrays.

        For a comparison between a linear regression model with positive constraints
        on the regression coefficients and a linear regression without such constraints,
        see :ref:`sphx_glr_auto_examples_linear_model_plot_nnls.py`.

        .. versionadded:: 0.24

    Attributes
    ----------
    coef_ : array of shape (n_features, ) or (n_targets, n_features)
        Estimated coefficients for the linear regression problem.
        If multiple targets are passed during the fit (y 2D), this
        is a 2D array of shape (n_targets, n_features), while if only
        one target is passed, this is a 1D array of length n_features.

    rank_ : int
        Rank of matrix `X`. Only available when `X` is dense.

    singular_ : array of shape (min(n_samples, n_features),)
        Singular values of `X`. Only available when `X` is dense.

    intercept_ : float or array of shape (n_targets,)
        Independent term in the linear model. Set to 0.0 if
        `fit_intercept = False`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    Ridge : Ridge regression addresses some of the
        problems of Ordinary Least Squares by imposing a penalty on the
        size of the coefficients with l2 regularization.
    Lasso : The Lasso is a linear model that estimates
        sparse coefficients with l1 regularization.
    ElasticNet : Elastic-Net is a linear regression
        model trained with both l1 and l2 -norm regularization of the
        coefficients.

    Notes
    -----
    From the implementation point of view, this is just plain Ordinary
    Least Squares (:func:`scipy.linalg.lstsq`) or Non Negative Least Squares
    (:func:`scipy.optimize.nnls`) wrapped as a predictor object.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LinearRegression
    >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
    >>> # y = 1 * x_0 + 2 * x_1 + 3
    >>> y = np.dot(X, np.array([1, 2])) + 3
    >>> reg = LinearRegression().fit(X, y)
    >>> reg.score(X, y)
    1.0
    >>> reg.coef_
    array([1., 2.])
    >>> reg.intercept_
    np.float64(3.0)
    >>> reg.predict(np.array([[3, 5]]))
    array([16.])
    """

    _parameter_constraints: dict = {
        "fit_intercept": ["boolean"],
        "copy_X": ["boolean"],
        "n_jobs": [None, Integral],
        "positive": ["boolean"],
        "tol": [Interval(Real, 0, None, closed="left")],
    }

    def __init__(
        self,
        *,
        fit_intercept=True,
        copy_X=True,
        tol=1e-6,
        n_jobs=None,
        positive=False,
    ):
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.tol = tol
        self.n_jobs = n_jobs
        self.positive = positive

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """
        Fit linear model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary.

        sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample.

            .. versionadded:: 0.17
               parameter *sample_weight* support to LinearRegression.

        Returns
        -------
        self : object
            Fitted Estimator.
        """
        n_jobs_ = self.n_jobs
        # positive=True dispatches to scipy.optimize.nnls, which only accepts
        # dense arrays.
        accept_sparse = False if self.positive else ["csr", "csc", "coo"]
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=accept_sparse,
            y_numeric=True,
            multi_output=True,
            force_writeable=True,
        )
        has_sw = sample_weight is not None
        if has_sw:
            sample_weight = _check_sample_weight(
                sample_weight, X, dtype=X.dtype, ensure_non_negative=True
            )
        # Note that neither _rescale_data nor the rest of the fit method of
        # LinearRegression can benefit from in-place operations when X is a
        # sparse matrix. Therefore, let's not copy X when it is sparse.
        copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X)
        X, y, X_offset, y_offset, _, sample_weight_sqrt = _preprocess_data(
            X,
            y,
            fit_intercept=self.fit_intercept,
            copy=copy_X_in_preprocess_data,
            sample_weight=sample_weight,
        )
        if self.positive:
            if y.ndim < 2:
                self.coef_ = optimize.nnls(X, y)[0]
            else:
                # scipy.optimize.nnls cannot handle y with shape (M, K)
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])
                )
                self.coef_ = np.vstack([out[0] for out in outs])
        elif sp.issparse(X):
            # Sparse X is never centered in place by _preprocess_data;
            # the centering by X_offset is applied implicitly through a
            # LinearOperator so that sparsity is preserved.
            if has_sw:

                def matvec(b):
                    return X.dot(b) - sample_weight_sqrt * b.dot(X_offset)

                def rmatvec(b):
                    return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt)

            else:

                def matvec(b):
                    return X.dot(b) - b.dot(X_offset)

                def rmatvec(b):
                    return X.T.dot(b) - X_offset * b.sum()

            X_centered = sparse.linalg.LinearOperator(
                shape=X.shape, matvec=matvec, rmatvec=rmatvec
            )
            if y.ndim < 2:
                self.coef_ = lsqr(X_centered, y, atol=self.tol, btol=self.tol)[0]
            else:
                # sparse_lstsq cannot handle y with shape (M, K)
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(lsqr)(
                        X_centered, y[:, j].ravel(), atol=self.tol, btol=self.tol
                    )
                    for j in range(y.shape[1])
                )
                self.coef_ = np.vstack([out[0] for out in outs])
        else:
            # cut-off ratio for small singular values
            cond = max(X.shape) * np.finfo(X.dtype).eps
            self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y, cond=cond)
            self.coef_ = self.coef_.T
        if y.ndim == 1:
            self.coef_ = np.ravel(self.coef_)
        self._set_intercept(X_offset, y_offset)
        return self

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # nnls (positive=True) does not support sparse input.
        tags.input_tags.sparse = not self.positive
        return tags
def _check_precomputed_gram_matrix(
X, precompute, X_offset, X_scale, rtol=None, atol=1e-5
):
"""Computes a single element of the gram matrix and compares it to
the corresponding element of the user supplied gram matrix.
If the values do not match a ValueError will be thrown.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data array.
precompute : array-like of shape (n_features, n_features)
User-supplied gram matrix.
X_offset : ndarray of shape (n_features,)
Array of feature means used to center design matrix.
X_scale : ndarray of shape (n_features,)
Array of feature scale factors used to normalize design matrix.
rtol : float, default=None
Relative tolerance; see numpy.allclose
If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7
otherwise.
atol : float, default=1e-5
absolute tolerance; see :func`numpy.allclose`. Note that the default
here is more tolerant than the default for
:func:`numpy.testing.assert_allclose`, where `atol=0`.
Raises
------
ValueError
Raised when the provided Gram matrix is not consistent.
"""
n_features = X.shape[1]
f1 = n_features // 2
f2 = min(f1 + 1, n_features - 1)
v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
expected = np.dot(v1, v2)
actual = precompute[f1, f2]
dtypes = [precompute.dtype, expected.dtype]
if rtol is None:
rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
rtol = max(rtols)
if not np.isclose(expected, actual, rtol=rtol, atol=atol):
raise ValueError(
"Gram matrix passed in via 'precompute' parameter "
"did not pass validation when a single element was "
"checked - please check that it was computed "
f"properly. For element ({f1},{f2}) we computed "
f"{expected} but the user-supplied value was "
f"{actual}."
)
def _pre_fit(
    X,
    y,
    Xy,
    precompute,
    fit_intercept,
    copy,
    check_gram=True,
    sample_weight=None,
):
    """Function used at beginning of fit in linear models with L1 or L0 penalty.

    This function applies _preprocess_data and additionally computes the gram matrix
    `precompute` as needed as well as `Xy`.

    It is assumed that X, y and sample_weight are already validated.

    Returns
    -------
    X
    y
    X_offset
    y_offset
    X_scale
    precompute
    Xy
    """
    n_samples, n_features = X.shape
    if sparse.issparse(X):
        # copy is not needed here as X is not modified inplace when X is sparse
        copy = False
        # A precomputed Gram matrix is never used with sparse X.
        precompute = False
        # Rescale X and y only in dense case. Sparse cd solver directly deals with
        # sample_weight.
        rescale_with_sw = False
    else:
        # copy was done in fit if necessary
        rescale_with_sw = True
    X, y, X_offset, y_offset, X_scale, _ = _preprocess_data(
        X,
        y,
        fit_intercept=fit_intercept,
        copy=copy,
        sample_weight=sample_weight,
        check_input=False,
        rescale_with_sw=rescale_with_sw,
    )
    # A user-supplied Gram matrix (anything array-like) is only valid when X
    # was not centered; otherwise it must be recomputed.
    if hasattr(precompute, "__array__"):
        if fit_intercept and not np.allclose(X_offset, np.zeros(n_features)):
            warnings.warn(
                (
                    "Gram matrix was provided but X was centered to fit "
                    "intercept: recomputing Gram matrix."
                ),
                UserWarning,
            )
            # TODO: instead of warning and recomputing, we could just center
            # the user provided Gram matrix a-posteriori (after making a copy
            # when `copy=True`).
            # recompute Gram
            precompute = "auto"
            Xy = None
        elif check_gram:
            # If we're going to use the user's precomputed gram matrix, we
            # do a quick check to make sure its not totally bogus.
            _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale)
    # precompute if n_samples > n_features
    if isinstance(precompute, str) and precompute == "auto":
        precompute = n_samples > n_features
    if precompute is True:
        # make sure that the 'precompute' array is contiguous.
        precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order="C")
        np.dot(X.T, X, out=precompute)
    if not hasattr(precompute, "__array__"):
        Xy = None  # cannot use Xy if precompute is not Gram
    if hasattr(precompute, "__array__") and Xy is None:
        common_dtype = np.result_type(X.dtype, y.dtype)
        if y.ndim == 1:
            # Xy is 1d, make sure it is contiguous.
            Xy = np.empty(shape=n_features, dtype=common_dtype, order="C")
            np.dot(X.T, y, out=Xy)
        else:
            # Make sure that Xy is always F contiguous even if X or y are not
            # contiguous: the goal is to make it fast to extract the data for a
            # specific target.
            n_targets = y.shape[1]
            Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order="F")
            np.dot(y.T, X, out=Xy.T)
    return X, y, X_offset, y_offset, X_scale, precompute, Xy
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_coordinate_descent.py | sklearn/linear_model/_coordinate_descent.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import sys
import warnings
from abc import ABC, abstractmethod
from functools import partial
from numbers import Integral, Real
import numpy as np
from joblib import effective_n_jobs
from scipy import sparse
from sklearn.base import MultiOutputMixin, RegressorMixin, _fit_context
# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'
from sklearn.linear_model import _cd_fast as cd_fast # type: ignore[attr-defined]
from sklearn.linear_model._base import LinearModel, _pre_fit, _preprocess_data
from sklearn.model_selection import check_cv
from sklearn.utils import Bunch, check_array, check_scalar, metadata_routing
from sklearn.utils._metadata_requests import (
MetadataRouter,
MethodMapping,
_raise_for_params,
get_routing_for_object,
)
from sklearn.utils._param_validation import (
Hidden,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.metadata_routing import _routing_enabled, process_routing
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.validation import (
_check_sample_weight,
check_consistent_length,
check_is_fitted,
check_random_state,
column_or_1d,
has_fit_parameter,
validate_data,
)
def _set_order(X, y, order="C"):
"""Change the order of X and y if necessary.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
order : {None, 'C', 'F'}
If 'C', dense arrays are returned as C-ordered, sparse matrices in csr
format. If 'F', dense arrays are return as F-ordered, sparse matrices
in csc format.
Returns
-------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data with guaranteed order.
y : ndarray of shape (n_samples,)
Target values with guaranteed order.
"""
if order not in [None, "C", "F"]:
raise ValueError(
"Unknown value for order. Got {} instead of None, 'C' or 'F'.".format(order)
)
sparse_X = sparse.issparse(X)
sparse_y = sparse.issparse(y)
if order is not None:
sparse_format = "csc" if order == "F" else "csr"
if sparse_X:
X = X.asformat(sparse_format, copy=False)
else:
X = np.asarray(X, order=order)
if sparse_y:
y = y.asformat(sparse_format)
else:
y = np.asarray(y, order=order)
return X, y
###############################################################################
# Paths functions
def _alpha_grid(
X,
y,
Xy=None,
l1_ratio=1.0,
fit_intercept=True,
eps=1e-3,
n_alphas=100,
sample_weight=None,
*,
positive: bool = False,
):
"""Compute the grid of alpha values for elastic net parameter search
Computes alpha_max which results in coef=0 and then uses a multiplicative grid of
length `eps`.
`X` is never copied.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Target values
Xy : array-like of shape (n_features,) or (n_features, n_outputs),\
default=None
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float, default=1.0
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0``, there would be no L1 penalty which is not supported
for the generation of alphas.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
fit_intercept : bool, default=True
Whether to fit an intercept or not
sample_weight : ndarray of shape (n_samples,), default=None
positive : bool, default=False
If set to True, forces coefficients to be positive.
Returns
-------
np.ndarray
Grid of alpha values.
"""
if l1_ratio == 0:
raise ValueError(
"Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument."
)
if Xy is not None:
Xyw = Xy
else:
if fit_intercept:
# TODO: For y.ndim >> 1, think about avoiding memory of y = y - y.mean()
y = y - np.average(y, axis=0, weights=sample_weight)
if sparse.issparse(X):
X_mean, _ = mean_variance_axis(X, axis=0, weights=sample_weight)
else:
X_mean = np.average(X, axis=0, weights=sample_weight)
if sample_weight is None:
yw = y
else:
if y.ndim > 1:
yw = y * sample_weight.reshape(-1, 1)
else:
yw = y * sample_weight
if fit_intercept:
# Avoid copy of X, i.e. avoid explicitly computing X - X_mean
if y.ndim > 1:
Xyw = X.T @ yw - X_mean[:, None] * np.sum(yw, axis=0)
else:
Xyw = X.T @ yw - X_mean * np.sum(yw, axis=0)
else:
Xyw = X.T @ yw
if Xyw.ndim == 1:
Xyw = Xyw[:, np.newaxis]
if sample_weight is not None:
n_samples = sample_weight.sum()
else:
n_samples = X.shape[0]
if not positive:
# Compute np.max(np.sqrt(np.sum(Xyw**2, axis=1))). We switch sqrt and max to
# avoid many computations of sqrt.
alpha_max = np.sqrt(np.max(np.sum(Xyw**2, axis=1))) / (n_samples * l1_ratio)
else:
# We may safely assume Xyw.shape[1] == 1, MultiTask estimators do not support
# positive constraints.
alpha_max = max(0, np.max(Xyw)) / (n_samples * l1_ratio)
if alpha_max <= np.finfo(np.float64).resolution:
return np.full(n_alphas, np.finfo(np.float64).resolution)
return np.geomspace(alpha_max, alpha_max * eps, num=n_alphas)
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", "sparse matrix"],
        "eps": [Interval(Real, 0, None, closed="neither")],
        "n_alphas": [Interval(Integral, 1, None, closed="left")],
        "alphas": ["array-like", None],
        "precompute": [StrOptions({"auto"}), "boolean", "array-like"],
        "Xy": ["array-like", None],
        "copy_X": ["boolean"],
        "coef_init": ["array-like", None],
        "verbose": ["verbose"],
        "return_n_iter": ["boolean"],
        "positive": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def lasso_path(
    X,
    y,
    *,
    eps=1e-3,
    n_alphas=100,
    alphas=None,
    precompute="auto",
    Xy=None,
    copy_X=True,
    coef_init=None,
    verbose=False,
    return_n_iter=False,
    positive=False,
    **params,
):
    """Compute Lasso path with coordinate descent.

    The Lasso optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : {array-like, sparse matrix} of shape (n_samples,) or \
        (n_samples, n_targets)
        Target values.

    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path.

    alphas : array-like, default=None
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically.

    precompute : 'auto', bool or array-like of shape \
        (n_features, n_features), default='auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like of shape (n_features,) or (n_features, n_targets),\
        default=None
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array-like of shape (n_features, ), default=None
        The initial values of the coefficients.

    verbose : bool or int, default=False
        Amount of verbosity.

    return_n_iter : bool, default=False
        Whether to return the number of iterations or not.

    positive : bool, default=False
        If set to True, forces coefficients to be positive.
        (Only allowed when ``y.ndim == 1``).

    **params : kwargs
        Keyword arguments passed to the coordinate descent solver.

    Returns
    -------
    alphas : ndarray of shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : ndarray of shape (n_features, n_alphas) or \
        (n_targets, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : ndarray of shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : list of int
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.

    See Also
    --------
    lars_path : Compute Least Angle Regression or Lasso path using LARS
        algorithm.
    Lasso : The Lasso is a linear model that estimates sparse coefficients.
    LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
    LassoCV : Lasso linear model with iterative fitting along a regularization
        path.
    LassoLarsCV : Cross-validated Lasso using the LARS algorithm.
    sklearn.decomposition.sparse_encode : Estimator that can be used to
        transform signals into sparse linear combination of atoms from a fixed.

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_lasso_lars_elasticnet_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_lasso_lars_elasticnet_path.py>`.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.

    The underlying coordinate descent solver uses gap safe screening rules to speedup
    fitting time, see :ref:`User Guide on coordinate descent <coordinate_descent>`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import lasso_path
    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[0.         0.         0.46874778]
     [0.2159048  0.4425765  0.23689075]]
    """
    # The Lasso is elastic net with a pure L1 penalty (l1_ratio=1.0);
    # everything else is forwarded unchanged to the generic path solver.
    forwarded = {
        "l1_ratio": 1.0,
        "eps": eps,
        "n_alphas": n_alphas,
        "alphas": alphas,
        "precompute": precompute,
        "Xy": Xy,
        "copy_X": copy_X,
        "coef_init": coef_init,
        "verbose": verbose,
        "positive": positive,
        "return_n_iter": return_n_iter,
    }
    return enet_path(X, y, **forwarded, **params)
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", "sparse matrix"],
        "l1_ratio": [Interval(Real, 0.0, 1.0, closed="both")],
        "eps": [Interval(Real, 0.0, None, closed="neither")],
        "n_alphas": [Interval(Integral, 1, None, closed="left")],
        "alphas": ["array-like", None],
        "precompute": [StrOptions({"auto"}), "boolean", "array-like"],
        "Xy": ["array-like", None],
        "copy_X": ["boolean"],
        "coef_init": ["array-like", None],
        "verbose": ["verbose"],
        "return_n_iter": ["boolean"],
        "positive": ["boolean"],
        "check_input": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def enet_path(
    X,
    y,
    *,
    l1_ratio=0.5,
    eps=1e-3,
    n_alphas=100,
    alphas=None,
    precompute="auto",
    Xy=None,
    copy_X=True,
    coef_init=None,
    verbose=False,
    return_n_iter=False,
    positive=False,
    check_input=True,
    **params,
):
    """Compute elastic net path with coordinate descent.

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        1 / (2 * n_samples) * ||Y - XW||_Fro^2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of L2-norm of each row (task) (i=feature, j=task)

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : {array-like, sparse matrix} of shape (n_samples,) or \
        (n_samples, n_targets)
        Target values.

    l1_ratio : float, default=0.5
        Number between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso.

    eps : float, default=1e-3
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, default=100
        Number of alphas along the regularization path.

    alphas : array-like, default=None
        List of alphas where to compute the models.
        If None alphas are set automatically.

    precompute : 'auto', bool or array-like of shape \
        (n_features, n_features), default='auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like of shape (n_features,) or (n_features, n_targets),\
        default=None
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : bool, default=True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array-like of shape (n_features, ), default=None
        The initial values of the coefficients.

    verbose : bool or int, default=False
        Amount of verbosity.

    return_n_iter : bool, default=False
        Whether to return the number of iterations or not.

    positive : bool, default=False
        If set to True, forces coefficients to be positive.
        (Only allowed when ``y.ndim == 1``).

    check_input : bool, default=True
        If set to False, the input validation checks are skipped (including the
        Gram matrix when provided). It is assumed that they are handled
        by the caller.

    **params : kwargs
        Keyword arguments passed to the coordinate descent solver.

    Returns
    -------
    alphas : ndarray of shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : ndarray of shape (n_features, n_alphas) or \
        (n_targets, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : ndarray of shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : list of int
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    See Also
    --------
    MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 mixed-norm \
        as regularizer.
    MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in cross-validation.
    ElasticNet : Linear regression with combined L1 and L2 priors as regularizer.
    ElasticNetCV : Elastic Net model with iterative fitting along a regularization path.

    Notes
    -----
    For an example, see
    :ref:`examples/linear_model/plot_lasso_lasso_lars_elasticnet_path.py
    <sphx_glr_auto_examples_linear_model_plot_lasso_lasso_lars_elasticnet_path.py>`.

    The underlying coordinate descent solver uses gap safe screening rules to speedup
    fitting time, see :ref:`User Guide on coordinate descent <coordinate_descent>`.

    Examples
    --------
    >>> from sklearn.linear_model import enet_path
    >>> from sklearn.datasets import make_regression
    >>> X, y, true_coef = make_regression(
    ...    n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0
    ... )
    >>> true_coef
    array([ 0.        ,  0.        ,  0.        , 97.9..., 45.7...])
    >>> alphas, estimated_coef, _ = enet_path(X, y, n_alphas=3)
    >>> alphas.shape
    (3,)
    >>> estimated_coef
     array([[ 0.        ,  0.78...,  0.56...],
            [ 0.        ,  1.12...,  0.61...],
            [-0.        , -2.12..., -1.12...],
            [ 0.        , 23.04..., 88.93...],
            [ 0.        , 10.63..., 41.56...]])
    """
    # Private/undocumented keyword arguments forwarded by the estimator ``fit``
    # methods (e.g. ElasticNet.fit) via **params. Any leftover key is an error.
    X_offset_param = params.pop("X_offset", None)
    X_scale_param = params.pop("X_scale", None)
    sample_weight = params.pop("sample_weight", None)
    tol = params.pop("tol", 1e-4)
    max_iter = params.pop("max_iter", 1000)
    random_state = params.pop("random_state", None)
    selection = params.pop("selection", "cyclic")
    do_screening = params.pop("do_screening", True)
    if len(params) > 0:
        raise ValueError("Unexpected parameters in params", params.keys())

    # We expect X and y to be already Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(
            X,
            accept_sparse="csc",
            dtype=[np.float64, np.float32],
            order="F",
            copy=copy_X,
        )
        y = check_array(
            y,
            accept_sparse="csc",
            dtype=X.dtype.type,
            order="F",
            copy=False,
            ensure_2d=False,
        )
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(
                Xy, dtype=X.dtype.type, order="C", copy=False, ensure_2d=False
            )

    n_samples, n_features = X.shape

    # A 2d y means multi-task mode (one column per target).
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        n_targets = y.shape[1]

    if multi_output and positive:
        raise ValueError("positive=True is not allowed for multi-output (y.ndim != 1)")

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.issparse(X):
        if X_offset_param is not None:
            # As sparse matrices are not actually centered we need this to be passed to
            # the CD solver.
            X_sparse_scaling = X_offset_param / X_scale_param
            X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
        else:
            X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)

    # X should have been passed through _pre_fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, _, _, _, precompute, Xy = _pre_fit(
            X,
            y,
            Xy,
            precompute,
            fit_intercept=False,
            copy=False,
            check_gram=True,
        )
    if alphas is None:
        # fit_intercept and sample_weight have already been dealt with in calling
        # methods like ElasticNet.fit.
        alphas = _alpha_grid(
            X,
            y,
            Xy=Xy,
            l1_ratio=l1_ratio,
            fit_intercept=False,
            positive=positive,
            eps=eps,
            n_alphas=n_alphas,
        )
    elif len(alphas) > 1:
        # The path is traversed from the largest to the smallest alpha so that
        # warm starting coef_ from the previous (more regularized) solution pays off.
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    rng = check_random_state(random_state)
    if selection not in ["random", "cyclic"]:
        raise ValueError("selection should be either random or cyclic.")
    random = selection == "random"

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
    else:
        coefs = np.empty((n_targets, n_features, n_alphas), dtype=X.dtype)

    if coef_init is None:
        coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order="F")
    else:
        coef_ = np.asfortranarray(coef_init, dtype=X.dtype)

    # Fit one model per alpha; coef_ carries over between iterations (warm start).
    for i, alpha in enumerate(alphas):
        # account for n_samples scaling in objectives between here and cd_fast
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the appropriate Cython coordinate descent routine:
        # sparse mono-output, dense multi-task, precomputed Gram, or plain dense.
        if not multi_output and sparse.issparse(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                w=coef_,
                alpha=l1_reg,
                beta=l2_reg,
                X_data=X.data,
                X_indices=X.indices,
                X_indptr=X.indptr,
                y=y,
                sample_weight=sample_weight,
                X_mean=X_sparse_scaling,
                max_iter=max_iter,
                tol=tol,
                rng=rng,
                random=random,
                positive=positive,
                do_screening=do_screening,
            )
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, do_screening
            )
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, dtype=X.dtype.type, order="C")
            model = cd_fast.enet_coordinate_descent_gram(
                coef_,
                l1_reg,
                l2_reg,
                precompute,
                Xy,
                y,
                max_iter,
                tol,
                rng,
                random,
                positive,
                do_screening,
            )
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_,
                l1_reg,
                l2_reg,
                X,
                y,
                max_iter,
                tol,
                rng,
                random,
                positive,
                do_screening,
            )
        else:
            raise ValueError(
                "Precompute should be one of True, False, 'auto' or array-like. Got %r"
                % precompute
            )
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        # we correct the scale of the returned dual gap, as the objective
        # in cd_fast is n_samples * the objective in this docstring.
        dual_gaps[i] = dual_gap_ / n_samples
        n_iters.append(n_iter_)

        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print("Path: %03i out of %03i" % (i, n_alphas))
            else:
                sys.stderr.write(".")

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(MultiOutputMixin, RegressorMixin, LinearModel):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function:
.. math::
\\frac{1}{2 n_{\\rm samples}} \\cdot \\|y - X w\\|_2^2
+ \\alpha \\cdot {\\rm l1\\_{ratio}} \\cdot \\|w\\|_1
+ 0.5 \\cdot \\alpha \\cdot (1 - {\\rm l1\\_{ratio}}) \\cdot \\|w\\|_2^2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to:
.. math::
a \\cdot \\|w\\|_1 + 0.5 \\cdot b \\cdot \\|w\\|_2^2
where:
.. math::
\\alpha = a + b, \\quad {\\rm l1\\_{ratio}} = \\frac{a}{a + b}
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
parameter. ``alpha = 0`` is equivalent to an ordinary least square,
solved by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float, default=0.5
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
precompute : bool or array-like of shape (n_features, n_features),\
default=False
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``False`` to preserve sparsity.
Check :ref:`an example on how to use a precomputed Gram Matrix in ElasticNet
<sphx_glr_auto_examples_linear_model_plot_elastic_net_precomputed_gram_matrix_with_weighted_samples.py>`
for details.
max_iter : int, default=1000
The maximum number of iterations.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, default=1e-4
The tolerance for the optimization: if the updates are smaller or equal to
``tol``, the optimization code checks the dual gap for optimality and continues
until it is smaller or equal to ``tol``, see Notes below.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator that selects a random
feature to update. Used when ``selection`` == 'random'.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
selection : {'cyclic', 'random'}, default='cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the cost function formula).
sparse_coef_ : sparse matrix of shape (n_features,) or \
(n_targets, n_features)
Sparse representation of the `coef_`.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function.
n_iter_ : list of int
Number of iterations run by the coordinate descent solver to reach
the specified tolerance.
dual_gap_ : float or ndarray of shape (n_targets,)
Given param alpha, the dual gaps at the end of the optimization,
same shape as each observation of y.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
ElasticNetCV : Elastic net model with best model selection by
cross-validation.
SGDRegressor : Implements elastic net regression with incremental training.
SGDClassifier : Implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log_loss", penalty="elasticnet")``).
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The precise stopping criteria based on `tol` are the following: First, check that
that maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|`
is smaller or equal to `tol` times the maximum absolute coefficient,
:math:`\\max_j |w_j|`. If so, then additionally check whether the dual gap is
smaller or equal to `tol` times :math:`||y||_2^2 / n_{\\text{samples}}`.
The underlying coordinate descent solver uses gap safe screening rules to speedup
fitting time, see :ref:`User Guide on coordinate descent <coordinate_descent>`.
Examples
--------
>>> from sklearn.linear_model import ElasticNet
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=2, random_state=0)
>>> regr = ElasticNet(random_state=0)
>>> regr.fit(X, y)
ElasticNet(random_state=0)
>>> print(regr.coef_)
[18.83816048 64.55968825]
>>> print(regr.intercept_)
1.451
>>> print(regr.predict([[0, 0]]))
[1.451]
- :ref:`sphx_glr_auto_examples_linear_model_plot_lasso_and_elasticnet.py`
showcases ElasticNet alongside Lasso and ARD Regression for sparse
signal recovery in the presence of noise and feature correlation.
"""
# "check_input" is used for optimisation and isn't something to be passed
# around in a pipeline.
__metadata_request__fit = {"check_input": metadata_routing.UNUSED}
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0, None, closed="left")],
"l1_ratio": [Interval(Real, 0, 1, closed="both")],
"fit_intercept": ["boolean"],
"precompute": ["boolean", "array-like"],
"max_iter": [Interval(Integral, 1, None, closed="left"), None],
"copy_X": ["boolean"],
"tol": [Interval(Real, 0, None, closed="left")],
"warm_start": ["boolean"],
"positive": ["boolean"],
"random_state": ["random_state"],
"selection": [StrOptions({"cyclic", "random"})],
}
path = staticmethod(enet_path)
def __init__(
self,
alpha=1.0,
*,
l1_ratio=0.5,
fit_intercept=True,
precompute=False,
max_iter=1000,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
"""
Loss functions for linear models with raw_prediction = X @ coef
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy import sparse
from sklearn.utils.extmath import safe_sparse_dot, squared_norm
def sandwich_dot(X, W):
    """Compute the sandwich product X.T @ diag(W) @ X."""
    # TODO: This "sandwich product" is the main computational bottleneck for
    # solvers that use the full hessian matrix, so thread parallelism would
    # pay off the most here. A dedicated Cython routine could exploit the
    # symmetry of the result, but it is very hard to beat BLAS GEMM, even
    # though GEMM cannot exploit the symmetry, unless one pays the price of
    # taking square roots and implements
    #     sqrtWX = sqrt(W)[:, None] * X
    #     return sqrtWX.T @ sqrtWX
    # which (might) detect the symmetry and use BLAS SYRK under the hood.
    if not sparse.issparse(X):
        # Dense case: a single BLAS matrix multiplication (GEMM) is by far
        # faster than e.g. np.einsum, even if it may use more memory.
        return X.T @ (W[:, None] * X)
    n_samples = X.shape[0]
    diag_W = sparse.dia_matrix((W, 0), shape=(n_samples, n_samples))
    return safe_sparse_dot(X.T, diag_W @ X, dense_output=True)
class LinearModelLoss:
"""General class for loss functions with raw_prediction = X @ coef + intercept.
Note that raw_prediction is also known as linear predictor.
The loss is the average of per sample losses and includes a term for L2
regularization::
loss = 1 / s_sum * sum_i s_i loss(y_i, X_i @ coef + intercept)
+ 1/2 * l2_reg_strength * ||coef||_2^2
with sample weights s_i=1 if sample_weight=None and s_sum=sum_i s_i.
Gradient and hessian, for simplicity without intercept, are::
gradient = 1 / s_sum * X.T @ loss.gradient + l2_reg_strength * coef
hessian = 1 / s_sum * X.T @ diag(loss.hessian) @ X
+ l2_reg_strength * identity
Conventions:
if fit_intercept:
n_dof = n_features + 1
else:
n_dof = n_features
if base_loss.is_multiclass:
coef.shape = (n_classes, n_dof) or ravelled (n_classes * n_dof,)
else:
coef.shape = (n_dof,)
The intercept term is at the end of the coef array:
if base_loss.is_multiclass:
if coef.shape (n_classes, n_dof):
intercept = coef[:, -1]
if coef.shape (n_classes * n_dof,)
intercept = coef[n_classes * n_features:] = coef[(n_dof-1):]
intercept.shape = (n_classes,)
else:
intercept = coef[-1]
Shape of gradient follows shape of coef.
gradient.shape = coef.shape
But hessian (to make our lives simpler) are always 2-d:
if base_loss.is_multiclass:
hessian.shape = (n_classes * n_dof, n_classes * n_dof)
else:
hessian.shape = (n_dof, n_dof)
Note: if coef has shape (n_classes * n_dof,), the classes are expected to be
contiguous, i.e. the 2d-array can be reconstructed as
coef.reshape((n_classes, -1), order="F")
The option order="F" makes coef[:, i] contiguous. This, in turn, makes the
coefficients without intercept, coef[:, :-1], contiguous and speeds up
matrix-vector computations.
Note: If the average loss per sample is wanted instead of the sum of the loss per
sample, one can simply use a rescaled sample_weight such that
sum(sample_weight) = 1.
Parameters
----------
base_loss : instance of class BaseLoss from sklearn._loss.
fit_intercept : bool
"""
def __init__(self, base_loss, fit_intercept):
self.base_loss = base_loss
self.fit_intercept = fit_intercept
def init_zero_coef(self, X, dtype=None):
"""Allocate coef of correct shape with zeros.
Parameters:
-----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
dtype : data-type, default=None
Overrides the data type of coef. With dtype=None, coef will have the same
dtype as X.
Returns
-------
coef : ndarray of shape (n_dof,) or (n_classes, n_dof)
Coefficients of a linear model.
"""
n_features = X.shape[1]
n_classes = self.base_loss.n_classes
if self.fit_intercept:
n_dof = n_features + 1
else:
n_dof = n_features
if self.base_loss.is_multiclass:
coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order="F")
else:
coef = np.zeros_like(X, shape=n_dof, dtype=dtype)
return coef
def weight_intercept(self, coef):
"""Helper function to get coefficients and intercept.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
Returns
-------
weights : ndarray of shape (n_features,) or (n_classes, n_features)
Coefficients without intercept term.
intercept : float or ndarray of shape (n_classes,)
Intercept terms.
"""
if not self.base_loss.is_multiclass:
if self.fit_intercept:
intercept = coef[-1]
weights = coef[:-1]
else:
intercept = 0.0
weights = coef
else:
# reshape to (n_classes, n_dof)
if coef.ndim == 1:
weights = coef.reshape((self.base_loss.n_classes, -1), order="F")
else:
weights = coef
if self.fit_intercept:
intercept = weights[:, -1]
weights = weights[:, :-1]
else:
intercept = 0.0
return weights, intercept
def weight_intercept_raw(self, coef, X):
"""Helper function to get coefficients, intercept and raw_prediction.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Returns
-------
weights : ndarray of shape (n_features,) or (n_classes, n_features)
Coefficients without intercept term.
intercept : float or ndarray of shape (n_classes,)
Intercept terms.
raw_prediction : ndarray of shape (n_samples,) or \
(n_samples, n_classes)
"""
weights, intercept = self.weight_intercept(coef)
if not self.base_loss.is_multiclass:
raw_prediction = X @ weights + intercept
else:
# weights has shape (n_classes, n_dof)
raw_prediction = X @ weights.T + intercept # ndarray, likely C-contiguous
return weights, intercept, raw_prediction
def l2_penalty(self, weights, l2_reg_strength):
"""Compute L2 penalty term l2_reg_strength/2 *||w||_2^2."""
norm2_w = weights @ weights if weights.ndim == 1 else squared_norm(weights)
return 0.5 * l2_reg_strength * norm2_w
def loss(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Compute the loss as weighted average over point-wise losses.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
loss : float
Weighted average of losses per sample, plus penalty.
"""
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
loss = self.base_loss.loss(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=None,
n_threads=n_threads,
)
loss = np.average(loss, weights=sample_weight)
return loss + self.l2_penalty(weights, l2_reg_strength)
def loss_gradient(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Computes the sum of loss and gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
loss : float
Weighted average of losses per sample, plus penalty.
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
loss, grad_pointwise = self.base_loss.loss_gradient(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
loss = loss.sum() / sw_sum
loss += self.l2_penalty(weights, l2_reg_strength)
grad_pointwise /= sw_sum
if not self.base_loss.is_multiclass:
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
else:
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
# grad_pointwise.shape = (n_samples, n_classes)
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
grad = grad.ravel(order="F")
return loss, grad
def gradient(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Computes the gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
grad_pointwise = self.base_loss.gradient(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
grad_pointwise /= sw_sum
if not self.base_loss.is_multiclass:
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
return grad
else:
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
# gradient.shape = (n_samples, n_classes)
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
return grad.ravel(order="F")
else:
return grad
def gradient_hessian(
    self,
    coef,
    X,
    y,
    sample_weight=None,
    l2_reg_strength=0.0,
    n_threads=1,
    gradient_out=None,
    hessian_out=None,
    raw_prediction=None,
):
    """Computes gradient and hessian w.r.t. coef.

    The gradient and the (full, dense) Hessian of the (optionally
    L2-penalized) mean loss are assembled in-place into (optionally)
    caller-provided output buffers.

    Parameters
    ----------
    coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
        Coefficients of a linear model.
        If shape (n_classes * n_dof,), the classes of one feature are contiguous,
        i.e. one reconstructs the 2d-array via
        coef.reshape((n_classes, -1), order="F").
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.
    y : contiguous array of shape (n_samples,)
        Observed, true target values.
    sample_weight : None or contiguous array of shape (n_samples,), default=None
        Sample weights.
    l2_reg_strength : float, default=0.0
        L2 regularization strength.
    n_threads : int, default=1
        Number of OpenMP threads to use.
    gradient_out : None or ndarray of shape coef.shape
        A location into which the gradient is stored. If None, a new array
        might be created.
    hessian_out : None or ndarray of shape (n_dof, n_dof) or \
            (n_classes * n_dof, n_classes * n_dof)
        A location into which the hessian is stored. If None, a new array
        might be created.
    raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
        Raw prediction values (in link space). If provided, these are used. If
        None, then raw_prediction = X @ coef + intercept is calculated.

    Returns
    -------
    gradient : ndarray of shape coef.shape
        The gradient of the loss.

    hessian : ndarray of shape (n_dof, n_dof) or \
            (n_classes, n_dof, n_dof, n_classes)
        Hessian matrix.

    hessian_warning : bool
        True if pointwise hessian has more than 25% of its elements non-positive.
    """
    (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
    n_dof = n_features + int(self.fit_intercept)
    if raw_prediction is None:
        weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
    else:
        weights, intercept = self.weight_intercept(coef)
    # Normalization constant: gradients/hessians are of the *mean* loss.
    sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
    # Allocate gradient.
    if gradient_out is None:
        grad = np.empty_like(coef, dtype=weights.dtype, order="F")
    elif gradient_out.shape != coef.shape:
        raise ValueError(
            f"gradient_out is required to have shape coef.shape = {coef.shape}; "
            f"got {gradient_out.shape}."
        )
    elif self.base_loss.is_multiclass and not gradient_out.flags.f_contiguous:
        raise ValueError("gradient_out must be F-contiguous.")
    else:
        grad = gradient_out
    # Allocate hessian.
    n = coef.size  # for multinomial this equals n_dof * n_classes
    if hessian_out is None:
        hess = np.empty((n, n), dtype=weights.dtype)
    elif hessian_out.shape != (n, n):
        # NOTE(review): the f-string expression `{n, n}` evaluates the tuple
        # (n, n), so the message renders with doubled parentheses, e.g.
        # "shape ((5, 5))". Consider `({n}, {n})` instead.
        raise ValueError(
            f"hessian_out is required to have shape ({n, n}); got "
            f"{hessian_out.shape=}."
        )
    elif self.base_loss.is_multiclass and (
        not hessian_out.flags.c_contiguous and not hessian_out.flags.f_contiguous
    ):
        raise ValueError("hessian_out must be contiguous.")
    else:
        hess = hessian_out
    if not self.base_loss.is_multiclass:
        grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
            y_true=y,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            n_threads=n_threads,
        )
        grad_pointwise /= sw_sum
        hess_pointwise /= sw_sum
        # For non-canonical link functions and far away from the optimum, the
        # pointwise hessian can be negative. We take care that 75% of the hessian
        # entries are positive.
        hessian_warning = (
            np.average(hess_pointwise <= 0, weights=sample_weight) > 0.25
        )
        hess_pointwise = np.abs(hess_pointwise)
        grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
        if self.fit_intercept:
            grad[-1] = grad_pointwise.sum()
        if hessian_warning:
            # Exit early without computing the hessian.
            return grad, hess, hessian_warning
        # hess[:p, :p] = X' @ diag(h) @ X, computed by a dedicated helper.
        hess[:n_features, :n_features] = sandwich_dot(X, hess_pointwise)
        if l2_reg_strength > 0:
            # The L2 penalty enters the Hessian on the diagonal only. To add those
            # terms, we use a flattened view of the array.
            order = "C" if hess.flags.c_contiguous else "F"
            hess.reshape(-1, order=order)[: (n_features * n_dof) : (n_dof + 1)] += (
                l2_reg_strength
            )
        if self.fit_intercept:
            # With intercept included as added column to X, the hessian becomes
            # hess = (X, 1)' @ diag(h) @ (X, 1)
            #      = (X' @ diag(h) @ X, X' @ h)
            #        (          h @ X, sum(h))
            # The left upper part has already been filled, it remains to compute
            # the last row and the last column.
            Xh = X.T @ hess_pointwise
            hess[:-1, -1] = Xh
            hess[-1, :-1] = Xh
            hess[-1, -1] = hess_pointwise.sum()
    else:
        # Here we may safely assume HalfMultinomialLoss aka categorical
        # cross-entropy.
        # HalfMultinomialLoss computes only the diagonal part of the hessian, i.e.
        # diagonal in the classes. Here, we want the full hessian. Therefore, we
        # call gradient_proba.
        grad_pointwise, proba = self.base_loss.gradient_proba(
            y_true=y,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            n_threads=n_threads,
        )
        grad_pointwise /= sw_sum
        grad = grad.reshape((n_classes, n_dof), order="F")
        grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
        if self.fit_intercept:
            grad[:, -1] = grad_pointwise.sum(axis=0)
        if coef.ndim == 1:
            grad = grad.ravel(order="F")
        # The full hessian matrix, i.e. not only the diagonal part, dropping most
        # indices, is given by:
        #
        #   hess = X' @ h @ X
        #
        # Here, h is a priori a 4-dimensional matrix of shape
        # (n_samples, n_samples, n_classes, n_classes). It is diagonal in its
        # first two dimensions (the ones with n_samples), i.e. it is
        # effectively a 3-dimensional matrix (n_samples, n_classes, n_classes).
        #
        #   h = diag(p) - p' p
        #
        # or with indices k and l for classes
        #
        #   h_kl = p_k * delta_kl - p_k * p_l
        #
        # with p_k the (predicted) probability for class k. Only the dimension in
        # n_samples multiplies with X.
        # For 3 classes and n_samples = 1, this looks like ("@" is a bit misused
        # here):
        #
        #   hess = X' @ (h00 h10 h20) @ X
        #               (h10 h11 h12)
        #               (h20 h12 h22)
        #        = (X' @ diag(h00) @ X, X' @ diag(h10), X' @ diag(h20))
        #          (X' @ diag(h10) @ X, X' @ diag(h11), X' @ diag(h12))
        #          (X' @ diag(h20) @ X, X' @ diag(h12), X' @ diag(h22))
        #
        # Now coef of shape (n_classes * n_dof) is contiguous in n_classes.
        # Therefore, we want the hessian to follow this convention, too, i.e.
        #   hess[:n_classes, :n_classes] = (x0' @ h00 @ x0, x0' @ h10 @ x0, ..)
        #                                  (x0' @ h10 @ x0, x0' @ h11 @ x0, ..)
        #                                  (x0' @ h20 @ x0, x0' @ h12 @ x0, ..)
        # is the first feature, x0, for all classes. In our implementation, we
        # still want to take advantage of BLAS "X.T @ X". Therefore, we have some
        # index/slicing battle to fight.
        if sample_weight is not None:
            sw = sample_weight / sw_sum
        else:
            sw = 1.0 / sw_sum
        for k in range(n_classes):
            # Diagonal terms (in classes) hess_kk.
            # Note that this also writes to some of the lower triangular part.
            h = proba[:, k] * (1 - proba[:, k]) * sw
            # Strided slice k::n_classes selects the rows/columns of feature
            # coefficients belonging to class k (class-contiguous layout).
            hess[
                k : n_classes * n_features : n_classes,
                k : n_classes * n_features : n_classes,
            ] = sandwich_dot(X, h)
            if self.fit_intercept:
                # See above in the non multiclass case.
                Xh = X.T @ h
                hess[
                    k : n_classes * n_features : n_classes,
                    n_classes * n_features + k,
                ] = Xh
                hess[
                    n_classes * n_features + k,
                    k : n_classes * n_features : n_classes,
                ] = Xh
                hess[n_classes * n_features + k, n_classes * n_features + k] = (
                    h.sum()
                )
            # Off diagonal terms (in classes) hess_kl.
            for l in range(k + 1, n_classes):
                # Upper triangle (in classes).
                h = -proba[:, k] * proba[:, l] * sw
                hess[
                    k : n_classes * n_features : n_classes,
                    l : n_classes * n_features : n_classes,
                ] = sandwich_dot(X, h)
                if self.fit_intercept:
                    Xh = X.T @ h
                    hess[
                        k : n_classes * n_features : n_classes,
                        n_classes * n_features + l,
                    ] = Xh
                    hess[
                        n_classes * n_features + k,
                        l : n_classes * n_features : n_classes,
                    ] = Xh
                    hess[n_classes * n_features + k, n_classes * n_features + l] = (
                        h.sum()
                    )
                # Fill lower triangle (in classes). The unbounded stop of these
                # slices also copies the symmetric intercept entries.
                hess[l::n_classes, k::n_classes] = hess[k::n_classes, l::n_classes]
        if l2_reg_strength > 0:
            # See above in the non multiclass case.
            order = "C" if hess.flags.c_contiguous else "F"
            hess.reshape(-1, order=order)[
                : (n_classes**2 * n_features * n_dof) : (n_classes * n_dof + 1)
            ] += l2_reg_strength
        # The pointwise hessian is always non-negative for the multinomial loss.
        hessian_warning = False
    return grad, hess, hessian_warning
def gradient_hessian_product(
self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1
):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes in a vector input of shape of gradient and
and returns matrix-vector product with hessian.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
if not self.base_loss.is_multiclass:
grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
hess_pointwise /= sw_sum
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
# Precompute as much as possible: hX, hX_sum and hessian_sum
hessian_sum = hess_pointwise.sum()
if sparse.issparse(X):
hX = (
sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples))
@ X
)
else:
hX = hess_pointwise[:, np.newaxis] * X
if self.fit_intercept:
# Calculate the double derivative with respect to intercept.
# Note: In case hX is sparse, hX.sum is a matrix object.
hX_sum = np.squeeze(np.asarray(hX.sum(axis=0)))
# prevent squeezing to zero-dim array if n_features == 1
hX_sum = np.atleast_1d(hX_sum)
# With intercept included and l2_reg_strength = 0, hessp returns
# res = (X, 1)' @ diag(h) @ (X, 1) @ s
# = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1])
# res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1]
# res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1]
def hessp(s):
ret = np.empty_like(s)
if sparse.issparse(X):
ret[:n_features] = X.T @ (hX @ s[:n_features])
else:
ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]])
ret[:n_features] += l2_reg_strength * s[:n_features]
if self.fit_intercept:
ret[:n_features] += s[-1] * hX_sum
ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1]
return ret
else:
# Here we may safely assume HalfMultinomialLoss aka categorical
# cross-entropy.
# HalfMultinomialLoss computes only the diagonal part of the hessian, i.e.
# diagonal in the classes. Here, we want the matrix-vector product of the
# full hessian. Therefore, we call gradient_proba.
grad_pointwise, proba = self.base_loss.gradient_proba(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
# Full hessian-vector product, i.e. not only the diagonal part of the
# hessian. Derivation with some index battle for input vector s:
# - sample index i
# - feature indices j, m
# - class indices k, l
# - 1_{k=l} is one if k=l else 0
# - p_i_k is the (predicted) probability that sample i belongs to class k
# for all i: sum_k p_i_k = 1
# - s_l_m is input vector for class l and feature m
# - X' = X transposed
#
# Note: Hessian with dropping most indices is just:
# X' @ p_k (1(k=l) - p_l) @ X
#
# result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m
# = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l)
# * X_{im} s_l_m
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
"""Orthogonal matching pursuit algorithms"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from math import sqrt
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from sklearn.base import MultiOutputMixin, RegressorMixin, _fit_context
from sklearn.linear_model._base import LinearModel, _pre_fit
from sklearn.model_selection import check_cv
from sklearn.utils import Bunch, as_float_array, check_array
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import FLOAT_DTYPES, validate_data
# Warning message shared by the OMP solvers below. It is emitted when the
# dictionary contains (numerically) linearly dependent atoms and the algorithm
# has to stop before the requested sparsity or tolerance is reached.
premature = (
    "Orthogonal matching pursuit ended prematurely due to linear"
    " dependence in the dictionary. The requested precision might"
    " not have been met."
)
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Greedily selects dictionary atoms (columns of X) most correlated with the
    current residual, maintaining an incrementally-updated Cholesky factor L
    of the active atoms' Gram matrix so each least-squares refit is cheap.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : ndarray of shape (n_samples,)
        Input targets.

    n_nonzero_coefs : int
        Targeted number of non-zero elements.

    tol : float, default=None
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, default=True
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway. Note that with ``copy_X=False`` (and Fortran
        order) the caller's X has its columns reordered in place by the
        swaps below.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : ndarray of shape (n_nonzero_coefs,)
        Non-zero elements of the solution.

    idx : ndarray of shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector.

    coef : ndarray of shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    if copy_X:
        X = X.copy("F")
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    min_float = np.finfo(X.dtype).eps
    # BLAS/LAPACK routines matched to X's dtype: nrm2 (Euclidean norm),
    # swap (in-place vector exchange), potrs (solve with Cholesky factor).
    nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,))
    (potrs,) = get_lapack_funcs(("potrs",), (X,))

    alpha = np.dot(X.T, y)
    residual = y  # alias on first iteration; y itself is never mutated
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    max_features = X.shape[1] if tol is not None else n_nonzero_coefs

    # L holds the lower-triangular Cholesky factor of the active atoms' Gram
    # matrix; only the leading (n_active, n_active) part is meaningful.
    L = np.empty((max_features, max_features), dtype=X.dtype)

    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Next atom: largest absolute correlation with the residual.
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break

        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            # Solve in place: the new off-diagonal row of L overwrites the rhs.
            linalg.solve_triangular(
                L[:n_active, :n_active],
                L[n_active, :n_active],
                trans=0,
                lower=1,
                overwrite_b=True,
                check_finite=False,
            )
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = linalg.norm(X[:, lam]) ** 2 - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = linalg.norm(X[:, lam])

        # Move the selected atom into the leading (active) block, keeping
        # alpha and the original column indices in sync.
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1

        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(
            L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False
        )

        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def _gram_omp(
    Gram,
    Xy,
    n_nonzero_coefs,
    tol_0=None,
    tol=None,
    copy_Gram=True,
    copy_Xy=True,
    return_path=False,
):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.

    This function uses the Cholesky decomposition method. It mirrors
    ``_cholesky_omp`` but works entirely in the feature (Gram) domain, so the
    residual norm is tracked incrementally via ``tol_curr`` instead of being
    recomputed from samples.

    Parameters
    ----------
    Gram : ndarray of shape (n_features, n_features)
        Gram matrix of the input data matrix. Mutated in place (row/column
        swaps) unless ``copy_Gram=True``.

    Xy : ndarray of shape (n_features,)
        Input targets. Mutated in place (element swaps) unless
        ``copy_Xy=True`` or the array is read-only.

    n_nonzero_coefs : int
        Targeted number of non-zero elements.

    tol_0 : float, default=None
        Squared norm of y, required if tol is not None.

    tol : float, default=None
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_Gram : bool, default=True
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, default=True
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : ndarray of shape (n_nonzero_coefs,)
        Non-zero elements of the solution.

    idx : ndarray of shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector.

    coefs : ndarray of shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram)

    # Xy is swapped in place below, so a read-only input must be copied too.
    if copy_Xy or not Xy.flags.writeable:
        Xy = Xy.copy()

    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,))
    (potrs,) = get_lapack_funcs(("potrs",), (Gram,))

    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy  # alias on first iteration; rebound to a fresh array below
    tol_curr = tol_0  # running estimate of the squared residual norm
    delta = 0
    gamma = np.empty(0)
    n_active = 0

    max_features = len(Gram) if tol is not None else n_nonzero_coefs

    # Lower-triangular Cholesky factor of the active atoms' Gram block.
    L = np.empty((max_features, max_features), dtype=Gram.dtype)

    L[0, 0] = 1.0
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Next atom: largest absolute correlation with the residual.
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=3)
            break
        if n_active > 0:
            L[n_active, :n_active] = Gram[lam, :n_active]
            # Solve in place: the new off-diagonal row of L overwrites the rhs.
            linalg.solve_triangular(
                L[:n_active, :n_active],
                L[n_active, :n_active],
                trans=0,
                lower=1,
                overwrite_b=True,
                check_finite=False,
            )
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = Gram[lam, lam] - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = sqrt(Gram[lam, lam])

        # Move the selected atom into the leading (active) block: swap both
        # the row and the column of Gram, and the matching entries of Xy.
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1

        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(
            L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False
        )
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Incremental update of the squared residual norm:
            # undo last step's correction, then subtract the new one.
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if abs(tol_curr) <= tol:
                break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
@validate_params(
    {
        "X": ["array-like"],
        "y": [np.ndarray],
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "precompute": ["boolean", StrOptions({"auto"})],
        "copy_X": ["boolean"],
        "return_path": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def orthogonal_mp(
    X,
    y,
    *,
    n_nonzero_coefs=None,
    tol=None,
    precompute=False,
    copy_X=True,
    return_path=False,
    return_n_iter=False,
):
    r"""Orthogonal Matching Pursuit (OMP).

    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:

    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}

    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.

    y : ndarray of shape (n_samples,) or (n_samples, n_targets)
        Input targets.

    n_nonzero_coefs : int, default=None
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float, default=None
        Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.

    precompute : 'auto' or bool, default=False
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.

    copy_X : bool, default=True
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis generates coefficients in increasing order
        of active features.

    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See Also
    --------
    OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model.
    orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y.
    lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
    sklearn.decomposition.sparse_encode : Sparse coding.

    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.linear_model import orthogonal_mp
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> coef = orthogonal_mp(X, y)
    >>> coef.shape
    (100,)
    >>> X[:1,] @ coef
    array([-78.68])
    """
    X = check_array(X, order="F", copy=copy_X)
    # check_array already copied (or the caller allowed overwriting), so the
    # per-target solver may mutate X freely...
    copy_X = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    y = check_array(y)
    if y.shape[1] > 1:  # subsequent targets will be affected
        # ...unless there are several targets: each _cholesky_omp call reorders
        # X in place, so later targets need their own pristine copy.
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError(
            "The number of atoms cannot be more than the number of features"
        )
    if precompute == "auto":
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y**2), axis=0)
        else:
            norms_squared = None
        # Bug fix: forward return_n_iter so that the documented `n_iters`
        # return value is also produced on the precomputed (Gram) path;
        # previously it was silently dropped when precompute was True.
        return orthogonal_mp_gram(
            G,
            Xy,
            n_nonzero_coefs=n_nonzero_coefs,
            tol=tol,
            norms_squared=norms_squared,
            copy_Gram=copy_X,
            copy_Xy=False,
            return_path=return_path,
            return_n_iter=return_n_iter,
        )
    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    n_iters = []
    # Solve one OMP problem per target column.
    for k in range(y.shape[1]):
        out = _cholesky_omp(
            X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path
        )
        if return_path:
            _, idx, coefs, n_iter = out
            # Trim the path axis to the number of selected atoms, then scatter
            # each step's solution back to the original feature positions.
            coef = coef[:, :, : len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)
    if y.shape[1] == 1:
        n_iters = n_iters[0]
    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
@validate_params(
    {
        "Gram": ["array-like"],
        "Xy": ["array-like"],
        "n_nonzero_coefs": [Interval(Integral, 0, None, closed="neither"), None],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "norms_squared": ["array-like", None],
        "copy_Gram": ["boolean"],
        "copy_Xy": ["boolean"],
        "return_path": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def orthogonal_mp_gram(
    Gram,
    Xy,
    *,
    n_nonzero_coefs=None,
    tol=None,
    norms_squared=None,
    copy_Gram=True,
    copy_Xy=True,
    return_path=False,
    return_n_iter=False,
):
    """Gram Orthogonal Matching Pursuit (OMP).

    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    Gram : array-like of shape (n_features, n_features)
        Gram matrix of the input data: `X.T * X`.

    Xy : array-like of shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by `X`: `X.T * y`.

    n_nonzero_coefs : int, default=None
        Desired number of non-zero entries in the solution. If `None` (by
        default) this value is set to 10% of n_features.

    tol : float, default=None
        Maximum squared norm of the residual. If not `None`,
        overrides `n_nonzero_coefs`.

    norms_squared : array-like of shape (n_targets,), default=None
        Squared L2 norms of the lines of `y`. Required if `tol` is not None.

    copy_Gram : bool, default=True
        Whether the gram matrix must be copied by the algorithm. A `False`
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, default=True
        Whether the covariance vector `Xy` must be copied by the algorithm.
        If `False`, it may be overwritten.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        `(n_features, n_features)` or `(n_features, n_targets, n_features)` and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    n_iters : list or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See Also
    --------
    OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
    orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
    lars_path : Compute Least Angle Regression or Lasso path using
        LARS algorithm.
    sklearn.decomposition.sparse_encode : Generic sparse coding.
        Each column of the result is the solution to a Lasso problem.

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.linear_model import orthogonal_mp_gram
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> coef = orthogonal_mp_gram(X.T @ X, X.T @ y)
    >>> coef.shape
    (100,)
    >>> X[:1,] @ coef
    array([-78.68])
    """
    Gram = check_array(Gram, order="F", copy=copy_Gram)
    Xy = np.asarray(Xy)

    # With several targets, _gram_omp reorders Gram in place per target, so
    # later targets need their own pristine copy.
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        copy_Gram = True
    if Xy.ndim == 1:
        # Promote the single-target case to the (n_features, 1) layout; the
        # scalar norms_squared is wrapped so it can be indexed per target.
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            norms_squared = [norms_squared]
    if copy_Xy or not Xy.flags.writeable:
        # Make the copy once instead of many times in _gram_omp itself.
        Xy = Xy.copy()

    n_features = len(Gram)
    if n_nonzero_coefs is None and tol is None:
        # NOTE(review): unlike orthogonal_mp, this default is not clamped to a
        # minimum of 1, so n_features < 10 falls through to the "must be
        # positive" error below — confirm whether that asymmetry is intended.
        n_nonzero_coefs = int(0.1 * n_features)
    if tol is not None and norms_squared is None:
        raise ValueError(
            "Gram OMP needs the precomputed norms in order "
            "to evaluate the error sum of squares."
        )
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > n_features:
        raise ValueError(
            "The number of atoms cannot be more than the number of features"
        )

    n_targets = Xy.shape[1]
    if return_path:
        coef = np.zeros((n_features, n_targets, n_features), dtype=Gram.dtype)
    else:
        coef = np.zeros((n_features, n_targets), dtype=Gram.dtype)

    iter_counts = []
    # Solve one OMP problem per target column.
    for target in range(n_targets):
        result = _gram_omp(
            Gram,
            Xy[:, target],
            n_nonzero_coefs,
            norms_squared[target] if tol is not None else None,
            tol,
            copy_Gram=copy_Gram,
            copy_Xy=False,
            return_path=return_path,
        )
        if return_path:
            _, idx, coefs, n_iter = result
            # Trim the path axis to the number of selected atoms, then scatter
            # each step's solution back to the original feature positions.
            coef = coef[:, :, : len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[: n_active + 1], target, n_active] = x[: n_active + 1]
        else:
            x, idx, n_iter = result
            coef[idx, target] = x
        iter_counts.append(n_iter)

    if n_targets == 1:
        iter_counts = iter_counts[0]

    if return_n_iter:
        return np.squeeze(coef), iter_counts
    return np.squeeze(coef)
class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel):
    """Orthogonal Matching Pursuit model (OMP).
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    n_nonzero_coefs : int, default=None
        Desired number of non-zero entries in the solution. Ignored if `tol` is set.
        When `None` and `tol` is also `None`, this value is either set to 10% of
        `n_features` or 1, whichever is greater.
    tol : float, default=None
        Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    precompute : 'auto' or bool, default='auto'
        Whether to use a precomputed Gram and Xy matrix to speed up
        calculations. Improves performance when :term:`n_targets` or
        :term:`n_samples` is very large.
    Attributes
    ----------
    coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formula).
    intercept_ : float or ndarray of shape (n_targets,)
        Independent term in decision function.
    n_iter_ : int or array-like
        Number of active features across every target.
    n_nonzero_coefs_ : int or None
        The number of non-zero coefficients in the solution or `None` when `tol` is
        set. If `n_nonzero_coefs` is None and `tol` is None this value is either set
        to 10% of `n_features` or 1, whichever is greater.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
    orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
        problems using only the Gram matrix X.T * X and the product X.T * y.
    lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
    Lars : Least Angle Regression model a.k.a. LAR.
    LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
    sklearn.decomposition.sparse_encode : Generic sparse coding.
        Each column of the result is the solution to a Lasso problem.
    OrthogonalMatchingPursuitCV : Cross-validated
        Orthogonal Matching Pursuit model (OMP).
    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    Examples
    --------
    >>> from sklearn.linear_model import OrthogonalMatchingPursuit
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(noise=4, random_state=0)
    >>> reg = OrthogonalMatchingPursuit().fit(X, y)
    >>> reg.score(X, y)
    0.9991
    >>> reg.predict(X[:1,])
    array([-78.3854])
    """

    _parameter_constraints: dict = {
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "fit_intercept": ["boolean"],
        "precompute": [StrOptions({"auto"}), "boolean"],
    }

    def __init__(
        self,
        *,
        n_nonzero_coefs=None,
        tol=None,
        fit_intercept=True,
        precompute="auto",
    ):
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.precompute = precompute

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = validate_data(
            self, X, y, multi_output=True, y_numeric=True, dtype=FLOAT_DTYPES
        )
        n_features = X.shape[1]

        # Center/scale data and (optionally) precompute Gram = X.T X and
        # Xy = X.T y, depending on self.precompute.
        X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(
            X,
            y,
            None,
            self.precompute,
            self.fit_intercept,
            copy=True,
            check_gram=False,
        )

        # The solvers below expect 2D targets.
        if y.ndim == 1:
            y = y[:, np.newaxis]

        # Resolve the effective stopping criterion: `tol` takes precedence
        # over `n_nonzero_coefs`; the default is 10% of the features (>= 1).
        if self.tol is not None:
            self.n_nonzero_coefs_ = None
        elif self.n_nonzero_coefs is None:
            # default for n_nonzero_coefs is 0.1 * n_features
            # but at least one.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs

        if Gram is False:
            # No precomputed Gram matrix: run OMP directly on X.
            solution, self.n_iter_ = orthogonal_mp(
                X,
                y,
                n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol,
                precompute=False,
                copy_X=True,
                return_n_iter=True,
            )
        else:
            # Gram variant needs the target norms only when `tol` is used.
            squared_norms = np.sum(y**2, axis=0) if self.tol is not None else None
            solution, self.n_iter_ = orthogonal_mp_gram(
                Gram,
                Xy=Xy,
                n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol,
                norms_squared=squared_norms,
                copy_Gram=True,
                copy_Xy=True,
                return_n_iter=True,
            )
        self.coef_ = solution.T
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
def _omp_path_residues(
    X_train,
    y_train,
    X_test,
    y_test,
    copy=True,
    fit_intercept=True,
    max_iter=100,
):
    """Compute the residues on left-out data for a full LARS path.
    Parameters
    ----------
    X_train : ndarray of shape (n_samples, n_features)
        The data to fit the LARS on.
    y_train : ndarray of shape (n_samples)
        The target variable to fit LARS on.
    X_test : ndarray of shape (n_samples, n_features)
        The data to compute the residues on.
    y_test : ndarray of shape (n_samples)
        The target variable to compute the residues on.
    copy : bool, default=True
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    max_iter : int, default=100
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.
    Returns
    -------
    residues : ndarray of shape (n_samples, max_features)
        Residues of the prediction on the test data.
    """
    if copy:
        # Centering below mutates the arrays in place; work on copies.
        X_train, y_train = X_train.copy(), y_train.copy()
        X_test, y_test = X_test.copy(), y_test.copy()

    if fit_intercept:
        # Center both splits with the training statistics only.
        feature_means = X_train.mean(axis=0)
        X_train -= feature_means
        X_test -= feature_means
        target_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= target_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= target_mean

    # Full coefficient path: one column per number of active atoms.
    path = orthogonal_mp(
        X_train,
        y_train,
        n_nonzero_coefs=max_iter,
        tol=None,
        precompute=False,
        copy_X=False,
        return_path=True,
    )
    if path.ndim == 1:
        path = path[:, np.newaxis]

    return np.dot(path.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(RegressorMixin, LinearModel):
"""Cross-validated Orthogonal Matching Pursuit model (OMP).
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
copy : bool, default=True
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
max_iter : int, default=None
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : int, cross-validation generator or iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool or int, default=False
Sets the verbosity amount.
Attributes
----------
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function.
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_ridge.py | sklearn/linear_model/_ridge.py | """
Ridge regression
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from functools import partial
from numbers import Integral, Real
import numpy as np
from scipy import linalg, optimize, sparse
from scipy.sparse import linalg as sp_linalg
from sklearn.base import (
BaseEstimator,
MultiOutputMixin,
RegressorMixin,
_fit_context,
is_classifier,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._base import (
LinearClassifierMixin,
LinearModel,
_preprocess_data,
_rescale_data,
)
from sklearn.linear_model._sag import sag_solver
from sklearn.metrics import check_scoring, get_scorer, get_scorer_names
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import (
Bunch,
check_array,
check_consistent_length,
check_scalar,
column_or_1d,
compute_sample_weight,
)
from sklearn.utils._array_api import (
_is_numpy_namespace,
_max_precision_float_dtype,
_ravel,
device,
get_namespace,
get_namespace_and_device,
move_to,
)
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.extmath import row_norms, safe_sparse_dot
from sklearn.utils.fixes import _sparse_linalg_cg
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
validate_data,
)
def _get_rescaled_operator(X, X_offset, sample_weight_sqrt):
"""Create LinearOperator for matrix products with implicit centering.
Matrix product `LinearOperator @ coef` returns `(X - X_offset) @ coef`.
"""
def matvec(b):
return X.dot(b) - sample_weight_sqrt * b.dot(X_offset)
def rmatvec(b):
return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt)
X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
return X1
def _solve_sparse_cg(
    X,
    y,
    alpha,
    max_iter=None,
    tol=1e-4,
    verbose=0,
    X_offset=None,
    X_scale=None,
    sample_weight_sqrt=None,
):
    """Solve (multi-target) ridge via conjugate gradient on the normal equations.

    Chooses the dual (kernel) formulation when n_features > n_samples, and
    the primal formulation otherwise. Returns coefficients of shape
    (n_targets, n_features).
    """
    if sample_weight_sqrt is None:
        sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)

    n_samples, n_features = X.shape

    if X_offset is None or X_scale is None:
        operator = sp_linalg.aslinearoperator(X)
    else:
        # Implicitly centered operator: avoids densifying a sparse X.
        operator = _get_rescaled_operator(X, X_offset / X_scale, sample_weight_sqrt)

    coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)

    underdetermined = n_features > n_samples
    if underdetermined:

        def _make_regularized_mv(penalty):
            def _mv(v):
                return operator.matvec(operator.rmatvec(v)) + penalty * v

            return _mv

    else:

        def _make_regularized_mv(penalty):
            def _mv(v):
                return operator.rmatvec(operator.matvec(v)) + penalty * v

            return _mv

    for i in range(y.shape[1]):
        target = y[:, i]
        mv = _make_regularized_mv(alpha[i])
        if underdetermined:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype
            )
            dual, info = _sparse_linalg_cg(C, target, rtol=tol)
            coefs[i] = operator.rmatvec(dual)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            rhs = operator.rmatvec(target)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype
            )
            coefs[i], info = _sparse_linalg_cg(C, rhs, maxiter=max_iter, rtol=tol)

        if info < 0:
            raise ValueError("Failed with error code %d" % info)

        if max_iter is None and info > 0 and verbose:
            warnings.warn(
                "sparse_cg did not converge after %d iterations." % info,
                ConvergenceWarning,
            )
    return coefs
def _solve_lsqr(
X,
y,
*,
alpha,
fit_intercept=True,
max_iter=None,
tol=1e-4,
X_offset=None,
X_scale=None,
sample_weight_sqrt=None,
):
"""Solve Ridge regression via LSQR.
We expect that y is always mean centered.
If X is dense, we expect it to be mean centered such that we can solve
||y - Xw||_2^2 + alpha * ||w||_2^2
If X is sparse, we expect X_offset to be given such that we can solve
||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2
With sample weights S=diag(sample_weight), this becomes
||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2
and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In
this case, X_offset is the sample_weight weighted mean of X before scaling by
sqrt(S). The objective then reads
||y - (X - sqrt(S) X_offset) w)||_2^2 + alpha * ||w||_2^2
"""
if sample_weight_sqrt is None:
sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
if sparse.issparse(X) and fit_intercept:
X_offset_scale = X_offset / X_scale
X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt)
else:
# No need to touch anything
X1 = X
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(
X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter
)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Primal closed-form ridge solve: w = inv(X^t X + alpha*Id) * X.T y."""
    n_features = X.shape[1]
    n_targets = y.shape[1]

    gram = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)

    if np.array_equal(alpha, len(alpha) * [alpha[0]]):
        # A single shared penalty: one factorization covers all targets.
        gram.flat[:: n_features + 1] += alpha[0]
        return linalg.solve(gram, Xy, assume_a="pos", overwrite_a=True).T

    # One penalty per target: solve each system separately, restoring the
    # diagonal of the Gram matrix between solves.
    coefs = np.empty([n_targets, n_features], dtype=X.dtype)
    for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
        gram.flat[:: n_features + 1] += current_alpha
        coef[:] = linalg.solve(gram, target, assume_a="pos", overwrite_a=False).ravel()
        gram.flat[:: n_features + 1] -= current_alpha
    return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[:: n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, assume_a="pos", overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn(
"Singular matrix in solving dual problem. Using "
"least-squares solution instead."
)
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[:: n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[:: n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(
K, target, assume_a="pos", overwrite_a=False
).ravel()
K.flat[:: n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha, xp=None):
    """Ridge solve through the thin SVD of X (Array API compatible)."""
    xp, _ = get_namespace(X, xp=xp)
    U, s, Vt = xp.linalg.svd(X, full_matrices=False)
    # Drop numerically-zero singular values (same cutoff as scipy.linalg.pinv).
    nonzero = s > 1e-15
    s_kept = s[nonzero][:, None]
    # Shrinkage factors s / (s^2 + alpha); zero rows for discarded components.
    shrink = xp.zeros((s.shape[0], alpha.shape[0]), dtype=X.dtype, device=device(X))
    shrink[nonzero] = s_kept / (s_kept**2 + alpha)
    projected = shrink * (U.T @ y)
    return (Vt.T @ projected).T
def _solve_lbfgs(
X,
y,
alpha,
positive=True,
max_iter=None,
tol=1e-4,
X_offset=None,
X_scale=None,
sample_weight_sqrt=None,
):
"""Solve ridge regression with LBFGS.
The main purpose is fitting with forcing coefficients to be positive.
For unconstrained ridge regression, there are faster dedicated solver methods.
Note that with positive bounds on the coefficients, LBFGS seems faster
than scipy.optimize.lsq_linear.
"""
n_samples, n_features = X.shape
options = {}
if max_iter is not None:
options["maxiter"] = max_iter
config = {
"method": "L-BFGS-B",
"tol": tol,
"jac": True,
"options": options,
}
if positive:
config["bounds"] = [(0, np.inf)] * n_features
if X_offset is not None and X_scale is not None:
X_offset_scale = X_offset / X_scale
else:
X_offset_scale = None
if sample_weight_sqrt is None:
sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
for i in range(y.shape[1]):
x0 = np.zeros((n_features,))
y_column = y[:, i]
def func(w):
residual = X.dot(w) - y_column
if X_offset_scale is not None:
residual -= sample_weight_sqrt * w.dot(X_offset_scale)
f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
grad = X.T @ residual + alpha[i] * w
if X_offset_scale is not None:
grad -= X_offset_scale * residual.dot(sample_weight_sqrt)
return f, grad
result = optimize.minimize(func, x0, **config)
if not result["success"]:
warnings.warn(
(
"The lbfgs solver did not converge. Try increasing max_iter "
f"or tol. Currently: max_iter={max_iter} and tol={tol}"
),
ConvergenceWarning,
)
coefs[i] = result["x"]
return coefs
def _get_valid_accept_sparse(is_X_sparse, solver):
if is_X_sparse and solver in ["auto", "sag", "saga"]:
return "csr"
else:
return ["csr", "csc", "coo"]
@validate_params(
    {
        "X": ["array-like", "sparse matrix", sp_linalg.LinearOperator],
        "y": ["array-like"],
        "alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
        "sample_weight": [
            Interval(Real, None, None, closed="neither"),
            "array-like",
            None,
        ],
        "solver": [
            StrOptions(
                {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"}
            )
        ],
        "max_iter": [Interval(Integral, 0, None, closed="left"), None],
        "tol": [Interval(Real, 0, None, closed="left")],
        "verbose": ["verbose"],
        "positive": ["boolean"],
        "random_state": ["random_state"],
        "return_n_iter": ["boolean"],
        "return_intercept": ["boolean"],
        "check_input": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def ridge_regression(
    X,
    y,
    alpha,
    *,
    sample_weight=None,
    solver="auto",
    max_iter=None,
    tol=1e-4,
    verbose=0,
    positive=False,
    random_state=None,
    return_n_iter=False,
    return_intercept=False,
    check_input=True,
):
    """Solve the ridge equation by the method of normal equations.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator} of shape \
        (n_samples, n_features)
        Training data.
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values.
    alpha : float or array-like of shape (n_targets,)
        Constant that multiplies the L2 term, controlling regularization
        strength. `alpha` must be a non-negative float i.e. in `[0, inf)`.
        When `alpha = 0`, the objective is equivalent to ordinary least
        squares, solved by the :class:`LinearRegression` object. For numerical
        reasons, using `alpha = 0` with the `Ridge` object is not advised.
        Instead, you should use the :class:`LinearRegression` object.
        If an array is passed, penalties are assumed to be specific to the
        targets. Hence they must correspond in number.
    sample_weight : float or array-like of shape (n_samples,), default=None
        Individual weights for each sample. If given a float, every sample
        will have the same weight. If sample_weight is not None and
        solver='auto', the solver will be set to 'cholesky'.
        .. versionadded:: 0.17
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
            'sag', 'saga', 'lbfgs'}, default='auto'
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. It is the most stable solver, in particular more stable
          for singular matrices than 'cholesky' at the cost of being slower.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
          procedure.
        - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
          its improved, unbiased version named SAGA. Both methods also use an
          iterative procedure, and are often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' and
          'saga' fast convergence is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.
        - 'lbfgs' uses L-BFGS-B algorithm implemented in
          `scipy.optimize.minimize`. It can be used only when `positive`
          is True.
        All solvers except 'svd' support both dense and sparse data. However, only
        'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when
        `fit_intercept` is True.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.
    max_iter : int, default=None
        Maximum number of iterations for conjugate gradient solver.
        For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' and saga solver, the default value is
        1000. For 'lbfgs' solver, the default value is 15000.
    tol : float, default=1e-4
        Precision of the solution. Note that `tol` has no effect for solvers 'svd' and
        'cholesky'.
        .. versionchanged:: 1.2
           Default value changed from 1e-3 to 1e-4 for consistency with other linear
           models.
    verbose : int, default=0
        Verbosity level. Setting verbose > 0 will display additional
        information depending on the solver used.
    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive.
        Only 'lbfgs' solver is supported in this case.
    random_state : int, RandomState instance, default=None
        Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
        See :term:`Glossary <random_state>` for details.
    return_n_iter : bool, default=False
        If True, the method also returns `n_iter`, the actual number of
        iteration performed by the solver.
        .. versionadded:: 0.17
    return_intercept : bool, default=False
        If True and if X is sparse, the method also returns the intercept,
        and the solver is automatically changed to 'sag'. This is only a
        temporary fix for fitting the intercept with sparse data. For dense
        data, use sklearn.linear_model._preprocess_data before your regression.
        .. versionadded:: 0.17
    check_input : bool, default=True
        If False, the input arrays X and y will not be checked.
        .. versionadded:: 0.21
    Returns
    -------
    coef : ndarray of shape (n_features,) or (n_targets, n_features)
        Weight vector(s).
    n_iter : int, optional
        The actual number of iteration performed by the solver.
        Only returned if `return_n_iter` is True.
    intercept : float or ndarray of shape (n_targets,)
        The intercept of the model. Only returned if `return_intercept`
        is True and if X is a scipy sparse array.
    Notes
    -----
    This function won't compute the intercept.
    Regularization improves the conditioning of the problem and
    reduces the variance of the estimates. Larger values specify stronger
    regularization. Alpha corresponds to ``1 / (2C)`` in other linear
    models such as :class:`~sklearn.linear_model.LogisticRegression` or
    :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
    assumed to be specific to the targets. Hence they must correspond in
    number.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.linear_model import ridge_regression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 4)
    >>> y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + 0.1 * rng.standard_normal(100)
    >>> coef, intercept = ridge_regression(X, y, alpha=1.0, return_intercept=True,
    ...                                    random_state=0)
    >>> coef
    array([ 1.97, -1., -2.69e-3, -9.27e-4 ])
    >>> intercept
    np.float64(-.0012)
    """
    # Thin public wrapper: all the work happens in _ridge_regression, which
    # is also shared with the Ridge estimators.
    solver_kwargs = dict(
        sample_weight=sample_weight,
        solver=solver,
        max_iter=max_iter,
        tol=tol,
        verbose=verbose,
        positive=positive,
        random_state=random_state,
        return_n_iter=return_n_iter,
        return_intercept=return_intercept,
        X_scale=None,
        X_offset=None,
        check_input=check_input,
    )
    return _ridge_regression(X, y, alpha, **solver_kwargs)
def _ridge_regression(
    X,
    y,
    alpha,
    sample_weight=None,
    solver="auto",
    max_iter=None,
    tol=1e-4,
    verbose=0,
    positive=False,
    random_state=None,
    return_n_iter=False,
    return_intercept=False,
    return_solver=False,
    X_scale=None,
    X_offset=None,
    check_input=True,
    fit_intercept=False,
):
    """Validate inputs, resolve the solver, and dispatch the ridge solve.

    Internal workhorse behind :func:`ridge_regression` and the Ridge
    estimators. Returns ``coef`` and, depending on the ``return_*`` flags,
    also ``n_iter``, ``intercept`` and the resolved solver name.
    """
    xp, is_array_api_compliant, device_ = get_namespace_and_device(
        X, y, sample_weight, X_scale, X_offset
    )
    is_numpy_namespace = _is_numpy_namespace(xp)
    X_is_sparse = sparse.issparse(X)
    has_sw = sample_weight is not None
    # Turn solver="auto" into a concrete solver name (may warn under
    # Array API dispatch).
    solver = resolve_solver(solver, positive, return_intercept, X_is_sparse, xp)
    if is_numpy_namespace and not X_is_sparse:
        X = np.asarray(X)
    # Only the SVD path supports non-numpy (Array API) namespaces.
    if not is_numpy_namespace and solver != "svd":
        raise ValueError(
            f"Array API dispatch to namespace {xp.__name__} only supports "
            f"solver 'svd'. Got '{solver}'."
        )
    if positive and solver != "lbfgs":
        raise ValueError(
            "When positive=True, only 'lbfgs' solver can be used. "
            f"Please change solver {solver} to 'lbfgs' "
            "or set positive=False."
        )
    if solver == "lbfgs" and not positive:
        raise ValueError(
            "'lbfgs' solver can be used only when positive=True. "
            "Please use another solver."
        )
    # This guard also guarantees the `intercept` variable assigned in the
    # sag/saga branch below is defined whenever return_intercept is True.
    if return_intercept and solver != "sag":
        raise ValueError(
            "In Ridge, only 'sag' solver can directly fit the "
            "intercept. Please change solver to 'sag' or set "
            "return_intercept=False."
        )
    if check_input:
        _dtype = [xp.float64, xp.float32]
        _accept_sparse = _get_valid_accept_sparse(X_is_sparse, solver)
        X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C")
        y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
        check_consistent_length(X, y)
    n_samples, n_features = X.shape
    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))
    # All per-solver routines below assume 2D y of shape (n_samples, n_targets).
    if y.ndim == 1:
        y = xp.reshape(y, (-1, 1))
    n_samples_, n_targets = y.shape
    if n_samples != n_samples_:
        raise ValueError(
            "Number of samples in X and y does not correspond: %d != %d"
            % (n_samples, n_samples_)
        )
    if has_sw:
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        if solver not in ["sag", "saga"]:
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y, sample_weight_sqrt = _rescale_data(X, y, sample_weight)
    # Some callers of this method might pass alpha as single
    # element array which already has been validated.
    if alpha is not None and not isinstance(alpha, type(xp.asarray([0.0]))):
        alpha = check_scalar(
            alpha,
            "alpha",
            target_type=numbers.Real,
            min_val=0.0,
            include_boundaries="left",
        )
    # There should be either 1 or n_targets penalties
    alpha = _ravel(xp.asarray(alpha, device=device_, dtype=X.dtype), xp=xp)
    if alpha.shape[0] not in [1, n_targets]:
        raise ValueError(
            "Number of targets and number of penalties do not correspond: %d != %d"
            % (alpha.shape[0], n_targets)
        )
    # Broadcast a single shared penalty to every target.
    if alpha.shape[0] == 1 and n_targets > 1:
        alpha = xp.full(
            shape=(n_targets,), fill_value=alpha[0], dtype=alpha.dtype, device=device_
        )
    # n_iter stays None for solvers that don't report an iteration count.
    n_iter = None
    if solver == "sparse_cg":
        coef = _solve_sparse_cg(
            X,
            y,
            alpha,
            max_iter=max_iter,
            tol=tol,
            verbose=verbose,
            X_offset=X_offset,
            X_scale=X_scale,
            sample_weight_sqrt=sample_weight_sqrt if has_sw else None,
        )
    elif solver == "lsqr":
        coef, n_iter = _solve_lsqr(
            X,
            y,
            alpha=alpha,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            X_offset=X_offset,
            X_scale=X_scale,
            sample_weight_sqrt=sample_weight_sqrt if has_sw else None,
        )
    elif solver == "cholesky":
        if n_features > n_samples:
            # Dual (kernel) formulation is cheaper when n_features > n_samples.
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)
                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = "svd"
        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = "svd"
    elif solver in ["sag", "saga"]:
        # precompute max_squared_sum for all targets
        max_squared_sum = row_norms(X, squared=True).max()
        coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        intercept = np.zeros((y.shape[1],), dtype=X.dtype)
        # One SAG run per target; the extra trailing coefficient holds the
        # intercept when return_intercept is True.
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            init = {
                "coef": np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype)
            }
            coef_, n_iter_, _ = sag_solver(
                X,
                target.ravel(),
                sample_weight,
                "squared",
                alpha_i,
                0,
                max_iter,
                tol,
                verbose,
                random_state,
                False,
                max_squared_sum,
                init,
                is_saga=solver == "saga",
            )
            if return_intercept:
                coef[i] = coef_[:-1]
                intercept[i] = coef_[-1]
            else:
                coef[i] = coef_
            n_iter[i] = n_iter_
        # Scalar intercept for single-target problems.
        if intercept.shape[0] == 1:
            intercept = intercept[0]
    elif solver == "lbfgs":
        coef = _solve_lbfgs(
            X,
            y,
            alpha,
            positive=positive,
            tol=tol,
            max_iter=max_iter,
            X_offset=X_offset,
            X_scale=X_scale,
            sample_weight_sqrt=sample_weight_sqrt if has_sw else None,
        )
    # Deliberately `if`, not `elif`: the cholesky branch above may have
    # switched `solver` to "svd" after a LinAlgError.
    if solver == "svd":
        if X_is_sparse:
            raise TypeError("SVD solver does not support sparse inputs currently")
        coef = _solve_svd(X, y, alpha, xp)
    # Flatten back to 1D for single-target problems.
    if n_targets == 1:
        coef = _ravel(coef)
    coef = xp.asarray(coef)
    if return_n_iter and return_intercept:
        res = coef, n_iter, intercept
    elif return_intercept:
        res = coef, intercept
    elif return_n_iter:
        res = coef, n_iter
    else:
        res = coef
    return (*res, solver) if return_solver else res
def resolve_solver(solver, positive, return_intercept, is_sparse, xp):
    """Resolve solver='auto' to a concrete solver for the given context."""
    # Explicit solver choices pass through untouched.
    if solver != "auto":
        return solver

    numpy_preferred = resolve_solver_for_numpy(positive, return_intercept, is_sparse)
    if _is_numpy_namespace(xp):
        return numpy_preferred

    if positive:
        raise ValueError(
            "The solvers that support positive fitting do not support "
            f"Array API dispatch to namespace {xp.__name__}. Please "
            "either disable Array API dispatch, or use a numpy-like "
            "namespace, or set `positive=False`."
        )

    # At the moment, Array API dispatch only supports the "svd" solver.
    solver = "svd"
    if solver != numpy_preferred:
        warnings.warn(
            f"Using Array API dispatch to namespace {xp.__name__} with "
            f"`solver='auto'` will result in using the solver '{solver}'. "
            "The results may differ from those when using a Numpy array, "
            f"because in that case the preferred solver would be {numpy_preferred}. "
            f"Set `solver='{solver}'` to suppress this warning."
        )
    return solver
def resolve_solver_for_numpy(positive, return_intercept, is_sparse):
    """Pick the automatic solver for numpy inputs."""
    if positive:
        # Only lbfgs enforces the positivity constraint.
        return "lbfgs"
    if return_intercept:
        # sag supports fitting intercept directly
        return "sag"
    return "sparse_cg" if is_sparse else "cholesky"
class _BaseRidge(LinearModel, metaclass=ABCMeta):
    """Abstract base class for ridge regression estimators."""

    # Declarative validation spec consumed by scikit-learn's parameter
    # validation machinery; keys mirror the `__init__` arguments below.
    _parameter_constraints: dict = {
        "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray],
        "fit_intercept": ["boolean"],
        "copy_X": ["boolean"],
        "max_iter": [Interval(Integral, 1, None, closed="left"), None],
        "tol": [Interval(Real, 0, None, closed="left")],
        # Full set of supported solvers; which ones are actually legal for a
        # given configuration is checked in `fit`.
        "solver": [
            StrOptions(
                {"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"}
            )
        ],
        "positive": ["boolean"],
        "random_state": ["random_state"],
    }
    @abstractmethod
    def __init__(
        self,
        alpha=1.0,
        *,
        fit_intercept=True,
        copy_X=True,
        max_iter=None,
        tol=1e-4,
        solver="auto",
        positive=False,
        random_state=None,
    ):
        # Hyperparameters are stored verbatim (scikit-learn convention: no
        # validation or transformation in __init__; both happen in `fit`).
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.positive = positive
        self.random_state = random_state
def fit(self, X, y, sample_weight=None):
xp, is_array_api_compliant = get_namespace(X, y, sample_weight)
if self.solver == "lbfgs" and not self.positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if self.positive:
if self.solver not in ["auto", "lbfgs"]:
raise ValueError(
f"solver='{self.solver}' does not support positive fitting. Please"
" set the solver to 'auto' or 'lbfgs', or set `positive=False`"
)
else:
solver = self.solver
elif sparse.issparse(X) and self.fit_intercept:
if self.solver not in ["auto", "lbfgs", "lsqr", "sag", "sparse_cg"]:
raise ValueError(
"solver='{}' does not support fitting the intercept "
"on sparse data. Please set the solver to 'auto' or "
"'lsqr', 'sparse_cg', 'sag', 'lbfgs' "
"or set `fit_intercept=False`".format(self.solver)
)
if self.solver in ["lsqr", "lbfgs"]:
solver = self.solver
elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4:
warnings.warn(
'"sag" solver requires many iterations to fit '
"an intercept with sparse inputs. Either set the "
'solver to "auto" or "sparse_cg", or set a low '
'"tol" and a high "max_iter" (especially if inputs are '
"not standardized)."
)
solver = "sag"
else:
solver = "sparse_cg"
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# when X is sparse we only remove offset from y
X, y, X_offset, y_offset, X_scale, _ = _preprocess_data(
X,
y,
fit_intercept=self.fit_intercept,
copy=self.copy_X,
sample_weight=sample_weight,
rescale_with_sw=False,
)
if solver == "sag" and sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_, self.solver_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver="sag",
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=True,
return_solver=True,
check_input=False,
)
# add the offset which was subtracted by _preprocess_data
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
# required to fit intercept with sparse_cg and lbfgs solver
params = {"X_offset": X_offset, "X_scale": X_scale}
else:
# for dense matrices or when intercept is set to 0
params = {}
self.coef_, self.n_iter_, self.solver_ = _ridge_regression(
X,
y,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_perceptron.py | sklearn/linear_model/_perceptron.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
from sklearn.linear_model._stochastic_gradient import BaseSGDClassifier
from sklearn.utils._param_validation import Interval, StrOptions
class Perceptron(BaseSGDClassifier):
    """Linear perceptron classifier.

    The implementation is a wrapper around :class:`~sklearn.linear_model.SGDClassifier`
    by fixing the `loss` and `learning_rate` parameters as::

        SGDClassifier(loss="perceptron", learning_rate="constant")

    Other available parameters are described below and are forwarded to
    :class:`~sklearn.linear_model.SGDClassifier`.

    Read more in the :ref:`User Guide <perceptron>`.

    Parameters
    ----------
    penalty : {'l2','l1','elasticnet'}, default=None
        The penalty (aka regularization term) to be used.

    alpha : float, default=0.0001
        Constant that multiplies the regularization term if regularization is
        used.

    l1_ratio : float, default=0.15
        The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`.
        `l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1.
        Only used if `penalty='elasticnet'`.

        .. versionadded:: 0.24

    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.

    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`partial_fit` method.

        .. versionadded:: 0.19

    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).

        .. versionadded:: 0.19

    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.

    verbose : int, default=0
        The verbosity level.

    eta0 : float, default=1
        Constant by which the updates are multiplied.

    n_jobs : int, default=None
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance or None, default=0
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a stratified fraction of training data as validation and terminate
        training when validation score is not improving by at least `tol` for
        `n_iter_no_change` consecutive epochs.

        .. versionadded:: 0.20

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.

        .. versionadded:: 0.20

    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.

        .. versionadded:: 0.20

    class_weight : dict, {class_label: weight} or "balanced", default=None
        Preset for the class_weight fit parameter.

        Weights associated with classes. If not given, all classes
        are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution. See
        :term:`the Glossary <warm_start>`.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The unique classes labels.

    coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
        (n_classes, n_features)
        Weights assigned to the features.

    intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.

    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples + 1)``.

    See Also
    --------
    sklearn.linear_model.SGDClassifier : Linear classifiers
        (SVM, logistic regression, etc.) with SGD training.

    Notes
    -----
    ``Perceptron`` is a classification algorithm which shares the same
    underlying implementation with ``SGDClassifier``. In fact,
    ``Perceptron()`` is equivalent to `SGDClassifier(loss="perceptron",
    eta0=1, learning_rate="constant", penalty=None)`.

    References
    ----------
    https://en.wikipedia.org/wiki/Perceptron and references therein.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.linear_model import Perceptron
    >>> X, y = load_digits(return_X_y=True)
    >>> clf = Perceptron(tol=1e-3, random_state=0)
    >>> clf.fit(X, y)
    Perceptron()
    >>> clf.score(X, y)
    0.939...
    """

    # Inherit SGDClassifier's validation spec, drop the entries for parameters
    # this wrapper does not expose (`loss` is fixed to "perceptron" below and
    # `average` is not forwarded), and narrow the remaining constraints to the
    # Perceptron-specific ranges.
    _parameter_constraints: dict = {**BaseSGDClassifier._parameter_constraints}
    _parameter_constraints.pop("loss")
    _parameter_constraints.pop("average")
    _parameter_constraints.update(
        {
            "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None],
            "alpha": [Interval(Real, 0, None, closed="left")],
            "l1_ratio": [Interval(Real, 0, 1, closed="both")],
            "eta0": [Interval(Real, 0, None, closed="neither")],
        }
    )

    def __init__(
        self,
        *,
        penalty=None,
        alpha=0.0001,
        l1_ratio=0.15,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        shuffle=True,
        verbose=0,
        eta0=1.0,
        n_jobs=None,
        random_state=0,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        class_weight=None,
        warm_start=False,
    ):
        # Fix `loss` and `learning_rate` to the perceptron configuration and
        # forward every user-settable hyperparameter to SGDClassifier.
        super().__init__(
            loss="perceptron",
            penalty=penalty,
            alpha=alpha,
            l1_ratio=l1_ratio,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            learning_rate="constant",
            eta0=eta0,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            power_t=0.5,
            warm_start=warm_start,
            class_weight=class_weight,
            n_jobs=n_jobs,
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_theil_sen.py | sklearn/linear_model/_theil_sen.py | """
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from itertools import combinations
from numbers import Integral, Real
import numpy as np
from joblib import effective_n_jobs
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from scipy.special import binom
from sklearn.base import RegressorMixin, _fit_context
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._base import LinearModel
from sklearn.utils import check_random_state
from sklearn.utils._param_validation import Interval
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import validate_data
# Machine epsilon for float64, used below as a strictly-positive tolerance
# when guarding against division by (near-)zero norms.
_EPSILON = np.finfo(np.double).eps
def _modified_weiszfeld_step(X, x_old):
    """Perform one modified Weiszfeld iteration towards the spatial median.

    Starting from the current estimate ``x_old``, compute the next iterate of
    the iteratively re-weighted least-squares scheme used to approximate the
    spatial (L1) median of the rows of ``X``.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    x_old : ndarray of shape = (n_features,)
        Current start vector.

    Returns
    -------
    x_new : ndarray of shape (n_features,)
        New iteration step.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      T. Kärkkäinen and S. Äyrämö
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    """
    eps = np.finfo(np.double).eps
    residuals = X - x_old
    distances = np.sqrt(np.sum(residuals**2, axis=1))
    far_mask = distances >= eps
    # If any distance collapsed below eps, x_old coincides with a sample.
    is_x_old_in_X = int(far_mask.sum() < X.shape[0])
    distances = distances[far_mask][:, np.newaxis]
    quotient_norm = linalg.norm(np.sum(residuals[far_mask] / distances, axis=0))
    if quotient_norm > eps:
        new_direction = np.sum(X[far_mask, :] / distances, axis=0) / np.sum(
            1 / distances, axis=0
        )
    else:
        # Degenerate direction: fall back to neutral values so the convex
        # combination below is well defined (avoids division by zero).
        new_direction = 1.0
        quotient_norm = 1.0
    pull = is_x_old_in_X / quotient_norm
    return max(0.0, 1.0 - pull) * new_direction + min(1.0, pull) * x_old
def _spatial_median(X, max_iter=300, tol=1.0e-3):
"""Spatial median (L1 median).
The spatial median is member of a class of so-called M-estimators which
are defined by an optimization problem. Given a number of p points in an
n-dimensional space, the point x minimizing the sum of all distances to the
p other points is called spatial median.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
max_iter : int, default=300
Maximum number of iterations.
tol : float, default=1.e-3
Stop the algorithm if spatial_median has converged.
Returns
-------
spatial_median : ndarray of shape = (n_features,)
Spatial median.
n_iter : int
Number of iterations needed.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
if X.shape[1] == 1:
return 1, np.median(X.ravel(), keepdims=True)
tol **= 2 # We are computing the tol on the squared norm
spatial_median_old = np.mean(X, axis=0)
for n_iter in range(max_iter):
spatial_median = _modified_weiszfeld_step(X, spatial_median_old)
if np.sum((spatial_median_old - spatial_median) ** 2) < tol:
break
else:
spatial_median_old = spatial_median
else:
warnings.warn(
"Maximum number of iterations {max_iter} reached in "
"spatial median for TheilSen regressor."
"".format(max_iter=max_iter),
ConvergenceWarning,
)
return n_iter, spatial_median
def _breakdown_point(n_samples, n_subsamples):
"""Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
"""
return (
1
- (
0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1)
+ n_subsamples
- 1
)
/ n_samples
)
def _lstsq(X, y, indices, fit_intercept):
"""Least Squares Estimator for TheilSenRegressor class.
This function calculates the least squares method on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if intercept is set to true.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Design matrix, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : ndarray of shape (n_samples,)
Target vector, where `n_samples` is the number of samples.
indices : ndarray of shape (n_subpopulation, n_subsamples)
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : ndarray of shape (n_subpopulation, n_features + intercept)
Solution matrix of n_subpopulation solved least square problems.
"""
fit_intercept = int(fit_intercept)
n_features = X.shape[1] + fit_intercept
n_subsamples = indices.shape[1]
weights = np.empty((indices.shape[0], n_features))
X_subpopulation = np.ones((n_subsamples, n_features))
# gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
y_subpopulation = np.zeros((max(n_subsamples, n_features)))
(lstsq,) = get_lapack_funcs(("gelss",), (X_subpopulation, y_subpopulation))
for index, subset in enumerate(indices):
X_subpopulation[:, fit_intercept:] = X[subset, :]
y_subpopulation[:n_subsamples] = y[subset]
weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features]
return weights
class TheilSenRegressor(RegressorMixin, LinearModel):
    """Theil-Sen Estimator: robust multivariate regression model.

    The algorithm calculates least square solutions on subsets with size
    n_subsamples of the samples in X. Any value of n_subsamples between the
    number of features and samples leads to an estimator with a compromise
    between robustness and efficiency. Since the number of least square
    solutions is "n_samples choose n_subsamples", it can be extremely large
    and can therefore be limited with max_subpopulation. If this limit is
    reached, the subsets are chosen randomly. In a final step, the spatial
    median (or L1 median) is calculated of all least square solutions.

    Read more in the :ref:`User Guide <theil_sen_regression>`.

    Parameters
    ----------
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations.

    max_subpopulation : int, default=1e4
        Instead of computing with a set of cardinality 'n choose k', where n is
        the number of samples and k is the number of subsamples (at least
        number of features), consider only a stochastic subpopulation of a
        given maximal size if 'n choose k' is larger than max_subpopulation.
        For other than small problem sizes this parameter will determine
        memory usage and runtime if n_subsamples is not changed. Note that the
        data type should be int but floats such as 1e4 can be accepted too.

    n_subsamples : int, default=None
        Number of samples to calculate the parameters. This is at least the
        number of features (plus 1 if fit_intercept=True) and the number of
        samples as a maximum. A lower number leads to a higher breakdown
        point and a low efficiency while a high number leads to a low
        breakdown point and a high efficiency. If None, take the
        minimum number of subsamples leading to maximal robustness.
        If n_subsamples is set to n_samples, Theil-Sen is identical to least
        squares.

    max_iter : int, default=300
        Maximum number of iterations for the calculation of spatial median.

    tol : float, default=1e-3
        Tolerance when calculating spatial median.

    random_state : int, RandomState instance or None, default=None
        A random number generator instance to define the state of the random
        permutations generator. Pass an int for reproducible output across
        multiple function calls.
        See :term:`Glossary <random_state>`.

    n_jobs : int, default=None
        Number of CPUs to use during the cross validation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : bool, default=False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : ndarray of shape (n_features,)
        Coefficients of the regression model (median of distribution).

    intercept_ : float
        Estimated intercept of regression model.

    breakdown_ : float
        Approximated breakdown point.

    n_iter_ : int
        Number of iterations needed for the spatial median.

    n_subpopulation_ : int
        Number of combinations taken into account from 'n choose k', where n is
        the number of samples and k is the number of subsamples.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    HuberRegressor : Linear regression model that is robust to outliers.
    RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
    SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.

    References
    ----------
    - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
      Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
      http://home.olemiss.edu/~xdang/papers/MTSE.pdf

    Examples
    --------
    >>> from sklearn.linear_model import TheilSenRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(
    ...     n_samples=200, n_features=2, noise=4.0, random_state=0)
    >>> reg = TheilSenRegressor(random_state=0).fit(X, y)
    >>> reg.score(X, y)
    0.9884
    >>> reg.predict(X[:1,])
    array([-31.5871])
    """

    _parameter_constraints: dict = {
        "fit_intercept": ["boolean"],
        # target_type should be Integral but can accept Real for backward compatibility
        "max_subpopulation": [Interval(Real, 1, None, closed="left")],
        "n_subsamples": [None, Integral],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "tol": [Interval(Real, 0.0, None, closed="left")],
        "random_state": ["random_state"],
        "n_jobs": [None, Integral],
        "verbose": ["verbose"],
    }

    def __init__(
        self,
        *,
        fit_intercept=True,
        max_subpopulation=1e4,
        n_subsamples=None,
        max_iter=300,
        tol=1.0e-3,
        random_state=None,
        n_jobs=None,
        verbose=False,
    ):
        # Hyperparameters are stored verbatim (scikit-learn convention);
        # validation happens at fit time via `_fit_context`.
        self.fit_intercept = fit_intercept
        self.max_subpopulation = max_subpopulation
        self.n_subsamples = n_subsamples
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.verbose = verbose

    def _check_subparams(self, n_samples, n_features):
        """Validate `n_subsamples` against the data shape and derive the
        subsample size and subpopulation size used during fitting."""
        n_subsamples = self.n_subsamples

        # Fitting an intercept adds one free parameter per subproblem.
        if self.fit_intercept:
            n_dim = n_features + 1
        else:
            n_dim = n_features

        if n_subsamples is not None:
            if n_subsamples > n_samples:
                raise ValueError(
                    "Invalid parameter since n_subsamples > "
                    "n_samples ({0} > {1}).".format(n_subsamples, n_samples)
                )
            if n_samples >= n_features:
                if n_dim > n_subsamples:
                    plus_1 = "+1" if self.fit_intercept else ""
                    raise ValueError(
                        "Invalid parameter since n_features{0} "
                        "> n_subsamples ({1} > {2})."
                        "".format(plus_1, n_dim, n_subsamples)
                    )
            else:  # if n_samples < n_features
                if n_subsamples != n_samples:
                    raise ValueError(
                        "Invalid parameter since n_subsamples != "
                        "n_samples ({0} != {1}) while n_samples "
                        "< n_features.".format(n_subsamples, n_samples)
                    )
        else:
            # Smallest admissible subsample size: maximal robustness.
            n_subsamples = min(n_dim, n_samples)

        # Cap the number of considered subsets at `max_subpopulation`.
        all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
        n_subpopulation = int(min(self.max_subpopulation, all_combinations))

        return n_subsamples, n_subpopulation

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit linear model.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Training data.

        y : ndarray of shape (n_samples,)
            Target values.

        Returns
        -------
        self : returns an instance of self.
            Fitted `TheilSenRegressor` estimator.
        """
        random_state = check_random_state(self.random_state)
        X, y = validate_data(self, X, y, y_numeric=True)
        n_samples, n_features = X.shape
        n_subsamples, self.n_subpopulation_ = self._check_subparams(
            n_samples, n_features
        )
        self.breakdown_ = _breakdown_point(n_samples, n_subsamples)

        if self.verbose:
            print("Breakdown point: {0}".format(self.breakdown_))
            print("Number of samples: {0}".format(n_samples))
            tol_outliers = int(self.breakdown_ * n_samples)
            print("Tolerable outliers: {0}".format(tol_outliers))
            print("Number of subpopulations: {0}".format(self.n_subpopulation_))

        # Determine indices of subpopulation
        if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
            # Exhaustive enumeration is affordable: use every subset.
            indices = list(combinations(range(n_samples), n_subsamples))
        else:
            # Otherwise draw `n_subpopulation_` random subsets, each sampled
            # without replacement.
            indices = [
                random_state.choice(n_samples, size=n_subsamples, replace=False)
                for _ in range(self.n_subpopulation_)
            ]

        # Solve the per-subset least-squares problems in parallel chunks.
        n_jobs = effective_n_jobs(self.n_jobs)
        index_list = np.array_split(indices, n_jobs)
        weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
            for job in range(n_jobs)
        )
        weights = np.vstack(weights)
        # The Theil-Sen estimate is the spatial median of all the solutions.
        self.n_iter_, coefs = _spatial_median(
            weights, max_iter=self.max_iter, tol=self.tol
        )

        if self.fit_intercept:
            self.intercept_ = coefs[0]
            self.coef_ = coefs[1:]
        else:
            self.intercept_ = 0.0
            self.coef_ = coefs

        return self
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_least_angle.py | sklearn/linear_model/_least_angle.py | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import sys
import warnings
from math import log
from numbers import Integral, Real
import numpy as np
from scipy import interpolate, linalg
from scipy.linalg.lapack import get_lapack_funcs
from sklearn.base import MultiOutputMixin, RegressorMixin, _fit_context
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._base import LinearModel, LinearRegression, _preprocess_data
from sklearn.model_selection import check_cv
# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs'
from sklearn.utils import Bunch, arrayfuncs, as_float_array, check_random_state
from sklearn.utils._metadata_requests import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils._param_validation import (
Hidden,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import validate_data
# Shared kwargs presumably forwarded to scipy.linalg.solve_triangular calls
# elsewhere in this module; check_finite=False skips the NaN/inf input scan
# for speed — NOTE(review): confirm inputs are validated before those calls.
SOLVE_TRIANGULAR_ARGS = {"check_finite": False}
@validate_params(
    {
        "X": [np.ndarray, None],
        "y": [np.ndarray, None],
        "Xy": [np.ndarray, None],
        "Gram": [StrOptions({"auto"}), "boolean", np.ndarray, None],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "alpha_min": [Interval(Real, 0, None, closed="left")],
        "method": [StrOptions({"lar", "lasso"})],
        "copy_X": ["boolean"],
        "eps": [Interval(Real, 0, None, closed="neither"), None],
        "copy_Gram": ["boolean"],
        "verbose": ["verbose"],
        "return_path": ["boolean"],
        "return_n_iter": ["boolean"],
        "positive": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def lars_path(
    X,
    y,
    Xy=None,
    *,
    Gram=None,
    max_iter=500,
    alpha_min=0,
    method="lar",
    copy_X=True,
    eps=np.finfo(float).eps,
    copy_Gram=True,
    verbose=0,
    return_path=True,
    return_n_iter=False,
    positive=False,
):
    """Compute Least Angle Regression or Lasso path using the LARS algorithm.

    The optimization objective for the case method='lasso' is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    in the case of method='lar', the objective function is only known in
    the form of an implicit equation (see discussion in [1]_).

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    X : None or ndarray of shape (n_samples, n_features)
        Input data. If X is `None`, Gram must also be `None`.
        If only the Gram matrix is available, use `lars_path_gram` instead.

    y : None or ndarray of shape (n_samples,)
        Input targets.

    Xy : array-like of shape (n_features,), default=None
        `Xy = X.T @ y` that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    Gram : None, 'auto', bool, ndarray of shape (n_features, n_features), \
            default=None
        Precomputed Gram matrix `X.T @ X`, if `'auto'`, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.

    max_iter : int, default=500
        Maximum number of iterations to perform, set to infinity for no limit.

    alpha_min : float, default=0
        Minimum correlation along the path. It corresponds to the
        regularization parameter `alpha` in the Lasso.

    method : {'lar', 'lasso'}, default='lar'
        Specifies the returned model. Select `'lar'` for Least Angle
        Regression, `'lasso'` for the Lasso.

    copy_X : bool, default=True
        If `False`, `X` is overwritten.

    eps : float, default=np.finfo(float).eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the `tol` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    copy_Gram : bool, default=True
        If `False`, `Gram` is overwritten.

    verbose : int, default=0
        Controls output verbosity.

    return_path : bool, default=True
        If `True`, returns the entire path, else returns only the
        last point of the path.

    return_n_iter : bool, default=False
        Whether to return the number of iterations.

    positive : bool, default=False
        Restrict coefficients to be >= 0.
        This option is only allowed with method 'lasso'. Note that the model
        coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha. Only coefficients up to the smallest alpha
        value (`alphas_[alphas_ > 0.].min()` when fit_path=True) reached by
        the stepwise Lars-Lasso algorithm are typically in congruence with the
        solution of the coordinate descent `lasso_path` function.

    Returns
    -------
    alphas : ndarray of shape (n_alphas + 1,)
        Maximum of covariances (in absolute value) at each iteration.
        `n_alphas` is either `max_iter`, `n_features`, or the
        number of nodes in the path with `alpha >= alpha_min`, whichever
        is smaller.

    active : ndarray of shape (n_alphas,)
        Indices of active variables at the end of the path.

    coefs : ndarray of shape (n_features, n_alphas + 1)
        Coefficients along the path.

    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is set
        to True.

    See Also
    --------
    lars_path_gram : Compute LARS path in the sufficient stats mode.
    lasso_path : Compute Lasso path with coordinate descent.
    LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
    Lars : Least Angle Regression model a.k.a. LAR.
    LassoLarsCV : Cross-validated Lasso, using the LARS algorithm.
    LarsCV : Cross-validated Least Angle Regression model.
    sklearn.decomposition.sparse_encode : Sparse coding.

    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://statweb.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <https://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_

    Examples
    --------
    >>> from sklearn.linear_model import lars_path
    >>> from sklearn.datasets import make_regression
    >>> X, y, true_coef = make_regression(
    ...    n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0
    ... )
    >>> true_coef
    array([ 0. ,  0. ,  0. , 97.9, 45.7])
    >>> alphas, _, estimated_coef = lars_path(X, y)
    >>> alphas.shape
    (3,)
    >>> estimated_coef
    array([[ 0.  ,  0.   ,  0.  ],
           [ 0.  ,  0.   ,  0.  ],
           [ 0.  ,  0.   ,  0.  ],
           [ 0.  , 46.96, 97.99],
           [ 0.  ,  0.   , 45.70]])
    """
    if X is None and Gram is not None:
        # A precomputed Gram without X cannot be handled by this entry point;
        # direct users to the sufficient-stats variant. Fix: the two string
        # literals previously concatenated without a separator, producing the
        # garbled message "...Gram is not NoneUse lars_path_gram...".
        raise ValueError(
            "X cannot be None if Gram is not None. "
            "Use lars_path_gram to avoid passing X and y."
        )
    # Delegate to the shared solver; n_samples is inferred from X.
    return _lars_path_solver(
        X=X,
        y=y,
        Xy=Xy,
        Gram=Gram,
        n_samples=None,
        max_iter=max_iter,
        alpha_min=alpha_min,
        method=method,
        copy_X=copy_X,
        eps=eps,
        copy_Gram=copy_Gram,
        verbose=verbose,
        return_path=return_path,
        return_n_iter=return_n_iter,
        positive=positive,
    )
@validate_params(
    {
        "Xy": [np.ndarray],
        "Gram": [np.ndarray],
        "n_samples": [Interval(Integral, 0, None, closed="left")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "alpha_min": [Interval(Real, 0, None, closed="left")],
        "method": [StrOptions({"lar", "lasso"})],
        "copy_X": ["boolean"],
        "eps": [Interval(Real, 0, None, closed="neither"), None],
        "copy_Gram": ["boolean"],
        "verbose": ["verbose"],
        "return_path": ["boolean"],
        "return_n_iter": ["boolean"],
        "positive": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def lars_path_gram(
    Xy,
    Gram,
    *,
    n_samples,
    max_iter=500,
    alpha_min=0,
    method="lar",
    copy_X=True,
    eps=np.finfo(float).eps,
    copy_Gram=True,
    verbose=0,
    return_path=True,
    return_n_iter=False,
    positive=False,
):
    """Compute the LARS path in the sufficient stats mode.

    When ``method='lasso'``, the optimization objective is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    When ``method='lar'``, the objective is only known in the form of an
    implicit equation (see the discussion in [1]_).

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    Xy : ndarray of shape (n_features,)
        `Xy = X.T @ y`.

    Gram : ndarray of shape (n_features, n_features)
        `Gram = X.T @ X`.

    n_samples : int
        Equivalent size of sample.

    max_iter : int, default=500
        Maximum number of iterations to perform, set to infinity for no limit.

    alpha_min : float, default=0
        Minimum correlation along the path. It corresponds to the
        regularization parameter `alpha` in the Lasso.

    method : {'lar', 'lasso'}, default='lar'
        Specifies the returned model. Select `'lar'` for Least Angle
        Regression, `'lasso'` for the Lasso.

    copy_X : bool, default=True
        If `False`, `X` is overwritten.

    eps : float, default=np.finfo(float).eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the `tol` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    copy_Gram : bool, default=True
        If `False`, `Gram` is overwritten.

    verbose : int, default=0
        Controls output verbosity.

    return_path : bool, default=True
        If `return_path==True` returns the entire path, else returns only the
        last point of the path.

    return_n_iter : bool, default=False
        Whether to return the number of iterations.

    positive : bool, default=False
        Restrict coefficients to be >= 0.
        This option is only allowed with method 'lasso'. Note that the model
        coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha. Only coefficients up to the smallest alpha
        value (`alphas_[alphas_ > 0.].min()` when `fit_path=True`) reached by
        the stepwise Lars-Lasso algorithm are typically in congruence with the
        solution of the coordinate descent lasso_path function.

    Returns
    -------
    alphas : ndarray of shape (n_alphas + 1,)
        Maximum of covariances (in absolute value) at each iteration.
        `n_alphas` is either `max_iter`, `n_features` or the
        number of nodes in the path with `alpha >= alpha_min`, whichever
        is smaller.

    active : ndarray of shape (n_alphas,)
        Indices of active variables at the end of the path.

    coefs : ndarray of shape (n_features, n_alphas + 1)
        Coefficients along the path.

    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is set
        to True.

    See Also
    --------
    lars_path : Compute LARS path from the raw design matrix and targets.
    lasso_path : Compute Lasso path with coordinate descent.
    LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
    Lars : Least Angle Regression model a.k.a. LAR.
    LassoLarsCV : Cross-validated Lasso, using the LARS algorithm.
    LarsCV : Cross-validated Least Angle Regression model.
    sklearn.decomposition.sparse_encode : Sparse coding.

    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://statweb.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <https://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_

    Examples
    --------
    >>> from sklearn.linear_model import lars_path_gram
    >>> from sklearn.datasets import make_regression
    >>> X, y, true_coef = make_regression(
    ...    n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0
    ... )
    >>> true_coef
    array([ 0.        ,  0.        ,  0.        , 97.9..., 45.7...])
    >>> alphas, _, estimated_coef = lars_path_gram(X.T @ y, X.T @ X, n_samples=100)
    >>> alphas.shape
    (3,)
    >>> estimated_coef
    array([[ 0.     ,  0.     ,  0.     ],
           [ 0.     ,  0.     ,  0.     ],
           [ 0.     ,  0.     ,  0.     ],
           [ 0.     , 46.96..., 97.99...],
           [ 0.     ,  0.     , 45.70...]])
    """
    # All the information the solver needs is carried by the sufficient
    # statistics ``Xy`` and ``Gram`` plus the equivalent sample count, so
    # the raw ``X`` and ``y`` are explicitly omitted here.
    solver_kwargs = dict(
        X=None,
        y=None,
        Xy=Xy,
        Gram=Gram,
        n_samples=n_samples,
        max_iter=max_iter,
        alpha_min=alpha_min,
        method=method,
        copy_X=copy_X,
        eps=eps,
        copy_Gram=copy_Gram,
        verbose=verbose,
        return_path=return_path,
        return_n_iter=return_n_iter,
        positive=positive,
    )
    return _lars_path_solver(**solver_kwargs)
def _lars_path_solver(
    X,
    y,
    Xy=None,
    Gram=None,
    n_samples=None,
    max_iter=500,
    alpha_min=0,
    method="lar",
    copy_X=True,
    eps=np.finfo(float).eps,
    copy_Gram=True,
    verbose=0,
    return_path=True,
    return_n_iter=False,
    positive=False,
):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]

    Shared implementation behind ``lars_path`` (raw ``X``/``y`` mode) and
    ``lars_path_gram`` (sufficient-statistics mode with ``Xy``/``Gram``).

    The optimization objective for the case method='lasso' is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    in the case of method='lar', the objective function is only known in
    the form of an implicit equation (see discussion in [1])

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    X : None or ndarray of shape (n_samples, n_features)
        Input data. Note that if X is None then Gram must be specified,
        i.e., cannot be None or False.

    y : None or ndarray of shape (n_samples,)
        Input targets.

    Xy : array-like of shape (n_features,), default=None
        `Xy = np.dot(X.T, y)` that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    Gram : None, 'auto' or array-like of shape (n_features, n_features), \
            default=None
        Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.

    n_samples : int or float, default=None
        Equivalent size of sample. If `None`, it will be `n_samples`.

    max_iter : int, default=500
        Maximum number of iterations to perform, set to infinity for no limit.

    alpha_min : float, default=0
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.

    method : {'lar', 'lasso'}, default='lar'
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    copy_X : bool, default=True
        If ``False``, ``X`` is overwritten.

    eps : float, default=np.finfo(float).eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    copy_Gram : bool, default=True
        If ``False``, ``Gram`` is overwritten.

    verbose : int, default=0
        Controls output verbosity.

    return_path : bool, default=True
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.

    return_n_iter : bool, default=False
        Whether to return the number of iterations.

    positive : bool, default=False
        Restrict coefficients to be >= 0.
        This option is only allowed with method 'lasso'. Note that the model
        coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha. Only coefficients up to the smallest alpha
        value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
        the stepwise Lars-Lasso algorithm are typically in congruence with the
        solution of the coordinate descent lasso_path function.

    Returns
    -------
    alphas : array-like of shape (n_alphas + 1,)
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.

    active : array-like of shape (n_alphas,)
        Indices of active variables at the end of the path.

    coefs : array-like of shape (n_features, n_alphas + 1)
        Coefficients along the path

    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.

    See Also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode

    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://statweb.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <https://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
    """
    if method == "lar" and positive:
        raise ValueError("Positive constraint not supported for 'lar' coding method.")

    # In sufficient-statistics mode the caller supplies n_samples; otherwise
    # take it from the targets.
    n_samples = n_samples if n_samples is not None else y.size

    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()

    if Gram is None or Gram is False:
        Gram = None
        if X is None:
            raise ValueError("X and Gram cannot both be unspecified.")
    elif (isinstance(Gram, str) and Gram == "auto") or Gram is True:
        # Only precompute the Gram matrix when it pays off, i.e. when there
        # are more samples than features (unless explicitly forced by True).
        if Gram is True or X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
        else:
            Gram = None
    elif copy_Gram:
        Gram = Gram.copy()

    if Gram is None:
        n_features = X.shape[1]
    else:
        n_features = Cov.shape[0]
        if Gram.shape != (n_features, n_features):
            raise ValueError("The shapes of the inputs Gram and Xy do not match.")

    if copy_X and X is not None and Gram is None:
        # force copy. setting the array to be fortran-ordered
        # speeds up the calculation of the (partial) Gram matrix
        # and allows to easily swap columns
        X = X.copy("F")

    max_features = min(max_iter, n_features)

    dtypes = set(a.dtype for a in (X, y, Xy, Gram) if a is not None)
    if len(dtypes) == 1:
        # use the precision level of input data if it is consistent
        return_dtype = next(iter(dtypes))
    else:
        # fallback to double precision otherwise
        return_dtype = np.float64

    if return_path:
        coefs = np.zeros((max_features + 1, n_features), dtype=return_dtype)
        alphas = np.zeros(max_features + 1, dtype=return_dtype)
    else:
        coef, prev_coef = (
            np.zeros(n_features, dtype=return_dtype),
            np.zeros(n_features, dtype=return_dtype),
        )
        alpha, prev_alpha = (
            np.array([0.0], dtype=return_dtype),
            np.array([0.0], dtype=return_dtype),
        )
        # above better ideas?

    n_iter, n_active = 0, 0
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False

    # will hold the cholesky factorization. Only lower part is
    # referenced.
    if Gram is None:
        L = np.empty((max_features, max_features), dtype=X.dtype)
        swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (X,))
    else:
        L = np.empty((max_features, max_features), dtype=Gram.dtype)
        swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (Cov,))
    (solve_cholesky,) = get_lapack_funcs(("potrs",), (L,))

    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write(".")
            sys.stdout.flush()

    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    cov_precision = np.finfo(Cov.dtype).precision
    equality_tolerance = np.finfo(np.float32).eps

    if Gram is not None:
        # Keep pristine copies: the working Gram/Cov are permuted in place
        # as variables enter/leave the active set.
        Gram_copy = Gram.copy()
        Cov_copy = Cov.copy()

    while True:
        if Cov.size:
            if positive:
                C_idx = np.argmax(Cov)
            else:
                C_idx = np.argmax(np.abs(Cov))

            C_ = Cov[C_idx]

            if positive:
                C = C_
            else:
                C = np.fabs(C_)
        else:
            C = 0.0

        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]

        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = (prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0])
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break

        if n_iter >= max_iter or n_active >= n_features:
            break

        if not drop:
            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################

            if positive:
                sign_active[n_active] = np.ones_like(C_)
            else:
                sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active

            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]

            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]

            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(
                    L[:n_active, :n_active],
                    L[n_active, :n_active],
                    trans=0,
                    lower=1,
                    overwrite_b=True,
                    **SOLVE_TRIANGULAR_ARGS,
                )

            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag

            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.
                warnings.warn(
                    "Regressors in active set degenerate. "
                    "Dropping a regressor, after %i iterations, "
                    "i.e. alpha=%.3e, "
                    "with an active set of %i regressors, and "
                    "the smallest cholesky pivot element being %.3e."
                    " Reduce max_iter or increase eps parameters."
                    % (n_iter, alpha.item(), n_active, diag),
                    ConvergenceWarning,
                )

                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue

            active.append(indices[n_active])
            n_active += 1

            if verbose > 1:
                print(
                    "%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], "", n_active, C)
                )

        if method == "lasso" and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater
            # than the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn(
                "Early stopping the lars path, as the residues "
                "are small and the current value of alpha is no "
                "longer well controlled. %i iterations, alpha=%.3e, "
                "previous alpha=%.3e, with an active set of %i "
                "regressors." % (n_iter, alpha.item(), prev_alpha.item(), n_active),
                ConvergenceWarning,
            )
            break

        # least squares solution
        least_squares, _ = solve_cholesky(
            L[:n_active, :n_active], sign_active[:n_active], lower=True
        )

        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.0
        else:
            # is this really needed ?
            AA = 1.0 / np.sqrt(np.sum(least_squares * sign_active[:n_active]))

            if not np.isfinite(AA):
                # L is too ill-conditioned
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[:: n_active + 1] += (2**i) * eps
                    least_squares, _ = solve_cholesky(
                        L_, sign_active[:n_active], lower=True
                    )
                    tmp = max(np.sum(least_squares * sign_active[:n_active]), eps)
                    AA = 1.0 / np.sqrt(tmp)
                    i += 1
        least_squares *= AA

        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares)

        # Explicit rounding can be necessary to avoid `np.argmax(Cov)` yielding
        # unstable results because of rounding errors.
        np.around(corr_eq_dir, decimals=cov_precision, out=corr_eq_dir)

        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))
        if positive:
            gamma_ = min(g1, C / AA)
        else:
            g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))
            gamma_ = min(g1, g2, C / AA)

        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]

            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]

            if method == "lasso":
                gamma_ = z_pos
            drop = True

        n_iter += 1

        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                coefs[-add_features:] = 0
                alphas = np.resize(alphas, n_iter + add_features)
                alphas[-add_features:] = 0
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)

        coef[active] = prev_coef[active] + gamma_ * least_squares

        # update correlations
        Cov -= gamma_ * corr_eq_dir

        # See if any coefficient has changed sign
        if drop and method == "lasso":
            # handle the case when idx is not length of 1
            for ii in idx:
                arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)

            n_active -= 1
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]

            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]

                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)

                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1])

                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?

                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables

                # TODO: this could be updated
                temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef)

                Cov = np.r_[temp, Cov]

            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.0)  # just to maintain size
            if verbose > 1:
                print(
                    "%s\t\t%s\t\t%s\t\t%s\t\t%s"
                    % (n_iter, "", drop_idx, n_active, abs(temp))
                )

    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[: n_iter + 1]
        coefs = coefs[: n_iter + 1]

        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
"""Least Angle Regression model a.k.a. LAR.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_stochastic_gradient.py | sklearn/linear_model/_stochastic_gradient.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
"""Classification, regression and One-Class SVM using Stochastic Gradient
Descent (SGD).
"""
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
from sklearn._loss._loss import CyHalfBinomialLoss, CyHalfSquaredError, CyHuberLoss
from sklearn.base import (
BaseEstimator,
OutlierMixin,
RegressorMixin,
_fit_context,
clone,
is_classifier,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._base import (
LinearClassifierMixin,
SparseCoefMixin,
make_dataset,
)
from sklearn.linear_model._sgd_fast import (
EpsilonInsensitive,
Hinge,
ModifiedHuber,
SquaredEpsilonInsensitive,
SquaredHinge,
_plain_sgd32,
_plain_sgd64,
)
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from sklearn.utils import check_random_state, compute_class_weight
from sklearn.utils._param_validation import Hidden, Interval, StrOptions
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import _check_partial_fit_first_call
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
validate_data,
)
# Integer codes understood by the Cython plain-SGD routines for each
# learning-rate schedule.
LEARNING_RATE_TYPES = dict(
    constant=1,
    optimal=2,
    invscaling=3,
    adaptive=4,
    pa1=5,
    pa2=6,
)

# Integer codes for the supported penalty (regularization) types.
PENALTY_TYPES = dict(none=0, l2=2, l1=1, elasticnet=3)

# Default value of the ``epsilon`` parameter.
DEFAULT_EPSILON = 0.1

# Largest signed 32-bit integer; used to bound RNG seeds passed to the
# Cython routines (numpy's legacy RandomState expects a C long, which is
# 32-bit on Windows).
MAX_INT = np.iinfo(np.int32).max
class _ValidationScoreCallback:
    """Callable that scores a parameter vector on a held-out validation set.

    Used for early stopping: when called with the current ``coef`` and
    ``intercept``, it installs them on a clone of the estimator and returns
    that clone's score on the validation data.
    """

    def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None):
        self.estimator = clone(estimator)
        # Mark the clone as "fitted" so that ``score`` passes check_is_fitted.
        self.estimator.t_ = 1
        if classes is not None:
            self.estimator.classes_ = classes
        self.X_val = X_val
        self.y_val = y_val
        self.sample_weight_val = sample_weight_val

    def __call__(self, coef, intercept):
        scorer = self.estimator
        scorer.coef_ = coef.reshape(1, -1)
        scorer.intercept_ = np.atleast_1d(intercept)
        return scorer.score(self.X_val, self.y_val, self.sample_weight_val)
class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for SGD classification and regression.

    Stores the shared hyper-parameters, performs the extra validation that
    the declarative parameter constraints cannot express, allocates the
    ``coef_``/``intercept_`` arrays and builds the validation split /
    callback used for early stopping.
    """

    _parameter_constraints: dict = {
        "fit_intercept": ["boolean"],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "shuffle": ["boolean"],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
        "warm_start": ["boolean"],
        "average": [Interval(Integral, 0, None, closed="neither"), "boolean"],
        "eta0": [Interval(Real, 0, None, closed="neither")],
    }

    def __init__(
        self,
        loss,
        *,
        penalty="l2",
        alpha=0.0001,
        l1_ratio=0.15,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        shuffle=True,
        verbose=0,
        epsilon=0.1,
        random_state=None,
        learning_rate="optimal",
        eta0=0.01,
        power_t=0.5,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        warm_start=False,
        average=False,
    ):
        # Only store the hyper-parameters; validation happens at fit time.
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.early_stopping = early_stopping
        self.validation_fraction = validation_fraction
        self.n_iter_no_change = n_iter_no_change
        self.warm_start = warm_start
        self.average = average
        self.max_iter = max_iter
        self.tol = tol

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _more_validate_params(self, for_partial_fit=False):
        """Validate input params."""
        if self.early_stopping and for_partial_fit:
            raise ValueError("early_stopping should be False with partial_fit")
        if self.learning_rate == "optimal" and self.alpha == 0:
            raise ValueError(
                "alpha must be > 0 since "
                "learning_rate is 'optimal'. alpha is used "
                "to compute the optimal learning rate."
            )

        # TODO: Consider whether pa1 and pa2 could also work for other losses.
        if self.learning_rate in ("pa1", "pa2"):
            if is_classifier(self):
                if self.loss != "hinge":
                    msg = (
                        f"Learning rate '{self.learning_rate}' only works with loss "
                        "'hinge'."
                    )
                    raise ValueError(msg)
            elif self.loss != "epsilon_insensitive":
                msg = (
                    f"Learning rate '{self.learning_rate}' only works with loss "
                    "'epsilon_insensitive'."
                )
                raise ValueError(msg)

        if self.penalty == "elasticnet" and self.l1_ratio is None:
            raise ValueError("l1_ratio must be set when penalty is 'elasticnet'")

        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)

    def _get_l1_ratio(self):
        """Return the l1_ratio to pass to the Cython routines (never None)."""
        if self.l1_ratio is None:
            # plain_sgd expects a float. Any value is fine since at this point
            # penalty can't be "elasticnet" so l1_ratio is not used.
            return 0.0
        return self.l1_ratio

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``."""
        loss_ = self.loss_functions[loss]
        loss_class, args = loss_[0], loss_[1:]
        # The robust losses take the user-provided epsilon instead of the
        # class-level default stored in ``loss_functions``.
        if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"):
            args = (self.epsilon,)
        return loss_class(*args)

    def _get_learning_rate_type(self, learning_rate):
        # KeyError-based lookup doubles as validation of the schedule name.
        return LEARNING_RATE_TYPES[learning_rate]

    def _get_penalty_type(self, penalty):
        penalty = str(penalty).lower()
        return PENALTY_TYPES[penalty]

    def _allocate_parameter_mem(
        self,
        n_classes,
        n_features,
        input_dtype,
        coef_init=None,
        intercept_init=None,
        one_class=0,
    ):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=input_dtype, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(
                    (n_classes, n_features), dtype=input_dtype, order="C"
                )

            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(
                    intercept_init, order="C", dtype=input_dtype
                )
                if intercept_init.shape != (n_classes,):
                    raise ValueError("Provided intercept_init does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=input_dtype, order="C")
        else:
            # allocate coef_ (binary case: a single 1-D weight vector)
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=input_dtype, order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features, dtype=input_dtype, order="C")

            # allocate intercept_ (for One-Class SVM this is the offset_)
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=input_dtype)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init does not match dataset.")
                if one_class:
                    self.offset_ = intercept_init.reshape(
                        1,
                    )
                else:
                    self.intercept_ = intercept_init.reshape(
                        1,
                    )
            else:
                if one_class:
                    self.offset_ = np.zeros(1, dtype=input_dtype, order="C")
                else:
                    self.intercept_ = np.zeros(1, dtype=input_dtype, order="C")

        # initialize average parameters
        if self.average > 0:
            self._standard_coef = self.coef_
            self._average_coef = np.zeros(
                self.coef_.shape, dtype=input_dtype, order="C"
            )
            if one_class:
                self._standard_intercept = 1 - self.offset_
            else:
                self._standard_intercept = self.intercept_

            self._average_intercept = np.zeros(
                self._standard_intercept.shape, dtype=input_dtype, order="C"
            )

    def _make_validation_split(self, y, sample_mask):
        """Split the dataset between training set and validation set.

        Parameters
        ----------
        y : ndarray of shape (n_samples, )
            Target values.

        sample_mask : ndarray of shape (n_samples, )
            A boolean array indicating whether each sample should be included
            for validation set.

        Returns
        -------
        validation_mask : ndarray of shape (n_samples, )
            Equal to True on the validation set, False on the training set.
        """
        n_samples = y.shape[0]
        validation_mask = np.zeros(n_samples, dtype=np.bool_)
        if not self.early_stopping:
            # use the full set for training, with an empty validation set
            return validation_mask

        if is_classifier(self):
            splitter_type = StratifiedShuffleSplit
        else:
            splitter_type = ShuffleSplit
        cv = splitter_type(
            test_size=self.validation_fraction, random_state=self.random_state
        )
        idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))

        if not np.any(sample_mask[idx_val]):
            raise ValueError(
                "The sample weights for validation set are all zero, consider using a"
                " different random state."
            )

        if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
            raise ValueError(
                "Splitting %d samples into a train set and a validation set "
                "with validation_fraction=%r led to an empty set (%d and %d "
                "samples). Please either change validation_fraction, increase "
                "number of samples, or disable early_stopping."
                % (
                    n_samples,
                    self.validation_fraction,
                    idx_train.shape[0],
                    idx_val.shape[0],
                )
            )

        validation_mask[idx_val] = True
        return validation_mask

    def _make_validation_score_cb(
        self, validation_mask, X, y, sample_weight, classes=None
    ):
        # The callback is only needed when early stopping is enabled.
        if not self.early_stopping:
            return None

        return _ValidationScoreCallback(
            self,
            X[validation_mask],
            y[validation_mask],
            sample_weight[validation_mask],
            classes=classes,
        )
def _prepare_fit_binary(est, y, i, input_dtype, label_encode=True):
"""Initialization for fit_binary.
Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=input_dtype, order="C")
if label_encode:
# y in {0, 1}
y_i[y != est.classes_[i]] = 0.0
else:
# y in {-1, +1}
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est._standard_coef.ravel()
intercept = est._standard_intercept[0]
average_coef = est._average_coef.ravel()
average_intercept = est._average_intercept[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est._standard_coef[i]
intercept = est._standard_intercept[i]
average_coef = est._average_coef[i]
average_intercept = est._average_intercept[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(
    est,
    i,
    X,
    y,
    alpha,
    learning_rate,
    max_iter,
    pos_weight,
    neg_weight,
    sample_weight,
    validation_mask=None,
    random_state=None,
):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.

    Parameters
    ----------
    est : Estimator object
        The estimator to fit

    i : int
        Index of the positive class

    X : numpy array or sparse matrix of shape [n_samples,n_features]
        Training data

    y : numpy array of shape [n_samples, ]
        Target values

    alpha : float
        The regularization parameter

    learning_rate : str
        The learning rate. Accepted values are 'constant', 'optimal',
        'invscaling', 'pa1' and 'pa2'.

    max_iter : int
        The maximum number of iterations (epochs)

    pos_weight : float
        The weight of the positive class

    neg_weight : float
        The weight of the negative class

    sample_weight : numpy array of shape [n_samples, ]
        The weight of each sample

    validation_mask : numpy array of shape [n_samples, ], default=None
        Precomputed validation mask in case _fit_binary is called in the
        context of a one-vs-rest reduction.

    random_state : int, RandomState instance, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    coef, intercept, n_iter_ : tuple
        The coefficients and intercept fitted for this binary problem and
        the iteration count reported by the Cython SGD routine.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    # Log loss uses {0, 1} target coding; the margin-based losses use {-1, +1}.
    label_encode = isinstance(est._loss_function_, CyHalfBinomialLoss)
    y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary(
        est, y, i, input_dtype=X.dtype, label_encode=label_encode
    )
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]

    random_state = check_random_state(random_state)
    dataset, intercept_decay = make_dataset(
        X, y_i, sample_weight, random_state=random_state
    )

    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    if validation_mask is None:
        validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0)
    classes = np.array([-1, 1], dtype=y_i.dtype)
    validation_score_cb = est._make_validation_score_cb(
        validation_mask, X, y_i, sample_weight, classes=classes
    )

    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(MAX_INT)

    tol = est.tol if est.tol is not None else -np.inf

    # Pick the 32- or 64-bit Cython routine matching the coef dtype.
    _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
    # NOTE(review): the arguments below are positional and must match the
    # _plain_sgd signature in _sgd_fast exactly; the literal ``0`` before
    # ``est.t_`` appears to be the one-class flag — confirm against
    # _sgd_fast before touching this call.
    coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(
        coef,
        intercept,
        average_coef,
        average_intercept,
        est._loss_function_,
        penalty_type,
        alpha,
        est._get_l1_ratio(),
        dataset,
        validation_mask,
        est.early_stopping,
        validation_score_cb,
        int(est.n_iter_no_change),
        max_iter,
        tol,
        int(est.fit_intercept),
        int(est.verbose),
        int(est.shuffle),
        seed,
        pos_weight,
        neg_weight,
        learning_rate_type,
        est.eta0,
        est.power_t,
        0,
        est.t_,
        intercept_decay,
        est.average,
    )

    if est.average:
        if len(est.classes_) == 2:
            est._average_intercept[0] = average_intercept
        else:
            est._average_intercept[i] = average_intercept

    return coef, intercept, n_iter_
def _get_plain_sgd_function(input_dtype):
    """Pick the Cython SGD routine matching the input's float precision.

    Returns the 32-bit variant for float32 inputs and the 64-bit variant
    for everything else, so the training loop runs without up/down-casting.
    """
    if input_dtype == np.float32:
        return _plain_sgd32
    return _plain_sgd64
class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
    """Abstract base class for linear classifiers trained with SGD.

    Holds the shared fitting machinery: input validation, parameter
    allocation, and dispatch to the binary or one-vs-all (OvA) training
    loops that delegate to ``fit_binary``. Concrete subclasses (e.g.
    ``SGDClassifier``) mainly provide the public constructor.
    """

    # Maps each supported loss name to its Cython loss class plus the extra
    # positional arguments used to instantiate it (e.g. the hinge threshold
    # or the epsilon of the robust losses).
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log_loss": (CyHalfBinomialLoss,),
        "modified_huber": (ModifiedHuber,),
        "squared_error": (CyHalfSquaredError,),
        "huber": (CyHuberLoss, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON),
    }

    # Declarative parameter validation; extends the constraints of BaseSGD
    # with the classifier-specific parameters.
    _parameter_constraints: dict = {
        **BaseSGD._parameter_constraints,
        "loss": [StrOptions(set(loss_functions))],
        "early_stopping": ["boolean"],
        "validation_fraction": [Interval(Real, 0, 1, closed="neither")],
        "n_iter_no_change": [Interval(Integral, 1, None, closed="left")],
        "n_jobs": [Integral, None],
        "class_weight": [StrOptions({"balanced"}), dict, None],
    }

    @abstractmethod
    def __init__(
        self,
        loss="hinge",
        *,
        penalty="l2",
        alpha=0.0001,
        l1_ratio=0.15,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        shuffle=True,
        verbose=0,
        epsilon=DEFAULT_EPSILON,
        n_jobs=None,
        random_state=None,
        learning_rate="optimal",
        eta0=0.01,
        power_t=0.5,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        class_weight=None,
        warm_start=False,
        average=False,
    ):
        # Forward the shared SGD parameters to BaseSGD.
        super().__init__(
            loss=loss,
            penalty=penalty,
            alpha=alpha,
            l1_ratio=l1_ratio,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            shuffle=shuffle,
            verbose=verbose,
            epsilon=epsilon,
            random_state=random_state,
            learning_rate=learning_rate,
            eta0=eta0,
            power_t=power_t,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            warm_start=warm_start,
            average=average,
        )
        # Classifier-specific parameters not handled by BaseSGD.
        self.class_weight = class_weight
        self.n_jobs = n_jobs

    def _partial_fit(
        self,
        X,
        y,
        alpha,
        loss,
        learning_rate,
        max_iter,
        classes,
        sample_weight,
        coef_init,
        intercept_init,
    ):
        """Shared implementation behind ``fit`` and ``partial_fit``.

        Validates inputs, (re)allocates ``coef_``/``intercept_`` when needed
        and dispatches to the binary or one-vs-all training procedure.
        """
        # Only reset the input-validation state (n_features_in_, feature
        # names) on the very first call; subsequent calls must be consistent.
        first_call = not hasattr(self, "classes_")
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse="csr",
            dtype=[np.float64, np.float32],
            order="C",
            accept_large_sparse=False,
            reset=first_call,
        )
        n_samples, n_features = X.shape
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]
        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(
            self.class_weight, classes=self.classes_, y=y
        )
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        # (Re)allocate coefficients on first call or on an explicit warm start.
        if getattr(self, "coef_", None) is None or coef_init is not None:
            self._allocate_parameter_mem(
                n_classes=n_classes,
                n_features=n_features,
                input_dtype=X.dtype,
                coef_init=coef_init,
                intercept_init=intercept_init,
            )
        elif n_features != self.coef_.shape[-1]:
            raise ValueError(
                "Number of features %d does not match previous data %d."
                % (n_features, self.coef_.shape[-1])
            )
        self._loss_function_ = self._get_loss_function(loss)
        if not hasattr(self, "t_"):
            # t_ counts update steps across calls; starts at 1 by convention.
            self.t_ = 1.0
        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(
                X,
                y,
                alpha=alpha,
                learning_rate=learning_rate,
                sample_weight=sample_weight,
                max_iter=max_iter,
            )
        elif n_classes == 2:
            self._fit_binary(
                X,
                y,
                alpha=alpha,
                learning_rate=learning_rate,
                sample_weight=sample_weight,
                max_iter=max_iter,
            )
        else:
            raise ValueError(
                "The number of classes has to be greater than one; got %d class"
                % n_classes
            )
        return self

    def _fit(
        self,
        X,
        y,
        alpha,
        loss,
        learning_rate,
        coef_init=None,
        intercept_init=None,
        sample_weight=None,
    ):
        """Full fit: reset state as needed, then run ``max_iter`` epochs."""
        if hasattr(self, "classes_"):
            # delete the attribute otherwise _partial_fit thinks it's not the first call
            delattr(self, "classes_")
        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        y = validate_data(self, y=y)
        classes = np.unique(y)
        if self.warm_start and hasattr(self, "coef_"):
            # Reuse previous coefficients unless explicit inits were given.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            # Keep separate "standard" (non-averaged) copies when averaging.
            self._standard_coef = self.coef_
            self._standard_intercept = self.intercept_
            self._average_coef = None
            self._average_intercept = None
        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0
        self._partial_fit(
            X,
            y,
            alpha,
            loss,
            learning_rate,
            self.max_iter,
            classes,
            sample_weight,
            coef_init,
            intercept_init,
        )
        # Exhausting max_iter with a finite tol means we never converged.
        if (
            self.tol is not None
            and self.tol > -np.inf
            and self.n_iter_ == self.max_iter
        ):
            warnings.warn(
                (
                    "Maximum number of iteration reached before "
                    "convergence. Consider increasing max_iter to "
                    "improve the fit."
                ),
                ConvergenceWarning,
            )
        # NOTE(review): this deprecation warning fires after fitting, so the
        # model is still fitted with the negative power_t.
        if self.power_t < 0:
            warnings.warn(
                "Negative values for `power_t` are deprecated in version 1.8 "
                "and will raise an error in 1.10. "
                "Use values in the range [0.0, inf) instead.",
                FutureWarning,
            )
        return self

    def _fit_binary(self, X, y, alpha, sample_weight, learning_rate, max_iter):
        """Fit a binary classifier on X and y."""
        # Class index 1 is the positive class; weights come from the
        # expanded class_weight computed in _partial_fit.
        coef, intercept, n_iter_ = fit_binary(
            self,
            1,
            X,
            y,
            alpha,
            learning_rate,
            max_iter,
            self._expanded_class_weight[1],
            self._expanded_class_weight[0],
            sample_weight,
            random_state=self.random_state,
        )
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        # need to be 2d
        if self.average > 0:
            # Expose averaged weights only once enough updates were seen.
            if self.average <= self.t_ - 1:
                self.coef_ = self._average_coef.reshape(1, -1)
                self.intercept_ = self._average_intercept
            else:
                self.coef_ = self._standard_coef.reshape(1, -1)
                self._standard_intercept = np.atleast_1d(intercept)
                self.intercept_ = self._standard_intercept
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)

    def _fit_multiclass(self, X, y, alpha, learning_rate, sample_weight, max_iter):
        """Fit a multi-class classifier by combining binary classifiers.

        Each binary classifier predicts one class versus all others. This
        strategy is called OvA (One versus All) or OvR (One versus Rest).
        """
        # Precompute the validation split using the multiclass labels
        # to ensure proper balancing of the classes.
        validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
        # Use joblib to fit OvA in parallel.
        # Pick the random seed for each job outside of fit_binary to avoid
        # sharing the estimator random state between threads which could lead
        # to non-deterministic behavior
        random_state = check_random_state(self.random_state)
        seeds = random_state.randint(MAX_INT, size=len(self.classes_))
        result = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem"
        )(
            delayed(fit_binary)(
                self,
                i,
                X,
                y,
                alpha,
                learning_rate,
                max_iter,
                self._expanded_class_weight[i],
                1.0,
                sample_weight,
                validation_mask=validation_mask,
                random_state=seed,
            )
            for i, seed in enumerate(seeds)
        )
        # take the maximum of n_iter_ over every binary fit
        n_iter_ = 0.0
        for i, (_, intercept, n_iter_i) in enumerate(result):
            self.intercept_[i] = intercept
            n_iter_ = max(n_iter_, n_iter_i)
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        if self.average > 0:
            # Expose averaged weights only once enough updates were seen.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self._average_coef
                self.intercept_ = self._average_intercept
            else:
                self.coef_ = self._standard_coef
                self._standard_intercept = np.atleast_1d(self.intercept_)
                self.intercept_ = self._standard_intercept

    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Perform one epoch of stochastic gradient descent on given samples.
        Internally, this method uses ``max_iter = 1``. Therefore, it is not
        guaranteed that a minimum of the cost function is reached after calling
        it once. Matters such as objective convergence, early stopping, and
        learning rate adjustments should be handled by the user.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data.
        y : ndarray of shape (n_samples,)
            Subset of the target values.
        classes : ndarray of shape (n_classes,), default=None
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.
        sample_weight : array-like, shape (n_samples,), default=None
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        if not hasattr(self, "classes_"):
            # First call: validate the extra params and reject "balanced"
            # class weights, which need the full label distribution.
            self._more_validate_params(for_partial_fit=True)
            if self.class_weight == "balanced":
                raise ValueError(
                    "class_weight '{0}' is not supported for "
                    "partial_fit. In order to use 'balanced' weights,"
                    " use compute_class_weight('{0}', "
                    "classes=classes, y=y). "
                    "In place of y you can use a large enough sample "
                    "of the full training set target to properly "
                    "estimate the class frequency distributions. "
                    "Pass the resulting weights as the class_weight "
                    "parameter.".format(self.class_weight)
                )
        # A single epoch: max_iter=1, no warm-start coefficients.
        return self._partial_fit(
            X,
            y,
            alpha=self.alpha,
            loss=self.loss,
            learning_rate=self.learning_rate,
            max_iter=1,
            classes=classes,
            sample_weight=sample_weight,
            coef_init=None,
            intercept_init=None,
        )

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        y : ndarray of shape (n_samples,)
            Target values.
        coef_init : ndarray of shape (n_classes, n_features), default=None
            The initial coefficients to warm-start the optimization.
        intercept_init : ndarray of shape (n_classes,), default=None
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), default=None
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            constructor) if class_weight is specified.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        self._more_validate_params()
        return self._fit(
            X,
            y,
            alpha=self.alpha,
            loss=self.loss,
            learning_rate=self.learning_rate,
            coef_init=coef_init,
            intercept_init=intercept_init,
            sample_weight=sample_weight,
        )

    def __sklearn_tags__(self):
        # Advertise sparse-input support on top of the inherited tags.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, etc.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning via the `partial_fit` method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_bayes.py | sklearn/linear_model/_bayes.py | """
Various bayesian regression
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from math import log
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from scipy.linalg import pinvh
from sklearn.base import RegressorMixin, _fit_context
from sklearn.linear_model._base import LinearModel, _preprocess_data
from sklearn.utils import _safe_indexing
from sklearn.utils._param_validation import Interval
from sklearn.utils.extmath import fast_logdet
from sklearn.utils.validation import _check_sample_weight, validate_data
###############################################################################
# BayesianRidge regression
class BayesianRidge(RegressorMixin, LinearModel):
    """Bayesian ridge regression.

    Fit a Bayesian ridge model. See the Notes section for details on this
    implementation and the optimization of the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).

    Read more in the :ref:`User Guide <bayesian_regression>`.

    For an intuitive visualization of how the sinusoid is approximated by
    a polynomial using different pairs of initial values, see
    :ref:`sphx_glr_auto_examples_linear_model_plot_bayesian_ridge_curvefit.py`.

    Parameters
    ----------
    max_iter : int, default=300
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion.
        .. versionchanged:: 1.3
    tol : float, default=1e-3
        Stop the algorithm if w has converged.
    alpha_1 : float, default=1e-6
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter.
    alpha_2 : float, default=1e-6
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.
    lambda_1 : float, default=1e-6
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter.
    lambda_2 : float, default=1e-6
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.
    alpha_init : float, default=None
        Initial value for alpha (precision of the noise).
        If not set, alpha_init is 1/Var(y).
        .. versionadded:: 0.22
    lambda_init : float, default=None
        Initial value for lambda (precision of the weights).
        If not set, lambda_init is 1.
        .. versionadded:: 0.22
    compute_score : bool, default=False
        If True, compute the log marginal likelihood at each iteration of the
        optimization.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model.
        The intercept is not treated as a probabilistic parameter
        and thus has no associated variance. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    copy_X : bool, default=True
        If True, X will be copied; else, it may be overwritten.
    verbose : bool, default=False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array-like of shape (n_features,)
        Coefficients of the regression model (mean of distribution)
    intercept_ : float
        Independent term in decision function. Set to 0.0 if
        `fit_intercept = False`.
    alpha_ : float
        Estimated precision of the noise.
    lambda_ : float
        Estimated precision of the weights.
    sigma_ : array-like of shape (n_features, n_features)
        Estimated variance-covariance matrix of the weights
    scores_ : array-like of shape (n_iter_+1,)
        If computed_score is True, value of the log marginal likelihood (to be
        maximized) at each iteration of the optimization. The array starts
        with the value of the log marginal likelihood obtained for the initial
        values of alpha and lambda and ends with the value obtained for the
        estimated alpha and lambda.
    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
    X_offset_ : ndarray of shape (n_features,)
        If `fit_intercept=True`, offset subtracted for centering data to a
        zero mean. Set to np.zeros(n_features) otherwise.
    X_scale_ : ndarray of shape (n_features,)
        Set to np.ones(n_features).
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0

    See Also
    --------
    ARDRegression : Bayesian ARD regression.

    Notes
    -----
    There exist several strategies to perform Bayesian ridge regression. This
    implementation is based on the algorithm described in Appendix A of
    (Tipping, 2001) where updates of the regularization parameters are done as
    suggested in (MacKay, 1992). Note that according to A New
    View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
    update rules do not guarantee that the marginal likelihood is increasing
    between two consecutive iterations of the optimization.

    References
    ----------
    D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
    Vol. 4, No. 3, 1992.
    M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
    Journal of Machine Learning Research, Vol. 1, 2001.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    BayesianRidge()
    >>> clf.predict([[1, 1]])
    array([1.])
    """

    # Declarative parameter validation used by _fit_context.
    _parameter_constraints: dict = {
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="neither")],
        "alpha_1": [Interval(Real, 0, None, closed="left")],
        "alpha_2": [Interval(Real, 0, None, closed="left")],
        "lambda_1": [Interval(Real, 0, None, closed="left")],
        "lambda_2": [Interval(Real, 0, None, closed="left")],
        "alpha_init": [None, Interval(Real, 0, None, closed="left")],
        "lambda_init": [None, Interval(Real, 0, None, closed="left")],
        "compute_score": ["boolean"],
        "fit_intercept": ["boolean"],
        "copy_X": ["boolean"],
        "verbose": ["verbose"],
    }

    def __init__(
        self,
        *,
        max_iter=300,
        tol=1.0e-3,
        alpha_1=1.0e-6,
        alpha_2=1.0e-6,
        lambda_1=1.0e-6,
        lambda_2=1.0e-6,
        alpha_init=None,
        lambda_init=None,
        compute_score=False,
        fit_intercept=True,
        copy_X=True,
        verbose=False,
    ):
        # Store parameters verbatim (sklearn convention: no validation here).
        self.max_iter = max_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.alpha_init = alpha_init
        self.lambda_init = lambda_init
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.verbose = verbose

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the model.
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Training data.
        y : ndarray of shape (n_samples,)
            Target values. Will be cast to X's dtype if necessary.
        sample_weight : ndarray of shape (n_samples,), default=None
            Individual weights for each sample.
            .. versionadded:: 0.20
               parameter *sample_weight* support to BayesianRidge.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X, y = validate_data(
            self,
            X,
            y,
            dtype=[np.float64, np.float32],
            force_writeable=True,
            y_numeric=True,
        )
        dtype = X.dtype
        n_samples, n_features = X.shape
        # Unweighted defaults; overwritten below when sample_weight is given.
        sw_sum = n_samples
        y_var = y.var()
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype)
            sw_sum = sample_weight.sum()
            y_mean = np.average(y, weights=sample_weight)
            y_var = np.average((y - y_mean) ** 2, weights=sample_weight)
        X, y, X_offset_, y_offset_, X_scale_, _ = _preprocess_data(
            X,
            y,
            fit_intercept=self.fit_intercept,
            copy=self.copy_X,
            sample_weight=sample_weight,
            # Sample weight can be implemented via a simple rescaling.
            rescale_with_sw=True,
        )
        self.X_offset_ = X_offset_
        self.X_scale_ = X_scale_
        # Initialization of the values of the parameters
        eps = np.finfo(np.float64).eps
        # Add `eps` in the denominator to omit division by zero
        alpha_ = self.alpha_init
        lambda_ = self.lambda_init
        if alpha_ is None:
            alpha_ = 1.0 / (y_var + eps)
        if lambda_ is None:
            lambda_ = 1.0
        # Avoid unintended type promotion to float64 with numpy 2
        alpha_ = np.asarray(alpha_, dtype=dtype)
        lambda_ = np.asarray(lambda_, dtype=dtype)
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        XT_y = np.dot(X.T, y)
        # Let M, N = n_samples, n_features and K = min(M, N).
        # The posterior covariance matrix needs Vh_full: (N, N).
        # The full SVD is only required when n_samples < n_features.
        # When n_samples < n_features, K=M and full_matrices=True
        # U: (M, M), S: M, Vh_full: (N, N), Vh: (M, N)
        # When n_samples > n_features, K=N and full_matrices=False
        # U: (M, N), S: N, Vh_full: (N, N), Vh: (N, N)
        U, S, Vh_full = linalg.svd(X, full_matrices=(n_samples < n_features))
        K = len(S)
        eigen_vals_ = S**2
        # Pad the squared singular values with zeros up to n_features; used
        # for the posterior covariance below.
        eigen_vals_full = np.zeros(n_features, dtype=dtype)
        eigen_vals_full[0:K] = eigen_vals_
        Vh = Vh_full[0:K, :]
        # Convergence loop of the bayesian ridge regression
        for iter_ in range(self.max_iter):
            # update posterior mean coef_ based on alpha_ and lambda_ and
            # compute corresponding sse (sum of squared errors)
            coef_, sse_ = self._update_coef_(
                X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
            )
            if self.compute_score:
                # compute the log marginal likelihood
                s = self._log_marginal_likelihood(
                    n_samples,
                    n_features,
                    sw_sum,
                    eigen_vals_,
                    alpha_,
                    lambda_,
                    coef_,
                    sse_,
                )
                self.scores_.append(s)
            # Update alpha and lambda according to (MacKay, 1992)
            # gamma_ is the effective number of parameters.
            gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))
            lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2)
            alpha_ = (sw_sum - gamma_ + 2 * alpha_1) / (sse_ + 2 * alpha_2)
            # Check for convergence
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        self.n_iter_ = iter_ + 1
        # return regularization parameters and corresponding posterior mean,
        # log marginal likelihood and posterior covariance
        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_, sse_ = self._update_coef_(
            X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
        )
        if self.compute_score:
            # compute the log marginal likelihood
            # NOTE(review): this passes the loop-local ``coef_`` (from the
            # last iteration), not the freshly recomputed ``self.coef_`` —
            # confirm this is intentional.
            s = self._log_marginal_likelihood(
                n_samples,
                n_features,
                sw_sum,
                eigen_vals_,
                alpha_,
                lambda_,
                coef_,
                sse_,
            )
            self.scores_.append(s)
            self.scores_ = np.array(self.scores_)
        # posterior covariance
        self.sigma_ = np.dot(
            Vh_full.T, Vh_full / (alpha_ * eigen_vals_full + lambda_)[:, np.newaxis]
        )
        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self

    def predict(self, X, return_std=False):
        """Predict using the linear model.
        In addition to the mean of the predictive distribution, also its
        standard deviation can be returned.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.
        return_std : bool, default=False
            Whether to return the standard deviation of posterior prediction.
        Returns
        -------
        y_mean : array-like of shape (n_samples,)
            Mean of predictive distribution of query points.
        y_std : array-like of shape (n_samples,)
            Standard deviation of predictive distribution of query points.
        """
        y_mean = self._decision_function(X)
        if not return_std:
            return y_mean
        else:
            # Predictive variance = x^T sigma_ x plus the noise variance.
            sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
            y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
            return y_mean, y_std

    def _update_coef_(
        self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
    ):
        """Update posterior mean and compute corresponding sse (sum of squared errors).
        Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
        scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
        + np.dot(X.T, X))^-1
        """
        # Both branches evaluate the same expression through the SVD; the
        # split picks the cheaper factorization shape.
        if n_samples > n_features:
            coef_ = np.linalg.multi_dot(
                [Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y]
            )
        else:
            coef_ = np.linalg.multi_dot(
                [X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y]
            )
        # Note: we do not need to explicitly use the weights in this sum because
        # y and X were preprocessed by _rescale_data to handle the weights.
        sse_ = np.sum((y - np.dot(X, coef_)) ** 2)
        return coef_, sse_

    def _log_marginal_likelihood(
        self, n_samples, n_features, sw_sum, eigen_vals, alpha_, lambda_, coef, sse
    ):
        """Log marginal likelihood."""
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        # compute the log of the determinant of the posterior covariance.
        # posterior covariance is given by
        # sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
        if n_samples > n_features:
            logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals))
        else:
            # Only the first n_samples eigenvalues are non-zero here; the
            # remaining directions contribute log(lambda_) each.
            logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype)
            logdet_sigma[:n_samples] += alpha_ * eigen_vals
            logdet_sigma = -np.sum(np.log(logdet_sigma))
        # Gamma prior terms for lambda and alpha.
        score = lambda_1 * log(lambda_) - lambda_2 * lambda_
        score += alpha_1 * log(alpha_) - alpha_2 * alpha_
        # Evidence (marginal likelihood) term.
        score += 0.5 * (
            n_features * log(lambda_)
            + sw_sum * log(alpha_)
            - alpha_ * sse
            - lambda_ * np.sum(coef**2)
            + logdet_sigma
            - sw_sum * log(2 * np.pi)
        )
        return score
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(RegressorMixin, LinearModel):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedures (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
max_iter : int, default=300
Maximum number of iterations.
.. versionchanged:: 1.3
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
compute_score : bool, default=False
If True, compute the objective function at each step of the model.
threshold_lambda : float, default=10 000
Threshold for removing (pruning) weights with high precision from
the computation.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array-like of shape (n_features,)
estimated precisions of the weights.
sigma_ : array-like of shape (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
.. versionadded:: 1.3
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
X_offset_ : float
If `fit_intercept=True`, offset subtracted for centering data to a
zero mean. Set to np.zeros(n_features) otherwise.
X_scale_ : float
Set to np.ones(n_features).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BayesianRidge : Bayesian ridge regression.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
ARD is a little different than the slide: only dimensions/features for
which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
discarded.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
ARDRegression()
>>> clf.predict([[1, 1]])
array([1.])
- :ref:`sphx_glr_auto_examples_linear_model_plot_ard.py` demonstrates ARD
Regression.
- :ref:`sphx_glr_auto_examples_linear_model_plot_lasso_and_elasticnet.py`
showcases ARD Regression alongside Lasso and Elastic-Net for sparse,
correlated signals, in the presence of noise.
"""
_parameter_constraints: dict = {
"max_iter": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0, None, closed="left")],
"alpha_1": [Interval(Real, 0, None, closed="left")],
"alpha_2": [Interval(Real, 0, None, closed="left")],
"lambda_1": [Interval(Real, 0, None, closed="left")],
"lambda_2": [Interval(Real, 0, None, closed="left")],
"compute_score": ["boolean"],
"threshold_lambda": [Interval(Real, 0, None, closed="left")],
"fit_intercept": ["boolean"],
"copy_X": ["boolean"],
"verbose": ["verbose"],
}
def __init__(
self,
*,
max_iter=300,
tol=1.0e-3,
alpha_1=1.0e-6,
alpha_2=1.0e-6,
lambda_1=1.0e-6,
lambda_2=1.0e-6,
compute_score=False,
threshold_lambda=1.0e4,
fit_intercept=True,
copy_X=True,
verbose=False,
):
self.max_iter = max_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y):
    """Fit the model according to the given training data and parameters.

    Iterative procedure to maximize the evidence

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.
    y : array-like of shape (n_samples,)
        Target values (integers). Will be cast to X's dtype if necessary.

    Returns
    -------
    self : object
        Fitted estimator.
    """
    # Validate inputs; at least 2 samples are required and X/y are cast to
    # a common floating dtype.
    X, y = validate_data(
        self,
        X,
        y,
        dtype=[np.float64, np.float32],
        force_writeable=True,
        y_numeric=True,
        ensure_min_samples=2,
    )
    dtype = X.dtype
    n_samples, n_features = X.shape
    coef_ = np.zeros(n_features, dtype=dtype)
    # Center (and optionally copy) the data; the intercept is recovered at
    # the end via _set_intercept.
    X, y, X_offset_, y_offset_, X_scale_, _ = _preprocess_data(
        X, y, fit_intercept=self.fit_intercept, copy=self.copy_X
    )
    self.X_offset_ = X_offset_
    self.X_scale_ = X_scale_
    # Launch the convergence loop
    # keep_lambda marks features that have not yet been pruned.
    keep_lambda = np.ones(n_features, dtype=bool)
    lambda_1 = self.lambda_1
    lambda_2 = self.lambda_2
    alpha_1 = self.alpha_1
    alpha_2 = self.alpha_2
    verbose = self.verbose
    # Initialization of the values of the parameters
    eps = np.finfo(np.float64).eps
    # Add `eps` in the denominator to omit division by zero if `np.var(y)`
    # is zero.
    # Explicitly set dtype to avoid unintended type promotion with numpy 2.
    alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype)
    lambda_ = np.ones(n_features, dtype=dtype)
    self.scores_ = list()
    coef_old_ = None

    def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
        # Posterior mean of the kept weights: alpha * sigma @ X_kept.T @ y;
        # pruned entries of coef_ are left untouched here (zeroed below).
        coef_[keep_lambda] = alpha_ * np.linalg.multi_dot(
            [sigma_, X[:, keep_lambda].T, y]
        )
        return coef_

    # Choose the cheaper covariance update depending on problem shape.
    update_sigma = (
        self._update_sigma
        if n_samples >= n_features
        else self._update_sigma_woodbury
    )
    # Iterative procedure of ARDRegression
    for iter_ in range(self.max_iter):
        sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
        coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
        # Update alpha and lambda
        sse_ = np.sum((y - np.dot(X, coef_)) ** 2)
        # NOTE(review): gamma_ appears to be the per-feature "well-determined
        # parameter" quantity from the ARD evidence updates — confirm against
        # the referenced derivation.
        gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)
        lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (
            (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2
        )
        alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (sse_ + 2.0 * alpha_2)
        # Prune the weights with a precision over a threshold
        keep_lambda = lambda_ < self.threshold_lambda
        coef_[~keep_lambda] = 0
        # Compute the objective function
        if self.compute_score:
            s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
            s += alpha_1 * log(alpha_) - alpha_2 * alpha_
            s += 0.5 * (
                fast_logdet(sigma_)
                + n_samples * log(alpha_)
                + np.sum(np.log(lambda_))
            )
            s -= 0.5 * (alpha_ * sse_ + (lambda_ * coef_**2).sum())
            self.scores_.append(s)
        # Check for convergence
        if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
            if verbose:
                print("Converged after %s iterations" % iter_)
            break
        coef_old_ = np.copy(coef_)
        # Stop early if every feature has been pruned away.
        if not keep_lambda.any():
            break
    self.n_iter_ = iter_ + 1
    if keep_lambda.any():
        # update sigma and mu using updated params from the last iteration
        sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
        coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
    else:
        # All features pruned: empty posterior covariance.
        sigma_ = np.array([]).reshape(0, 0)
    self.coef_ = coef_
    self.alpha_ = alpha_
    self.sigma_ = sigma_
    self.lambda_ = lambda_
    self._set_intercept(X_offset_, y_offset_, X_scale_)
    return self
def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):
    """Posterior covariance of the kept weights via the Woodbury identity.

    Used when n_samples < n_features so that only an
    (n_samples, n_samples) matrix needs to be (pseudo-)inverted:
    https://en.wikipedia.org/wiki/Woodbury_matrix_identity
    """
    n_samples = X.shape[0]
    X_active = X[:, keep_lambda]
    # Row vector holding 1 / lambda for the still-active features.
    lambda_inv = 1 / lambda_[keep_lambda].reshape(1, -1)
    inner = np.eye(n_samples, dtype=X.dtype) / alpha_ + np.dot(
        X_active * lambda_inv, X_active.T
    )
    proj = np.dot(pinvh(inner), X_active * lambda_inv)
    sigma_ = -np.dot(lambda_inv.reshape(-1, 1) * X_active.T, proj)
    # Add back the diagonal prior term 1 / lambda.
    sigma_[np.diag_indices(sigma_.shape[1])] += 1.0 / lambda_[keep_lambda]
    return sigma_
def _update_sigma(self, X, alpha_, lambda_, keep_lambda):
    """Posterior covariance of the kept weights by direct inversion.

    Used when n_samples >= n_features: the (n_active, n_active)
    regularized Gram matrix is pseudo-inverted.
    """
    X_active = X[:, keep_lambda]
    gram_matrix = X_active.T @ X_active
    identity = np.eye(gram_matrix.shape[0], dtype=X.dtype)
    # Precision matrix: diag(lambda) + alpha * X^T X.
    precision = lambda_[keep_lambda] * identity + alpha_ * gram_matrix
    return pinvh(precision)
def predict(self, X, return_std=False):
    """Predict using the linear model.

    In addition to the mean of the predictive distribution, its
    standard deviation can also be returned.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Samples.

    return_std : bool, default=False
        Whether to return the standard deviation of posterior prediction.

    Returns
    -------
    y_mean : array-like of shape (n_samples,)
        Mean of predictive distribution of query points.

    y_std : array-like of shape (n_samples,)
        Standard deviation of predictive distribution of query points.
    """
    y_mean = self._decision_function(X)
    if return_std is False:
        return y_mean
    # Restrict X to the features kept during fit so its width matches the
    # stored posterior covariance ``sigma_``.
    kept = self.lambda_ < self.threshold_lambda
    X_kept = _safe_indexing(X, indices=kept, axis=1)
    # Per-sample predictive variance: x^T Sigma x + 1 / alpha.
    var = (np.dot(X_kept, self.sigma_) * X_kept).sum(axis=1)
    y_std = np.sqrt(var + (1.0 / self.alpha_))
    return y_mean, y_std
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_sag.py | sklearn/linear_model/_sag.py | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._base import make_dataset
from sklearn.linear_model._sag_fast import sag32, sag64
from sklearn.utils import check_array
from sklearn.utils.extmath import row_norms
from sklearn.utils.validation import _check_sample_weight
def get_auto_step_size(
    max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
):
    """Compute automatic step size for SAG solver.

    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares for over all samples.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.

    loss : {'log', 'squared', 'multinomial'}
        The loss function used in SAG solver.

    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.

    n_samples : int, default=None
        Number of rows in X. Useful if is_saga=True.

    is_saga : bool, default=False
        Whether to return step size for the SAGA algorithm or the SAG
        algorithm.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`
    """
    if loss in ("log", "multinomial"):
        # Smoothness constant of the (multinomial) logistic loss; the
        # intercept contributes an extra implicit feature of value 1.
        L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
    elif loss == "squared":
        # inverse Lipschitz constant for squared loss
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        # Bug fix: the previous message omitted 'multinomial', which is a
        # valid loss handled above.
        raise ValueError(
            "Unknown loss function for SAG solver, got %s instead of 'log', "
            "'squared' or 'multinomial'" % loss
        )
    if is_saga:
        # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
        # See Defazio et al. 2014
        mun = min(2 * n_samples * alpha_scaled, L)
        step = 1.0 / (2 * L + mun)
    else:
        # SAG theoretical step size is 1/16L but it is recommended to use 1 / L
        # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
        # slide 65
        step = 1.0 / L
    return step
def sag_solver(
    X,
    y,
    sample_weight=None,
    loss="log",
    alpha=1.0,
    beta=0.0,
    max_iter=1000,
    tol=0.001,
    verbose=0,
    random_state=None,
    check_input=True,
    max_squared_sum=None,
    warm_start_mem=None,
    is_saga=False,
):
    """SAG solver for Ridge and LogisticRegression.

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : ndarray of shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder). For loss='log' it must be in [0, 1].

    sample_weight : array-like of shape (n_samples,), default=None
        Weights applied to individual samples (1. for unweighted).

    loss : {'log', 'squared', 'multinomial'}, default='log'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.

        .. versionadded:: 0.18
           *loss='multinomial'*

    alpha : float, default=1.
        L2 regularization term in the objective function
        ``(0.5 * alpha * || W ||_F^2)``.

    beta : float, default=0.
        L1 regularization term in the objective function
        ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.

    max_iter : int, default=1000
        The max number of passes over the training data if the stopping
        criteria is not reached.

    tol : float, default=0.001
        The stopping criteria for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol.

    verbose : int, default=0
        The verbosity level.

    random_state : int, RandomState instance or None, default=None
        Used when shuffling the data. Pass an int for reproducible output
        across multiple function calls.
        See :term:`Glossary <random_state>`.

    check_input : bool, default=True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default=None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem : dict, default=None
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
                if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
                for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
                samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.

    is_saga : bool, default=False
        Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allow for l1 regularisation.

    Returns
    -------
    coef_ : ndarray of shape (n_features,)
        Weight vector.

    n_iter_ : int
        The number of full pass on all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(n_samples, n_features)
    >>> y = rng.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    Ridge(solver='sag')

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    LogisticRegression(solver='sag')

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document

    :arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    "SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives" <1407.0202>`

    See Also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        _dtype = [np.float64, np.float32]
        X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C")
        y = check_array(y, dtype=_dtype, ensure_2d=False, order="C")

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples

    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == "multinomial" else 1

    # initialization
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    # Idiom: membership tests on the dict directly instead of `.keys()`.
    if "coef" in warm_start_mem:
        coef_init = warm_start_mem["coef"]
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=X.dtype)

    if "intercept_sum_gradient" in warm_start_mem:
        intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"]
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)

    if "gradient_memory" in warm_start_mem:
        gradient_memory_init = warm_start_mem["gradient_memory"]
    else:
        gradient_memory_init = np.zeros(
            (n_samples, n_classes), dtype=X.dtype, order="C"
        )

    if "sum_gradient" in warm_start_mem:
        sum_gradient_init = warm_start_mem["sum_gradient"]
    else:
        sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    if "seen" in warm_start_mem:
        seen_init = warm_start_mem["seen"]
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order="C")

    if "num_seen" in warm_start_mem:
        num_seen_init = warm_start_mem["num_seen"]
    else:
        num_seen_init = 0

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(
        max_squared_sum,
        alpha_scaled,
        loss,
        fit_intercept,
        n_samples=n_samples,
        is_saga=is_saga,
    )
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError(
            "Current sag implementation does not handle "
            "the case step_size * alpha_scaled == 1"
        )

    # Dispatch to the Cython routine matching the input dtype.
    sag = sag64 if X.dtype == np.float64 else sag32
    num_seen, n_iter_ = sag(
        dataset,
        coef_init,
        intercept_init,
        n_samples,
        n_features,
        n_classes,
        tol,
        max_iter,
        loss,
        step_size,
        alpha_scaled,
        beta_scaled,
        sum_gradient_init,
        gradient_memory_init,
        seen_init,
        num_seen_init,
        fit_intercept,
        intercept_sum_gradient,
        intercept_decay,
        is_saga,
        verbose,
    )

    if n_iter_ == max_iter:
        warnings.warn(
            "The max_iter was reached which means the coef_ did not converge",
            ConvergenceWarning,
        )

    if fit_intercept:
        # Store the intercept as the last row, matching the warm-start layout.
        coef_init = np.vstack((coef_init, intercept_init))

    warm_start_mem = {
        "coef": coef_init,
        "sum_gradient": sum_gradient_init,
        "intercept_sum_gradient": intercept_sum_gradient,
        "gradient_memory": gradient_memory_init,
        "seen": seen_init,
        "num_seen": num_seen,
    }

    if loss == "multinomial":
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]

    return coef_, n_iter_, warm_start_mem
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/__init__.py | sklearn/linear_model/__init__.py | """A variety of linear models."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from sklearn.linear_model._base import LinearRegression
from sklearn.linear_model._bayes import ARDRegression, BayesianRidge
from sklearn.linear_model._coordinate_descent import (
ElasticNet,
ElasticNetCV,
Lasso,
LassoCV,
MultiTaskElasticNet,
MultiTaskElasticNetCV,
MultiTaskLasso,
MultiTaskLassoCV,
enet_path,
lasso_path,
)
from sklearn.linear_model._glm import GammaRegressor, PoissonRegressor, TweedieRegressor
from sklearn.linear_model._huber import HuberRegressor
from sklearn.linear_model._least_angle import (
Lars,
LarsCV,
LassoLars,
LassoLarsCV,
LassoLarsIC,
lars_path,
lars_path_gram,
)
from sklearn.linear_model._logistic import LogisticRegression, LogisticRegressionCV
from sklearn.linear_model._omp import (
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
orthogonal_mp,
orthogonal_mp_gram,
)
from sklearn.linear_model._passive_aggressive import (
PassiveAggressiveClassifier,
PassiveAggressiveRegressor,
)
from sklearn.linear_model._perceptron import Perceptron
from sklearn.linear_model._quantile import QuantileRegressor
from sklearn.linear_model._ransac import RANSACRegressor
from sklearn.linear_model._ridge import (
Ridge,
RidgeClassifier,
RidgeClassifierCV,
RidgeCV,
ridge_regression,
)
from sklearn.linear_model._stochastic_gradient import (
SGDClassifier,
SGDOneClassSVM,
SGDRegressor,
)
from sklearn.linear_model._theil_sen import TheilSenRegressor
# Explicit public API of :mod:`sklearn.linear_model`; entries are kept in
# ASCII sort order (capitalized class names first, lowercase functions last).
__all__ = [
    "ARDRegression",
    "BayesianRidge",
    "ElasticNet",
    "ElasticNetCV",
    "GammaRegressor",
    "HuberRegressor",
    "Lars",
    "LarsCV",
    "Lasso",
    "LassoCV",
    "LassoLars",
    "LassoLarsCV",
    "LassoLarsIC",
    "LinearRegression",
    "LogisticRegression",
    "LogisticRegressionCV",
    "MultiTaskElasticNet",
    "MultiTaskElasticNetCV",
    "MultiTaskLasso",
    "MultiTaskLassoCV",
    "OrthogonalMatchingPursuit",
    "OrthogonalMatchingPursuitCV",
    "PassiveAggressiveClassifier",
    "PassiveAggressiveRegressor",
    "Perceptron",
    "PoissonRegressor",
    "QuantileRegressor",
    "RANSACRegressor",
    "Ridge",
    "RidgeCV",
    "RidgeClassifier",
    "RidgeClassifierCV",
    "SGDClassifier",
    "SGDOneClassSVM",
    "SGDRegressor",
    "TheilSenRegressor",
    "TweedieRegressor",
    "enet_path",
    "lars_path",
    "lars_path_gram",
    "lasso_path",
    "orthogonal_mp",
    "orthogonal_mp_gram",
    "ridge_regression",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_quantile.py | sklearn/linear_model/_quantile.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Real
import numpy as np
from scipy import sparse
from scipy.optimize import linprog
from sklearn.base import BaseEstimator, RegressorMixin, _fit_context
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._base import LinearModel
from sklearn.utils import _safe_indexing
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.fixes import parse_version, sp_version
from sklearn.utils.validation import _check_sample_weight, validate_data
class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
    """Linear regression model that predicts conditional quantiles.

    The linear :class:`QuantileRegressor` optimizes the pinball loss for a
    desired `quantile` and is robust to outliers.

    This model uses an L1 regularization like
    :class:`~sklearn.linear_model.Lasso`.

    Read more in the :ref:`User Guide <quantile_regression>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    quantile : float, default=0.5
        The quantile that the model tries to predict. It must be strictly
        between 0 and 1. If 0.5 (default), the model predicts the 50%
        quantile, i.e. the median.

    alpha : float, default=1.0
        Regularization constant that multiplies the L1 penalty term.

    fit_intercept : bool, default=True
        Whether or not to fit the intercept.

    solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \
            'revised simplex'}, default='highs'
        Method used by :func:`scipy.optimize.linprog` to solve the linear
        programming formulation.

        It is recommended to use the highs methods because
        they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs"
        support sparse input data and, in fact, always convert to sparse csc.

        From `scipy>=1.11.0`, "interior-point" is not available anymore.

        .. versionchanged:: 1.4
           The default of `solver` changed to `"highs"` in version 1.4.

    solver_options : dict, default=None
        Additional parameters passed to :func:`scipy.optimize.linprog` as
        options. If `None` and if `solver='interior-point'`, then
        `{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the
        sake of stability.

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Estimated coefficients for the features.

    intercept_ : float
        The intercept of the model, aka bias term.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The actual number of iterations performed by the solver.

    See Also
    --------
    Lasso : The Lasso is a linear model that estimates sparse coefficients
        with l1 regularization.
    HuberRegressor : Linear regression model that is robust to outliers.

    Examples
    --------
    >>> from sklearn.linear_model import QuantileRegressor
    >>> import numpy as np
    >>> n_samples, n_features = 10, 2
    >>> rng = np.random.RandomState(0)
    >>> y = rng.randn(n_samples)
    >>> X = rng.randn(n_samples, n_features)
    >>> # the following line is optional in practice
    >>> from sklearn.utils.fixes import sp_version, parse_version
    >>> reg = QuantileRegressor(quantile=0.8).fit(X, y)
    >>> np.mean(y <= reg.predict(X))
    np.float64(0.8)
    """

    _parameter_constraints: dict = {
        "quantile": [Interval(Real, 0, 1, closed="neither")],
        "alpha": [Interval(Real, 0, None, closed="left")],
        "fit_intercept": ["boolean"],
        "solver": [
            StrOptions(
                {
                    "highs-ds",
                    "highs-ipm",
                    "highs",
                    "interior-point",
                    "revised simplex",
                }
            ),
        ],
        "solver_options": [dict, None],
    }

    def __init__(
        self,
        *,
        quantile=0.5,
        alpha=1.0,
        fit_intercept=True,
        solver="highs",
        solver_options=None,
    ):
        self.quantile = quantile
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.solver = solver
        self.solver_options = solver_options

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csc", "csr", "coo"],
            y_numeric=True,
            multi_output=False,
        )
        sample_weight = _check_sample_weight(sample_weight, X)

        n_features = X.shape[1]
        n_params = n_features

        if self.fit_intercept:
            n_params += 1
            # Note that centering y and X with _preprocess_data does not work
            # for quantile regression.

        # The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
        # So we rescale the penalty term, which is equivalent.
        alpha = np.sum(sample_weight) * self.alpha

        if self.solver == "interior-point" and sp_version >= parse_version("1.11.0"):
            raise ValueError(
                f"Solver {self.solver} is not anymore available in SciPy >= 1.11.0."
            )

        if sparse.issparse(X) and self.solver not in ["highs", "highs-ds", "highs-ipm"]:
            raise ValueError(
                f"Solver {self.solver} does not support sparse X. "
                "Use solver 'highs' for example."
            )
        # make default solver more stable
        if self.solver_options is None and self.solver == "interior-point":
            solver_options = {"lstsq": True}
        else:
            solver_options = self.solver_options

        # After rescaling alpha, the minimization problem is
        #     min sum(pinball loss) + alpha * L1
        # Use linear programming formulation of quantile regression
        #     min_x c x
        #           A_eq x = b_eq
        #                0 <= x
        # x = (s0, s, t0, t, u, v) = slack variables >= 0
        # intercept = s0 - t0
        # coef = s - t
        # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
        # residual = y - X@coef - intercept = u - v
        # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
        # b_eq = y
        # p = n_features
        # n = n_samples
        # 1_n = vector of length n with entries equal one
        # see https://stats.stackexchange.com/questions/384909/
        #
        # Filtering out zero sample weights from the beginning makes life
        # easier for the linprog solver.
        indices = np.nonzero(sample_weight)[0]
        n_indices = len(indices)  # use n_mask instead of n_samples
        if n_indices < len(sample_weight):
            sample_weight = sample_weight[indices]
            X = _safe_indexing(X, indices)
            y = _safe_indexing(y, indices)
        c = np.concatenate(
            [
                np.full(2 * n_params, fill_value=alpha),
                sample_weight * self.quantile,
                sample_weight * (1 - self.quantile),
            ]
        )
        if self.fit_intercept:
            # do not penalize the intercept
            c[0] = 0
            c[n_params] = 0

        if self.solver in ["highs", "highs-ds", "highs-ipm"]:
            # Note that highs methods always use a sparse CSC memory layout internally,
            # even for optimization problems parametrized using dense numpy arrays.
            # Therefore, we work with CSC matrices as early as possible to limit
            # unnecessary repeated memory copies.
            eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
            if self.fit_intercept:
                ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
                A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
            else:
                A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
        else:
            eye = np.eye(n_indices)
            if self.fit_intercept:
                ones = np.ones((n_indices, 1))
                A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
            else:
                A_eq = np.concatenate([X, -X, eye, -eye], axis=1)

        b_eq = y

        result = linprog(
            c=c,
            A_eq=A_eq,
            b_eq=b_eq,
            method=self.solver,
            options=solver_options,
        )
        solution = result.x
        if not result.success:
            failure = {
                1: "Iteration limit reached.",
                2: "Problem appears to be infeasible.",
                3: "Problem appears to be unbounded.",
                4: "Numerical difficulties encountered.",
            }
            # Use `.get` for a read-only default lookup; `.setdefault` would
            # needlessly mutate the mapping.
            warnings.warn(
                "Linear programming for QuantileRegressor did not succeed.\n"
                f"Status is {result.status}: "
                + failure.get(result.status, "unknown reason")
                + "\n"
                + "Result message of linprog:\n"
                + result.message,
                ConvergenceWarning,
            )

        # positive slack - negative slack
        # solution is an array with (params_pos, params_neg, u, v)
        params = solution[:n_params] - solution[n_params : 2 * n_params]

        self.n_iter_ = result.nit

        if self.fit_intercept:
            self.coef_ = params[1:]
            self.intercept_ = params[0]
        else:
            self.coef_ = params
            self.intercept_ = 0.0
        return self

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_logistic.py | sklearn/linear_model/_logistic.py | """
Logistic Regression
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import optimize
from sklearn._loss.loss import HalfBinomialLoss, HalfMultinomialLoss
from sklearn.base import _fit_context
from sklearn.linear_model._base import (
BaseEstimator,
LinearClassifierMixin,
SparseCoefMixin,
)
from sklearn.linear_model._glm.glm import NewtonCholeskySolver
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn.linear_model._sag import sag_solver
from sklearn.metrics import get_scorer, get_scorer_names
from sklearn.model_selection import check_cv
from sklearn.preprocessing import LabelEncoder
from sklearn.svm._base import _fit_liblinear
from sklearn.utils import (
Bunch,
check_array,
check_consistent_length,
check_random_state,
compute_class_weight,
)
from sklearn.utils._param_validation import Hidden, Interval, StrOptions
from sklearn.utils.extmath import row_norms, softmax
from sklearn.utils.fixes import _get_additional_lbfgs_options_dict
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.optimize import _check_optimize_result, _newton_cg
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_method_params,
_check_sample_weight,
check_is_fitted,
validate_data,
)
# Suffix appended to convergence warnings raised by the logistic-regression
# solvers; it points users at the solver-options section of the docs.
_LOGISTIC_SOLVER_CONVERGENCE_MSG = (
    "Please also refer to the documentation for alternative solver options:\n"
    " https://scikit-learn.org/stable/modules/linear_model.html"
    "#logistic-regression"
)
def _check_solver(solver, penalty, dual):
    """Validate that ``penalty`` and ``dual`` are compatible with ``solver``.

    Raises ``ValueError`` on an unsupported combination; otherwise returns
    ``solver`` unchanged.
    """
    # Only liblinear and saga accept penalties other than l2/None.
    handles_any_penalty = solver in ["liblinear", "saga"]
    if not handles_any_penalty and penalty not in ("l2", None):
        raise ValueError(
            f"Solver {solver} supports only 'l2' or None penalties, got {penalty} "
            "penalty."
        )

    # The dual formulation is a liblinear-only feature.
    if dual and solver != "liblinear":
        raise ValueError(f"Solver {solver} supports only dual=False, got dual={dual}")

    if solver != "saga" and penalty == "elasticnet":
        raise ValueError(
            f"Only 'saga' solver supports elasticnet penalty, got solver={solver}."
        )

    if penalty is None and solver == "liblinear":
        # TODO(1.10): update message to remove "as well as penalty=None".
        raise ValueError(
            "C=np.inf as well as penalty=None is not supported for the liblinear solver"
        )

    return solver
def _logistic_regression_path(
    X,
    y,
    *,
    classes,
    Cs=10,
    fit_intercept=True,
    max_iter=100,
    tol=1e-4,
    verbose=0,
    solver="lbfgs",
    coef=None,
    class_weight=None,
    dual=False,
    penalty="l2",
    intercept_scaling=1.0,
    random_state=None,
    check_input=True,
    max_squared_sum=None,
    sample_weight=None,
    l1_ratio=None,
    n_threads=1,
):
    """Compute a Logistic Regression model for a list of regularization
    parameters.
    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input data.
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.
    classes : ndarray
        A list of class labels known to the classifier.
    Cs : int or array-like of shape (n_cs,), default=10
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.
    fit_intercept : bool, default=True
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).
    max_iter : int, default=100
        Maximum number of iterations for the solver.
    tol : float, default=1e-4
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.
    verbose : int, default=0
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
            default='lbfgs'
        Numerical solver to use.
    coef : array-like of shape (n_classes, features + int(fit_intercept)) or \
        (1, n_features + int(fit_intercept)) or \
        (n_features + int(fit_intercept)), default=None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.
    class_weight : dict or 'balanced', default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    dual : bool, default=False
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.
    intercept_scaling : float, default=1.
        Useful only when the solver `liblinear` is used
        and `self.fit_intercept` is set to `True`. In this case, `x` becomes
        `[x, self.intercept_scaling]`,
        i.e. a "synthetic" feature with constant value equal to
        `intercept_scaling` is appended to the instance vector.
        The intercept becomes
        ``intercept_scaling * synthetic_feature_weight``.
        .. note::
            The synthetic feature weight is subject to L1 or L2
            regularization as all other features.
            To lessen the effect of regularization on synthetic feature weight
            (and therefore on the intercept) `intercept_scaling` has to be increased.
    random_state : int, RandomState instance, default=None
        Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
        data. See :term:`Glossary <random_state>` for details.
    check_input : bool, default=True
        If False, the input arrays X and y will not be checked.
    max_squared_sum : float, default=None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like of shape (n_samples,), default=None
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.
    l1_ratio : float, default=None
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.
    n_threads : int, default=1
        Number of OpenMP threads to use.
    Returns
    -------
    coefs : ndarray of shape (n_cs, n_classes, n_features + int(fit_intercept)) or \
        (n_cs, n_features + int(fit_intercept))
        List of coefficients for the Logistic Regression model. If fit_intercept is set
        to True, then the last dimension will be n_features + 1, where the last item
        represents the intercept.
        For binary problems the second dimension in n_classes is dropped, i.e. the shape
        will be `(n_cs, n_features + int(fit_intercept))`.
    Cs : ndarray
        Grid of Cs used for cross-validation.
    n_iter : array of shape (n_cs,)
        Actual number of iteration for each C in Cs.
    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    .. versionchanged:: 0.19
        The "copy" parameter was removed.
    """
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)
    solver = _check_solver(solver, penalty, dual)
    # Preprocessing.
    if check_input:
        X = check_array(
            X,
            accept_sparse="csr",
            dtype=np.float64,
            accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
        )
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    # Copy sample_weight because it is multiplied in place by the class
    # weights below.
    if sample_weight is not None or class_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True)
    n_samples, n_features = X.shape
    n_classes = len(classes)
    is_binary = n_classes == 2
    if solver == "liblinear" and not is_binary:
        raise ValueError(
            "The 'liblinear' solver does not support multiclass classification"
            " (n_classes >= 3). Either use another solver or wrap the "
            "estimator in a OneVsRestClassifier to keep applying a "
            "one-versus-rest scheme."
        )
    random_state = check_random_state(random_state)
    le = LabelEncoder().fit(classes)
    if class_weight is not None:
        class_weight_ = compute_class_weight(
            class_weight, classes=classes, y=y, sample_weight=sample_weight
        )
        sample_weight *= class_weight_[le.transform(y)]
    if is_binary:
        # w0 is the flat parameter vector (coefficients [+ intercept]).
        w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
        mask = y == classes[1]
        y_bin = np.ones(y.shape, dtype=X.dtype)
        if solver == "liblinear":
            y_bin[~mask] = -1.0
        else:
            # HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead
            # of in [-1, 1].
            y_bin[~mask] = 0.0
    else:
        # All solvers capable of a multinomial need LabelEncoder, not LabelBinarizer,
        # i.e. y as a 1d-array of integers. LabelEncoder also saves memory
        # compared to LabelBinarizer, especially when n_classes is large.
        Y_multi = le.transform(y).astype(X.dtype, copy=False)
        # It is important that w0 is F-contiguous.
        w0 = np.zeros(
            (classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype
        )
    # IMPORTANT NOTE:
    # All solvers relying on LinearModelLoss need to scale the penalty with n_samples
    # or the sum of sample weights because the implemented logistic regression
    # objective here is (unfortunately)
    # C * sum(pointwise_loss) + penalty
    # instead of (as LinearModelLoss does)
    # mean(pointwise_loss) + 1/C * penalty
    if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
        # This needs to be calculated after sample_weight is multiplied by
        # class_weight. It is even tested that passing class_weight is equivalent to
        # passing sample_weights according to class_weight.
        sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
    # Warm-start from a user supplied coef, validating its shape first.
    if coef is not None:
        if is_binary:
            if coef.ndim == 1 and coef.shape[0] == n_features + int(fit_intercept):
                w0[:] = coef
            elif (
                coef.ndim == 2
                and coef.shape[0] == 1
                and coef.shape[1] == n_features + int(fit_intercept)
            ):
                w0[:] = coef[0]
            else:
                msg = (
                    f"Initialization coef is of shape {coef.shape}, expected shape "
                    f"{w0.shape} or (1, {w0.shape[0]})"
                )
                raise ValueError(msg)
        else:
            if (
                coef.ndim == 2
                and coef.shape[0] == n_classes
                and coef.shape[1] == n_features + int(fit_intercept)
            ):
                w0[:, : coef.shape[1]] = coef
            else:
                msg = (
                    f"Initialization coef is of shape {coef.shape}, expected shape "
                    f"{w0.shape}"
                )
                raise ValueError(msg)
    if is_binary:
        target = y_bin
        loss = LinearModelLoss(
            base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
        )
        if solver == "lbfgs":
            func = loss.loss_gradient
        elif solver == "newton-cg":
            func = loss.loss
            grad = loss.gradient
            hess = loss.gradient_hessian_product  # hess = [gradient, hessp]
        warm_start_sag = {"coef": np.expand_dims(w0, axis=1)}
    else:  # multinomial
        loss = LinearModelLoss(
            base_loss=HalfMultinomialLoss(n_classes=classes.size),
            fit_intercept=fit_intercept,
        )
        target = Y_multi
        if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
            # scipy.optimize.minimize and newton-cg accept only ravelled parameters,
            # i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and
            # reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F").
            # As w0 is F-contiguous, ravel(order="F") also avoids a copy.
            w0 = w0.ravel(order="F")
        if solver == "lbfgs":
            func = loss.loss_gradient
        elif solver == "newton-cg":
            func = loss.loss
            grad = loss.gradient
            hess = loss.gradient_hessian_product  # hess = [gradient, hessp]
        warm_start_sag = {"coef": w0.T}
    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    # Solve once per C; w0 carries the previous solution over as a warm start.
    for i, C in enumerate(Cs):
        if solver == "lbfgs":
            l2_reg_strength = 1.0 / (C * sw_sum)
            # Map `verbose` onto scipy lbfgs `iprint` verbosity levels.
            iprint = [-1, 50, 1, 100, 101][
                np.searchsorted(np.array([0, 1, 2, 3]), verbose)
            ]
            opt_res = optimize.minimize(
                func,
                w0,
                method="L-BFGS-B",
                jac=True,
                args=(X, target, sample_weight, l2_reg_strength, n_threads),
                options={
                    "maxiter": max_iter,
                    "maxls": 50,  # default is 20
                    "gtol": tol,
                    "ftol": 64 * np.finfo(float).eps,
                    **_get_additional_lbfgs_options_dict("iprint", iprint),
                },
            )
            n_iter_i = _check_optimize_result(
                solver,
                opt_res,
                max_iter,
                extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
            )
            w0, loss = opt_res.x, opt_res.fun
        elif solver == "newton-cg":
            l2_reg_strength = 1.0 / (C * sw_sum)
            args = (X, target, sample_weight, l2_reg_strength, n_threads)
            w0, n_iter_i = _newton_cg(
                grad_hess=hess,
                func=func,
                grad=grad,
                x0=w0,
                args=args,
                maxiter=max_iter,
                tol=tol,
                verbose=verbose,
            )
        elif solver == "newton-cholesky":
            l2_reg_strength = 1.0 / (C * sw_sum)
            sol = NewtonCholeskySolver(
                coef=w0,
                linear_loss=loss,
                l2_reg_strength=l2_reg_strength,
                tol=tol,
                max_iter=max_iter,
                n_threads=n_threads,
                verbose=verbose,
            )
            w0 = sol.solve(X=X, y=target, sample_weight=sample_weight)
            n_iter_i = sol.iteration
        elif solver == "liblinear":
            coef_, intercept_, n_iter_i = _fit_liblinear(
                X,
                target,
                C,
                fit_intercept,
                intercept_scaling,
                None,
                penalty,
                dual,
                verbose,
                max_iter,
                tol,
                random_state,
                sample_weight=sample_weight,
            )
            # liblinear returns coefficients and intercept separately; repack
            # them into the flat parameter vector w0.
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()
            # n_iter_i is an array for each class. However, `target` is always encoded
            # in {-1, 1}, so we only take the first element of n_iter_i.
            n_iter_i = n_iter_i.item()
        elif solver in ["sag", "saga"]:
            if is_binary:
                loss = "log"
            else:
                target = target.astype(X.dtype, copy=False)
                loss = "multinomial"
            # alpha is for L2-norm, beta is for L1-norm
            if penalty == "l1":
                alpha = 0.0
                beta = 1.0 / C
            elif penalty == "l2":
                alpha = 1.0 / C
                beta = 0.0
            else:  # Elastic-Net penalty
                alpha = (1.0 / C) * (1 - l1_ratio)
                beta = (1.0 / C) * l1_ratio
            w0, n_iter_i, warm_start_sag = sag_solver(
                X,
                target,
                sample_weight,
                loss,
                alpha,
                beta,
                max_iter,
                tol,
                verbose,
                random_state,
                False,
                max_squared_sum,
                warm_start_sag,
                is_saga=(solver == "saga"),
            )
        else:
            msg = (
                "solver must be one of {'lbfgs', 'liblinear', 'newton-cg', "
                "'newton-cholesky', 'sag', 'saga'}, "
                f"got '{solver}' instead."
            )
            raise ValueError(msg)
        if is_binary:
            coefs.append(w0.copy())
        else:
            if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
                # These solvers operated on raveled parameters; restore the
                # 2d (n_classes, -1) layout.
                multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
            else:
                multi_w0 = w0
            coefs.append(multi_w0.copy())
        n_iter[i] = n_iter_i
    return np.array(coefs), np.array(Cs), n_iter
# helper function for LogisticCV
def _log_reg_scoring_path(
    X,
    y,
    train,
    test,
    *,
    classes,
    Cs,
    scoring,
    fit_intercept,
    max_iter,
    tol,
    class_weight,
    verbose,
    solver,
    penalty,
    dual,
    intercept_scaling,
    random_state,
    max_squared_sum,
    sample_weight,
    l1_ratio,
    score_params,
):
    """Computes scores across logistic_regression_path
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target labels.
    train : list of indices
        The indices of the train set.
    test : list of indices
        The indices of the test set.
    classes : ndarray
        A list of class labels known to the classifier.
    Cs : int or list of floats
        Each of the values in Cs describes the inverse of
        regularization strength. If Cs is as an int, then a grid of Cs
        values are chosen in a logarithmic scale between 1e-4 and 1e4.
    scoring : str, callable or None
        The scoring method to use for cross-validation. Options:
        - str: see :ref:`scoring_string_names` for options.
        - callable: a scorer callable object (e.g., function) with signature
          ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
        - `None`: :ref:`accuracy <accuracy_score>` is used.
    fit_intercept : bool
        If False, then the bias term is set to zero. Else the last
        term of each coef_ gives us the intercept.
    max_iter : int
        Maximum number of iterations for the solver.
    tol : float
        Tolerance for stopping criteria.
    class_weight : dict or 'balanced'
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}
        Decides which solver to use.
    penalty : {'l1', 'l2', 'elasticnet'}
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    intercept_scaling : float
        Useful only when the solver `liblinear` is used
        and `self.fit_intercept` is set to `True`. In this case, `x` becomes
        `[x, self.intercept_scaling]`,
        i.e. a "synthetic" feature with constant value equal to
        `intercept_scaling` is appended to the instance vector.
        The intercept becomes
        ``intercept_scaling * synthetic_feature_weight``.
        .. note::
            The synthetic feature weight is subject to L1 or L2
            regularization as all other features.
            To lessen the effect of regularization on synthetic feature weight
            (and therefore on the intercept) `intercept_scaling` has to be increased.
    random_state : int, RandomState instance
        Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
        data. See :term:`Glossary <random_state>` for details.
    max_squared_sum : float
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like of shape (n_samples,)
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.
    l1_ratio : float
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.
    score_params : dict
        Parameters to pass to the `score` method of the underlying scorer.
    Returns
    -------
    coefs : ndarray of shape (n_cs, n_classes, n_features + int(fit_intercept)) or \
        (n_cs, n_features + int(fit_intercept))
        List of coefficients for the Logistic Regression model. If fit_intercept is set
        to True, then the last dimension will be n_features + 1, where the last item
        represents the intercept.
        For binary problems the second dimension in n_classes is dropped, i.e. the shape
        will be `(n_cs, n_features + int(fit_intercept))`.
    Cs : ndarray of shape (n_cs,)
        Grid of Cs used for cross-validation.
    scores : ndarray of shape (n_cs,)
        Scores obtained for each Cs.
    n_iter : ndarray of shape (n_cs,)
        Actual number of iteration for each C in Cs.
    """
    # Split data into the train fold (used to fit the path) and the test fold
    # (used only for scoring).
    X_train = X[train]
    X_test = X[test]
    y_train = y[train]
    y_test = y[test]
    sw_train, sw_test = None, None
    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X)
        sw_train = sample_weight[train]
        sw_test = sample_weight[test]
    # Note: We pass classes for the whole dataset to avoid inconsistencies,
    # i.e. different number of classes in different folds. This way, if a class
    # is not present in a fold, _logistic_regression_path will still return
    # coefficients associated to this class.
    coefs, Cs, n_iter = _logistic_regression_path(
        X_train,
        y_train,
        classes=classes,
        Cs=Cs,
        l1_ratio=l1_ratio,
        fit_intercept=fit_intercept,
        solver=solver,
        max_iter=max_iter,
        class_weight=class_weight,
        tol=tol,
        verbose=verbose,
        dual=dual,
        penalty=penalty,
        intercept_scaling=intercept_scaling,
        random_state=random_state,
        check_input=False,
        max_squared_sum=max_squared_sum,
        sample_weight=sw_train,
    )
    log_reg = LogisticRegression(solver=solver)
    # The score method of Logistic Regression has a classes_ attribute.
    log_reg.classes_ = classes
    scores = list()
    # NOTE(review): the accuracy default below relies on get_scorer(None)
    # returning None -- confirm against get_scorer's contract.
    scoring = get_scorer(scoring)
    # Install each coefficient vector of the path on the estimator and score
    # it on the test fold.
    for w in coefs:
        if fit_intercept:
            log_reg.coef_ = w[..., :-1]
            log_reg.intercept_ = w[..., -1]
        else:
            log_reg.coef_ = w
            log_reg.intercept_ = 0.0
        if scoring is None:
            scores.append(log_reg.score(X_test, y_test, sample_weight=sw_test))
        else:
            score_params = score_params or {}
            score_params = _check_method_params(X=X, params=score_params, indices=test)
            # FIXME: If scoring = "neg_brier_score" and if not all class labels
            # are present in y_test, the following fails. Maybe we can pass
            # "labels=classes" to the call of scoring.
            scores.append(scoring(log_reg, X_test, y_test, **score_params))
    return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
"""
Logistic Regression (aka logit, MaxEnt) classifier.
This class implements regularized logistic regression using a set of available
solvers. **Note that regularization is applied by default**. It can handle both
dense and sparse input `X`. Use C-ordered arrays or CSR matrices containing 64-bit
floats for optimal performance; any other input format will be converted (and
copied).
The solvers 'lbfgs', 'newton-cg', 'newton-cholesky' and 'sag' support only L2
regularization with primal formulation, or no regularization. The 'liblinear'
solver supports both L1 and L2 regularization (but not both, i.e. elastic-net),
with a dual formulation only for the L2 penalty. The Elastic-Net (combination of L1
and L2) regularization is only supported by the 'saga' solver.
For :term:`multiclass` problems (whenever `n_classes >= 3`), all solvers except
'liblinear' optimize the (penalized) multinomial loss. 'liblinear' only handles
binary classification but can be extended to handle multiclass by using
:class:`~sklearn.multiclass.OneVsRestClassifier`.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : {'l1', 'l2', 'elasticnet', None}, default='l2'
Specify the norm of the penalty:
- `None`: no penalty is added;
- `'l2'`: add a L2 penalty term and it is the default choice;
- `'l1'`: add a L1 penalty term;
- `'elasticnet'`: both L1 and L2 penalty terms are added.
.. warning::
Some penalties may not work with some solvers. See the parameter
`solver` below, to know the compatibility between the penalty and
solver.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
.. deprecated:: 1.8
`penalty` was deprecated in version 1.8 and will be removed in 1.10.
Use `l1_ratio` instead. `l1_ratio=0` for `penalty='l2'`, `l1_ratio=1` for
`penalty='l1'` and `l1_ratio` set to any float between 0 and 1 for
`'penalty='elasticnet'`.
C : float, default=1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization. `C=np.inf` results in unpenalized logistic regression.
For a visual example on the effect of tuning the `C` parameter
with an L1 penalty, see:
:ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.
l1_ratio : float, default=0.0
The Elastic-Net mixing parameter, with `0 <= l1_ratio <= 1`. Setting
`l1_ratio=1` gives a pure L1-penalty, setting `l1_ratio=0` a pure L2-penalty.
Any value between 0 and 1 gives an Elastic-Net penalty of the form
`l1_ratio * L1 + (1 - l1_ratio) * L2`.
.. warning::
Certain values of `l1_ratio`, i.e. some penalties, may not work with some
solvers. See the parameter `solver` below, to know the compatibility between
the penalty and solver.
.. versionchanged:: 1.8
Default value changed from None to 0.0.
.. deprecated:: 1.8
`None` is deprecated and will be removed in version 1.10. Always use
`l1_ratio` to specify the penalty type.
dual : bool, default=False
Dual (constrained) or primal (regularized, see also
:ref:`this equation <regularized-logistic-loss>`) formulation. Dual formulation
is only implemented for l2 penalty with liblinear solver. Prefer `dual=False`
when n_samples > n_features.
tol : float, default=1e-4
Tolerance for stopping criteria.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default=1
Useful only when the solver `liblinear` is used
and `self.fit_intercept` is set to `True`. In this case, `x` becomes
`[x, self.intercept_scaling]`,
i.e. a "synthetic" feature with constant value equal to
`intercept_scaling` is appended to the instance vector.
The intercept becomes
``intercept_scaling * synthetic_feature_weight``.
.. note::
The synthetic feature weight is subject to L1 or L2
regularization as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) `intercept_scaling` has to be increased.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem. Default is 'lbfgs'.
To choose a solver, you might want to consider the following aspects:
- 'lbfgs' is a good default solver because it works reasonably well for a wide
class of problems.
- For :term:`multiclass` problems (`n_classes >= 3`), all solvers except
'liblinear' minimize the full multinomial loss, 'liblinear' will raise an
error.
- 'newton-cholesky' is a good choice for
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_huber.py | sklearn/linear_model/_huber.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
from scipy import optimize
from sklearn.base import BaseEstimator, RegressorMixin, _fit_context
from sklearn.linear_model._base import LinearModel
from sklearn.utils._mask import axis0_safe_slice
from sklearn.utils._param_validation import Interval
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.fixes import _get_additional_lbfgs_options_dict
from sklearn.utils.optimize import _check_optimize_result
from sklearn.utils.validation import _check_sample_weight, validate_data
def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
    """Compute the Huber loss and its gradient w.r.t. the packed parameters.

    Parameters
    ----------
    w : ndarray, shape (n_features + 1,) or (n_features + 2,)
        Packed parameter vector: the first ``n_features`` entries are the
        coefficients, the last entry is the scale ``sigma`` and, when the
        intercept is fit, the second to last entry is the intercept.
    X : ndarray of shape (n_samples, n_features)
        Input data.
    y : ndarray of shape (n_samples,)
        Target vector.
    epsilon : float
        Robustness parameter of the Huber estimator.
    alpha : float
        L2 regularization strength.
    sample_weight : ndarray of shape (n_samples,), default=None
        Weight assigned to each sample.

    Returns
    -------
    loss : float
        Huber loss.
    gradient : ndarray, shape (len(w),)
        Derivative of the Huber loss with respect to each coefficient, the
        intercept (when fit) and the scale, packed like ``w``.
    """
    _, n_features = X.shape
    # The intercept is fit iff w carries one extra entry besides the scale.
    fit_intercept = w.shape[0] == n_features + 2
    if fit_intercept:
        intercept = w[-2]
    sigma = w[-1]
    w = w[:n_features]
    n_samples = np.sum(sample_weight)
    # Residuals; samples with |residual| > epsilon * sigma are outliers and
    # contribute linearly to the loss, all others contribute quadratically.
    residual = y - safe_sparse_dot(X, w)
    if fit_intercept:
        residual -= intercept
    abs_residual = np.abs(residual)
    out_mask = abs_residual > epsilon * sigma
    # Linear (outlier) part: (2 * M * |residual| / sigma - M**2) * sigma.
    out_abs_residual = abs_residual[out_mask]
    n_out = np.count_nonzero(out_mask)
    n_in = X.shape[0] - n_out
    # out_weight_sum is the weighted count of outliers, while n_out is the
    # plain count.
    out_weights = sample_weight[out_mask]
    out_weight_sum = np.sum(out_weights)
    linear_part = (
        2.0 * epsilon * np.sum(out_weights * out_abs_residual)
        - sigma * out_weight_sum * epsilon**2
    )
    # Quadratic (inlier) part: |residual**2 / sigma**2| * sigma.
    in_residual = residual[~out_mask]
    weighted_in_residual = sample_weight[~out_mask] * in_residual
    squared_part = np.dot(weighted_in_residual.T, in_residual) / sigma
    grad = np.zeros(n_features + 2 if fit_intercept else n_features + 1)
    # Coefficient gradient: contribution of the quadratic part ...
    X_in_neg = -axis0_safe_slice(X, ~out_mask, n_in)
    grad[:n_features] = 2.0 / sigma * safe_sparse_dot(weighted_in_residual, X_in_neg)
    # ... plus the linear part, signed by each outlier's residual ...
    out_signs = np.ones_like(out_abs_residual)
    out_signs[residual[out_mask] < 0] = -1.0
    X_out = axis0_safe_slice(X, out_mask, n_out)
    signed_out_weights = sample_weight[out_mask] * out_signs
    grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(signed_out_weights, X_out))
    # ... plus the derivative of the L2 penalty.
    grad[:n_features] += alpha * 2.0 * w
    # Gradient with respect to the scale sigma.
    grad[-1] = n_samples
    grad[-1] -= out_weight_sum * epsilon**2
    grad[-1] -= squared_part / sigma
    # Gradient with respect to the intercept.
    if fit_intercept:
        grad[-2] = -2.0 * np.sum(weighted_in_residual) / sigma
        grad[-2] -= 2.0 * epsilon * np.sum(signed_out_weights)
    loss = n_samples * sigma + squared_part + linear_part
    loss += alpha * np.dot(w, w)
    return loss, grad
class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
"""L2-regularized linear regression model that is robust to outliers.
The Huber Regressor optimizes the squared loss for the samples where
``|(y - Xw - c) / sigma| < epsilon`` and the absolute loss for the samples
where ``|(y - Xw - c) / sigma| > epsilon``, where the model coefficients
``w``, the intercept ``c`` and the scale ``sigma`` are parameters
to be optimized. The parameter `sigma` makes sure that if `y` is scaled up
or down by a certain factor, one does not need to rescale `epsilon` to
achieve the same robustness. Note that this does not take into account
the fact that the different features of `X` may be of different scales.
The Huber loss function has the advantage of not being heavily influenced
by the outliers while not completely ignoring their effect.
Read more in the :ref:`User Guide <huber_regression>`
.. versionadded:: 0.18
Parameters
----------
epsilon : float, default=1.35
The parameter epsilon controls the number of samples that should be
classified as outliers. The smaller the epsilon, the more robust it is
to outliers. Epsilon must be in the range `[1, inf)`.
max_iter : int, default=100
Maximum number of iterations that
``scipy.optimize.minimize(method="L-BFGS-B")`` should run for.
alpha : float, default=0.0001
Strength of the squared L2 regularization. Note that the penalty is
equal to ``alpha * ||w||^2``.
Must be in the range `[0, inf)`.
warm_start : bool, default=False
This is useful if the stored attributes of a previously used model
has to be reused. If set to False, then the coefficients will
be rewritten for every call to fit.
See :term:`the Glossary <warm_start>`.
fit_intercept : bool, default=True
Whether or not to fit the intercept. This can be set to False
if the data is already centered around the origin.
tol : float, default=1e-05
The iteration will stop when
``max{|proj g_i | i = 1, ..., n}`` <= ``tol``
where pg_i is the i-th component of the projected gradient.
Attributes
----------
coef_ : array, shape (n_features,)
Features got by optimizing the L2-regularized Huber loss.
intercept_ : float
Bias.
scale_ : float
The value by which ``|y - Xw - c|`` is scaled down.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations that
``scipy.optimize.minimize(method="L-BFGS-B")`` has run for.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
outliers_ : array, shape (n_samples,)
A boolean mask which is set to True where the samples are identified
as outliers.
See Also
--------
RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
References
----------
.. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics
Concomitant scale estimates, p. 172
.. [2] Art B. Owen (2006), `A robust hybrid of lasso and ridge regression.
<https://artowen.su.domains/reports/hhu.pdf>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import HuberRegressor, LinearRegression
>>> from sklearn.datasets import make_regression
>>> rng = np.random.RandomState(0)
>>> X, y, coef = make_regression(
... n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0)
>>> X[:4] = rng.uniform(10, 20, (4, 2))
>>> y[:4] = rng.uniform(10, 20, 4)
>>> huber = HuberRegressor().fit(X, y)
>>> huber.score(X, y)
-7.284
>>> huber.predict(X[:1,])
array([806.7200])
>>> linear = LinearRegression().fit(X, y)
>>> print("True coefficients:", coef)
True coefficients: [20.4923... 34.1698...]
>>> print("Huber coefficients:", huber.coef_)
Huber coefficients: [17.7906... 31.0106...]
>>> print("Linear Regression coefficients:", linear.coef_)
Linear Regression coefficients: [-1.9221... 7.0226...]
"""
_parameter_constraints: dict = {
"epsilon": [Interval(Real, 1.0, None, closed="left")],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"alpha": [Interval(Real, 0, None, closed="left")],
"warm_start": ["boolean"],
"fit_intercept": ["boolean"],
"tol": [Interval(Real, 0.0, None, closed="left")],
}
    def __init__(
        self,
        *,
        epsilon=1.35,
        max_iter=100,
        alpha=0.0001,
        warm_start=False,
        fit_intercept=True,
        tol=1e-05,
    ):
        # Per scikit-learn convention, __init__ only stores hyper-parameters
        # verbatim; all validation and computation happen in `fit`.
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.alpha = alpha
        self.warm_start = warm_start
        self.fit_intercept = fit_intercept
        self.tol = tol
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like, shape (n_samples,)
            Target vector relative to X.

        sample_weight : array-like, shape (n_samples,)
            Weight given to each sample.

        Returns
        -------
        self : object
            Fitted `HuberRegressor` estimator.
        """
        X, y = validate_data(
            self,
            X,
            y,
            copy=False,
            accept_sparse=["csr"],
            y_numeric=True,
            dtype=[np.float64, np.float32],
        )
        sample_weight = _check_sample_weight(sample_weight, X)

        # The optimizer works on a single flat vector laid out as
        # [coef_0, ..., coef_{n-1}, (intercept,) scale].
        if self.warm_start and hasattr(self, "coef_"):
            # Resume optimization from the previously fitted solution.
            parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_]))
        else:
            if self.fit_intercept:
                parameters = np.zeros(X.shape[1] + 2)
            else:
                parameters = np.zeros(X.shape[1] + 1)
            # Make sure to initialize the scale parameter to a strictly
            # positive value:
            parameters[-1] = 1

        # Sigma or the scale factor should be non-negative.
        # Setting it to be zero might cause undefined bounds hence we set it
        # to a value close to zero.
        bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
        bounds[-1][0] = np.finfo(np.float64).eps * 10

        # Joint minimization of the Huber loss and the scale, as proposed by
        # Owen (2006); `jac=True` means the objective returns (loss, gradient).
        opt_res = optimize.minimize(
            _huber_loss_and_gradient,
            parameters,
            method="L-BFGS-B",
            jac=True,
            args=(X, y, self.epsilon, self.alpha, sample_weight),
            options={
                "maxiter": self.max_iter,
                "gtol": self.tol,
                **_get_additional_lbfgs_options_dict("iprint", -1),
            },
            bounds=bounds,
        )

        parameters = opt_res.x

        if opt_res.status == 2:
            # status == 2 signals abnormal termination of the solver.
            raise ValueError(
                "HuberRegressor convergence failed: l-BFGS-b solver terminated with %s"
                % opt_res.message
            )
        self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
        self.scale_ = parameters[-1]
        if self.fit_intercept:
            self.intercept_ = parameters[-2]
        else:
            self.intercept_ = 0.0
        self.coef_ = parameters[: X.shape[1]]

        # Samples whose absolute residual exceeds epsilon * scale are flagged
        # as outliers (they fall on the linear part of the Huber loss).
        residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_)
        self.outliers_ = residual > self.scale_ * self.epsilon
        return self
    def __sklearn_tags__(self):
        """Declare estimator capabilities: this estimator accepts sparse X."""
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_ransac.py | sklearn/linear_model/_ransac.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from sklearn.base import (
BaseEstimator,
MetaEstimatorMixin,
MultiOutputMixin,
RegressorMixin,
_fit_context,
clone,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._base import LinearRegression
from sklearn.utils import check_consistent_length, check_random_state, get_tags
from sklearn.utils._bunch import Bunch
from sklearn.utils._param_validation import (
HasMethods,
Interval,
Options,
RealNotInt,
StrOptions,
)
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.validation import (
_check_method_params,
_check_sample_weight,
check_is_fitted,
has_fit_parameter,
validate_data,
)
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio**min_samples)
if nom == 1:
return 0
if denom == 1:
return float("inf")
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(
MetaEstimatorMixin,
RegressorMixin,
MultiOutputMixin,
BaseEstimator,
):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
estimator : object, default=None
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
* `predict(X)`: Returns predicted values using the linear model,
which is used to compute residual error using loss function.
If `estimator` is None, then
:class:`~sklearn.linear_model.LinearRegression` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), default=None
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `estimator`. By default a
:class:`~sklearn.linear_model.LinearRegression` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly
dependent upon the model, so if a `estimator` other than
:class:`~sklearn.linear_model.LinearRegression` is used, the user must
provide a value.
residual_threshold : float, default=None
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`. Points whose residuals are
strictly equal to the threshold are considered as inliers.
is_data_valid : callable, default=None
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, default=None
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, default=100
Maximum number of iterations for random sample selection.
max_skips : int, default=np.inf
Maximum number of iterations that can be skipped due to finding zero
inliers or invalid data defined by ``is_data_valid`` or invalid models
defined by ``is_model_valid``.
.. versionadded:: 0.19
stop_n_inliers : int, default=np.inf
Stop iteration if at least this number of inliers are found.
stop_score : float, default=np.inf
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], default=0.99
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
loss : str, callable, default='absolute_error'
String inputs, 'absolute_error' and 'squared_error' are supported which
find the absolute error and squared error per sample respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the i-th value of the array corresponding to the loss
on ``X[i]``.
If the loss on a sample is greater than the ``residual_threshold``,
then this sample is classified as an outlier.
.. versionadded:: 0.18
random_state : int, RandomState instance, default=None
The generator used to initialize the centers.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : object
Final model fitted on the inliers predicted by the "best" model found
during RANSAC sampling (copy of the `estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
n_skips_no_inliers_ : int
Number of iterations skipped due to finding zero inliers.
.. versionadded:: 0.19
n_skips_invalid_data_ : int
Number of iterations skipped due to invalid data defined by
``is_data_valid``.
.. versionadded:: 0.19
n_skips_invalid_model_ : int
Number of iterations skipped due to an invalid model defined by
``is_model_valid``.
.. versionadded:: 0.19
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
HuberRegressor : Linear regression model that is robust to outliers.
TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] https://www.sri.com/wp-content/uploads/2021/12/ransac-publication.pdf
.. [3] https://bmva-archive.org.uk/bmvc/2009/Papers/Paper355/Paper355.pdf
Examples
--------
>>> from sklearn.linear_model import RANSACRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(
... n_samples=200, n_features=2, noise=4.0, random_state=0)
>>> reg = RANSACRegressor(random_state=0).fit(X, y)
>>> reg.score(X, y)
0.9885
>>> reg.predict(X[:1,])
array([-31.9417])
For a more detailed example, see
:ref:`sphx_glr_auto_examples_linear_model_plot_ransac.py`
"""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit", "score", "predict"]), None],
"min_samples": [
Interval(Integral, 1, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
None,
],
"residual_threshold": [Interval(Real, 0, None, closed="left"), None],
"is_data_valid": [callable, None],
"is_model_valid": [callable, None],
"max_trials": [
Interval(Integral, 0, None, closed="left"),
Options(Real, {np.inf}),
],
"max_skips": [
Interval(Integral, 0, None, closed="left"),
Options(Real, {np.inf}),
],
"stop_n_inliers": [
Interval(Integral, 0, None, closed="left"),
Options(Real, {np.inf}),
],
"stop_score": [Interval(Real, None, None, closed="both")],
"stop_probability": [Interval(Real, 0, 1, closed="both")],
"loss": [StrOptions({"absolute_error", "squared_error"}), callable],
"random_state": ["random_state"],
}
    def __init__(
        self,
        estimator=None,
        *,
        min_samples=None,
        residual_threshold=None,
        is_data_valid=None,
        is_model_valid=None,
        max_trials=100,
        max_skips=np.inf,
        stop_n_inliers=np.inf,
        stop_score=np.inf,
        stop_probability=0.99,
        loss="absolute_error",
        random_state=None,
    ):
        # Per scikit-learn convention, __init__ only stores hyper-parameters
        # verbatim; all validation and computation happen in `fit`.
        self.estimator = estimator
        self.min_samples = min_samples
        self.residual_threshold = residual_threshold
        self.is_data_valid = is_data_valid
        self.is_model_valid = is_model_valid
        self.max_trials = max_trials
        self.max_skips = max_skips
        self.stop_n_inliers = stop_n_inliers
        self.stop_score = stop_score
        self.stop_probability = stop_probability
        self.random_state = random_state
        self.loss = loss
    @_fit_context(
        # RansacRegressor.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, sample_weight=None, **fit_params):
        """Fit estimator using RANSAC algorithm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample. Raises an error if
            sample_weight is passed and the estimator's fit method does not
            support it.

            .. versionadded:: 0.18

        **fit_params : dict
            Parameters routed to the `fit` method of the sub-estimator via the
            metadata routing API.

            .. versionadded:: 1.5

            Only available if
            `sklearn.set_config(enable_metadata_routing=True)` is set. See
            :ref:`Metadata Routing User Guide <metadata_routing>` for more
            details.

        Returns
        -------
        self : object
            Fitted `RANSACRegressor` estimator.

        Raises
        ------
        ValueError
            If no valid consensus set could be found. This occurs if
            `is_data_valid` and `is_model_valid` return False for all
            `max_trials` randomly chosen sub-samples.
        """
        # Need to validate separately here. We can't pass multi_output=True
        # because that would allow y to be csr. Delay expensive finiteness
        # check to the estimator's own input validation.
        _raise_for_params(fit_params, self, "fit")
        check_X_params = dict(accept_sparse="csr", ensure_all_finite=False)
        check_y_params = dict(ensure_2d=False)
        X, y = validate_data(
            self, X, y, validate_separately=(check_X_params, check_y_params)
        )
        check_consistent_length(X, y)

        # Default to a plain LinearRegression when no base estimator is given.
        if self.estimator is not None:
            estimator = clone(self.estimator)
        else:
            estimator = LinearRegression()

        if self.min_samples is None:
            if not isinstance(estimator, LinearRegression):
                raise ValueError(
                    "`min_samples` needs to be explicitly set when estimator "
                    "is not a LinearRegression."
                )
            min_samples = X.shape[1] + 1
        elif 0 < self.min_samples < 1:
            # Fractional value: interpret relative to the number of samples.
            min_samples = np.ceil(self.min_samples * X.shape[0])
        elif self.min_samples >= 1:
            min_samples = self.min_samples
        if min_samples > X.shape[0]:
            raise ValueError(
                "`min_samples` may not be larger than number "
                "of samples: n_samples = %d." % (X.shape[0])
            )

        if self.residual_threshold is None:
            # MAD (median absolute deviation)
            residual_threshold = np.median(np.abs(y - np.median(y)))
        else:
            residual_threshold = self.residual_threshold

        # Per-sample loss used to split inliers from outliers; multi-output
        # targets are reduced to one loss value per sample via a sum over
        # outputs.
        if self.loss == "absolute_error":
            if y.ndim == 1:
                loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
            else:
                loss_function = lambda y_true, y_pred: np.sum(
                    np.abs(y_true - y_pred), axis=1
                )
        elif self.loss == "squared_error":
            if y.ndim == 1:
                loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
            else:
                loss_function = lambda y_true, y_pred: np.sum(
                    (y_true - y_pred) ** 2, axis=1
                )
        elif callable(self.loss):
            loss_function = self.loss

        random_state = check_random_state(self.random_state)

        try:  # Not all estimator accept a random_state
            estimator.set_params(random_state=random_state)
        except ValueError:
            pass

        estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight")
        estimator_name = type(estimator).__name__
        if sample_weight is not None and not estimator_fit_has_sample_weight:
            raise ValueError(
                "%s does not support sample_weight. Sample"
                " weights are only used for the calibration"
                " itself." % estimator_name
            )

        if sample_weight is not None:
            fit_params["sample_weight"] = sample_weight

        if _routing_enabled():
            routed_params = process_routing(self, "fit", **fit_params)
        else:
            # Without routing, only sample_weight is forwarded to the
            # sub-estimator's fit.
            routed_params = Bunch()
            routed_params.estimator = Bunch(fit={}, predict={}, score={})
            if sample_weight is not None:
                sample_weight = _check_sample_weight(sample_weight, X)
                routed_params.estimator.fit = {"sample_weight": sample_weight}

        # Best consensus set found so far (at least one inlier is required
        # for a candidate to be considered at all).
        n_inliers_best = 1
        score_best = -np.inf
        inlier_mask_best = None
        X_inlier_best = None
        y_inlier_best = None
        inlier_best_idxs_subset = None
        self.n_skips_no_inliers_ = 0
        self.n_skips_invalid_data_ = 0
        self.n_skips_invalid_model_ = 0

        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)

        self.n_trials_ = 0
        max_trials = self.max_trials
        while self.n_trials_ < max_trials:
            self.n_trials_ += 1

            if (
                self.n_skips_no_inliers_
                + self.n_skips_invalid_data_
                + self.n_skips_invalid_model_
            ) > self.max_skips:
                break

            # choose random sample set
            subset_idxs = sample_without_replacement(
                n_samples, min_samples, random_state=random_state
            )
            X_subset = X[subset_idxs]
            y_subset = y[subset_idxs]

            # check if random sample set is valid
            if self.is_data_valid is not None and not self.is_data_valid(
                X_subset, y_subset
            ):
                self.n_skips_invalid_data_ += 1
                continue

            # cut `fit_params` down to `subset_idxs`
            fit_params_subset = _check_method_params(
                X, params=routed_params.estimator.fit, indices=subset_idxs
            )

            # fit model for current random sample set
            estimator.fit(X_subset, y_subset, **fit_params_subset)

            # check if estimated model is valid
            if self.is_model_valid is not None and not self.is_model_valid(
                estimator, X_subset, y_subset
            ):
                self.n_skips_invalid_model_ += 1
                continue

            # residuals of all data for current random sample model
            y_pred = estimator.predict(X)
            residuals_subset = loss_function(y, y_pred)

            # classify data into inliers and outliers
            inlier_mask_subset = residuals_subset <= residual_threshold
            n_inliers_subset = np.sum(inlier_mask_subset)

            # less inliers -> skip current random sample
            if n_inliers_subset < n_inliers_best:
                self.n_skips_no_inliers_ += 1
                continue

            # extract inlier data set
            inlier_idxs_subset = sample_idxs[inlier_mask_subset]
            X_inlier_subset = X[inlier_idxs_subset]
            y_inlier_subset = y[inlier_idxs_subset]

            # cut `fit_params` down to `inlier_idxs_subset`
            score_params_inlier_subset = _check_method_params(
                X, params=routed_params.estimator.score, indices=inlier_idxs_subset
            )

            # score of inlier data set
            score_subset = estimator.score(
                X_inlier_subset,
                y_inlier_subset,
                **score_params_inlier_subset,
            )

            # same number of inliers but worse score -> skip current random
            # sample
            if n_inliers_subset == n_inliers_best and score_subset < score_best:
                continue

            # save current random sample as best sample
            n_inliers_best = n_inliers_subset
            score_best = score_subset
            inlier_mask_best = inlier_mask_subset
            X_inlier_best = X_inlier_subset
            y_inlier_best = y_inlier_subset
            inlier_best_idxs_subset = inlier_idxs_subset

            # Shrink the remaining trial budget based on the current inlier
            # ratio and the requested stop_probability.
            max_trials = min(
                max_trials,
                _dynamic_max_trials(
                    n_inliers_best, n_samples, min_samples, self.stop_probability
                ),
            )

            # break if sufficient number of inliers or score is reached
            if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score:
                break

        # if none of the iterations met the required criteria
        if inlier_mask_best is None:
            if (
                self.n_skips_no_inliers_
                + self.n_skips_invalid_data_
                + self.n_skips_invalid_model_
            ) > self.max_skips:
                raise ValueError(
                    "RANSAC skipped more iterations than `max_skips` without"
                    " finding a valid consensus set. Iterations were skipped"
                    " because each randomly chosen sub-sample failed the"
                    " passing criteria. See estimator attributes for"
                    " diagnostics (n_skips*)."
                )
            else:
                raise ValueError(
                    "RANSAC could not find a valid consensus set. All"
                    " `max_trials` iterations were skipped because each"
                    " randomly chosen sub-sample failed the passing criteria."
                    " See estimator attributes for diagnostics (n_skips*)."
                )
        else:
            if (
                self.n_skips_no_inliers_
                + self.n_skips_invalid_data_
                + self.n_skips_invalid_model_
            ) > self.max_skips:
                warnings.warn(
                    (
                        "RANSAC found a valid consensus set but exited"
                        " early due to skipping more iterations than"
                        " `max_skips`. See estimator attributes for"
                        " diagnostics (n_skips*)."
                    ),
                    ConvergenceWarning,
                )

        # estimate final model using all inliers
        fit_params_best_idxs_subset = _check_method_params(
            X, params=routed_params.estimator.fit, indices=inlier_best_idxs_subset
        )
        estimator.fit(X_inlier_best, y_inlier_best, **fit_params_best_idxs_subset)

        self.estimator_ = estimator
        self.inlier_mask_ = inlier_mask_best
        return self
    def predict(self, X, **params):
        """Predict using the estimated model.

        This is a wrapper for `estimator_.predict(X)`.

        Parameters
        ----------
        X : {array-like or sparse matrix} of shape (n_samples, n_features)
            Input data.

        **params : dict
            Parameters routed to the `predict` method of the sub-estimator via
            the metadata routing API.

            .. versionadded:: 1.5

            Only available if
            `sklearn.set_config(enable_metadata_routing=True)` is set. See
            :ref:`Metadata Routing User Guide <metadata_routing>` for more
            details.

        Returns
        -------
        y : array, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
        """
        check_is_fitted(self)
        # `reset=False` validates n_features against what was seen in `fit`;
        # finiteness checking is delegated to the underlying estimator.
        X = validate_data(
            self,
            X,
            ensure_all_finite=False,
            accept_sparse=True,
            reset=False,
        )

        _raise_for_params(params, self, "predict")

        if _routing_enabled():
            predict_params = process_routing(self, "predict", **params).estimator[
                "predict"
            ]
        else:
            predict_params = {}

        return self.estimator_.predict(X, **predict_params)
    def score(self, X, y, **params):
        """Return the score of the prediction.

        This is a wrapper for `estimator_.score(X, y)`.

        Parameters
        ----------
        X : {array-like or sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values.

        **params : dict
            Parameters routed to the `score` method of the sub-estimator via
            the metadata routing API.

            .. versionadded:: 1.5

            Only available if
            `sklearn.set_config(enable_metadata_routing=True)` is set. See
            :ref:`Metadata Routing User Guide <metadata_routing>` for more
            details.

        Returns
        -------
        z : float
            Score of the prediction.
        """
        check_is_fitted(self)
        # `reset=False` validates n_features against what was seen in `fit`;
        # finiteness checking is delegated to the underlying estimator.
        X = validate_data(
            self,
            X,
            ensure_all_finite=False,
            accept_sparse=True,
            reset=False,
        )

        _raise_for_params(params, self, "score")

        if _routing_enabled():
            score_params = process_routing(self, "score", **params).estimator["score"]
        else:
            score_params = {}

        return self.estimator_.score(X, y, **score_params)
    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.5

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        # The sub-estimator's fit and score are both invoked during `fit`
        # (scoring of each candidate consensus set), hence two fit mappings.
        router = MetadataRouter(owner=self).add(
            estimator=self.estimator,
            method_mapping=MethodMapping()
            .add(caller="fit", callee="fit")
            .add(caller="fit", callee="score")
            .add(caller="score", callee="score")
            .add(caller="predict", callee="predict"),
        )
        return router
    def __sklearn_tags__(self):
        """Declare capabilities; sparse support mirrors the sub-estimator's."""
        tags = super().__sklearn_tags__()
        if self.estimator is None:
            tags.input_tags.sparse = True  # default estimator is LinearRegression
        else:
            tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_quantile.py | sklearn/linear_model/tests/test_quantile.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import pytest
from pytest import approx
from scipy.optimize import minimize
from sklearn.datasets import make_regression
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import HuberRegressor, QuantileRegressor
from sklearn.metrics import mean_pinball_loss
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
parse_version,
sp_version,
)
@pytest.fixture
def X_y_data():
    """Small regression problem (10 samples, 1 feature) shared by tests."""
    # make_regression returns the (X, y) tuple directly when coef=False.
    return make_regression(n_samples=10, n_features=1, random_state=0, noise=1)
@pytest.mark.skipif(
    parse_version(sp_version.base_version) >= parse_version("1.11"),
    reason="interior-point solver is not available in SciPy 1.11",
)
@pytest.mark.parametrize("solver", ["interior-point", "revised simplex"])
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_incompatible_solver_for_sparse_input(X_y_data, solver, csc_container):
    """Legacy linprog solvers must raise a clear error when X is sparse."""
    X, y = X_y_data
    X_sparse = csc_container(X)
    err_msg = (
        f"Solver {solver} does not support sparse X. Use solver 'highs' for example."
    )
    with pytest.raises(ValueError, match=err_msg):
        QuantileRegressor(solver=solver).fit(X_sparse, y)
@pytest.mark.parametrize(
    "quantile, alpha, intercept, coef",
    [
        # for 50% quantile w/o regularization, any slope in [1, 10] is okay
        [0.5, 0, 1, None],
        # if positive error costs more, the slope is maximal
        [0.51, 0, 1, 10],
        # if negative error costs more, the slope is minimal
        [0.49, 0, 1, 1],
        # for a small lasso penalty, the slope is also minimal
        [0.5, 0.01, 1, 1],
        # for a large lasso penalty, the model predicts the constant median
        [0.5, 100, 2, 0],
    ],
)
def test_quantile_toy_example(quantile, alpha, intercept, coef):
    """Check how quantile level and penalty affect a small intuitive example."""
    X = [[0], [1], [1]]
    y = [1, 2, 11]
    model = QuantileRegressor(quantile=quantile, alpha=alpha).fit(X, y)
    assert_allclose(model.intercept_, intercept, atol=1e-2)
    if coef is not None:
        assert_allclose(model.coef_[0], coef, atol=1e-2)
    if alpha < 100:
        # Any slope interpolating the two duplicated x=1 targets is valid.
        assert model.coef_[0] >= 1
        assert model.coef_[0] <= 10
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_quantile_equals_huber_for_low_epsilon(fit_intercept):
    """Huber with epsilon -> 1 should approach the median (0.5-quantile) fit."""
    X, y = make_regression(n_samples=100, n_features=20, random_state=0, noise=1.0)
    alpha = 1e-4
    huber = HuberRegressor(
        epsilon=1 + 1e-4, alpha=alpha, fit_intercept=fit_intercept
    ).fit(X, y)
    quant = QuantileRegressor(alpha=alpha, fit_intercept=fit_intercept).fit(X, y)
    assert_allclose(huber.coef_, quant.coef_, atol=1e-1)
    if fit_intercept:
        assert huber.intercept_ == approx(quant.intercept_, abs=1e-1)
        # check that we still predict fraction
        assert np.mean(y < quant.predict(X)) == approx(0.5, abs=1e-1)
@pytest.mark.parametrize("q", [0.5, 0.9, 0.05])
def test_quantile_estimates_calibration(q):
    """An unpenalized fit should lie above a fraction ~q of the targets."""
    X, y = make_regression(n_samples=1000, n_features=20, random_state=0, noise=1.0)
    model = QuantileRegressor(quantile=q, alpha=0).fit(X, y)
    coverage = np.mean(y < model.predict(X))
    assert coverage == approx(q, abs=1e-2)
def test_quantile_sample_weight():
    """With unequal sample weights, the weighted coverage should stay 0.5."""
    n = 1000
    X, y = make_regression(n_samples=n, n_features=5, random_state=0, noise=10.0)
    weight = np.ones(n)
    # when we increase weight of upper observations,
    # estimate of quantile should go up
    weight[y > y.mean()] = 100
    quant = QuantileRegressor(quantile=0.5, alpha=1e-8)
    quant.fit(X, y, sample_weight=weight)
    fraction_below = np.mean(y < quant.predict(X))
    assert fraction_below > 0.5
    weighted_fraction_below = np.average(y < quant.predict(X), weights=weight)
    assert weighted_fraction_below == approx(0.5, abs=3e-2)
@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
def test_asymmetric_error(quantile):
    """Test quantile regression for asymmetric distributed targets."""
    n_samples = 1000
    rng = np.random.RandomState(42)
    X = np.concatenate(
        (
            np.abs(rng.randn(n_samples)[:, None]),
            -rng.randint(2, size=(n_samples, 1)),
        ),
        axis=1,
    )
    intercept = 1.23
    coef = np.array([0.5, -2])
    # Take care that X @ coef + intercept > 0
    assert np.min(X @ coef + intercept) > 0
    # For an exponential distribution with rate lambda, e.g. exp(-lambda * x),
    # the quantile at level q is:
    # quantile(q) = - log(1 - q) / lambda
    # scale = 1/lambda = -quantile(q) / log(1 - q)
    y = rng.exponential(
        scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
    )
    model = QuantileRegressor(
        quantile=quantile,
        alpha=0,
    ).fit(X, y)
    # This test can be made to pass with any solver but in the interest
    # of sparing continuous integration resources, the test is performed
    # with the fastest solver only.
    assert model.intercept_ == approx(intercept, rel=0.2)
    assert_allclose(model.coef_, coef, rtol=0.6)
    assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2)

    # Now compare to Nelder-Mead optimization with L1 penalty
    alpha = 0.01
    model.set_params(alpha=alpha).fit(X, y)
    model_coef = np.r_[model.intercept_, model.coef_]

    def func(coef):
        # Objective of QuantileRegressor: pinball loss + L1 penalty on coef.
        loss = mean_pinball_loss(y, X @ coef[1:] + coef[0], alpha=quantile)
        L1 = np.sum(np.abs(coef[1:]))
        return loss + alpha * L1

    res = minimize(
        fun=func,
        x0=[1, 0, -1],
        method="Nelder-Mead",
        tol=1e-12,
        options={"maxiter": 2000},
    )

    assert func(model_coef) == approx(func(res.x))
    assert_allclose(model.intercept_, res.x[0])
    assert_allclose(model.coef_, res.x[1:])
    assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2)
@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
def test_equivariance(quantile):
    """Test equivariance of quantile regression.

    See Koenker (2005) Quantile Regression, Chapter 2.2.3.
    """
    rng = np.random.RandomState(42)
    n_samples, n_features = 100, 5
    X, y = make_regression(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_features,
        noise=0,
        random_state=rng,
        shuffle=False,
    )
    # make y asymmetric
    y += rng.exponential(scale=100, size=y.shape)
    params = dict(alpha=0)
    model1 = QuantileRegressor(quantile=quantile, **params).fit(X, y)

    # coef(q; a*y, X) = a * coef(q; y, X)
    a = 2.5
    model2 = QuantileRegressor(quantile=quantile, **params).fit(X, a * y)
    assert model2.intercept_ == approx(a * model1.intercept_, rel=1e-5)
    assert_allclose(model2.coef_, a * model1.coef_, rtol=1e-5)

    # coef(1-q; -a*y, X) = -a * coef(q; y, X)
    model2 = QuantileRegressor(quantile=1 - quantile, **params).fit(X, -a * y)
    assert model2.intercept_ == approx(-a * model1.intercept_, rel=1e-5)
    assert_allclose(model2.coef_, -a * model1.coef_, rtol=1e-5)

    # coef(q; y + X @ g, X) = coef(q; y, X) + g
    g_intercept, g_coef = rng.randn(), rng.randn(n_features)
    model2 = QuantileRegressor(quantile=quantile, **params)
    model2.fit(X, y + X @ g_coef + g_intercept)
    assert model2.intercept_ == approx(model1.intercept_ + g_intercept)
    assert_allclose(model2.coef_, model1.coef_ + g_coef, rtol=1e-6)

    # coef(q; y, X @ A) = A^-1 @ coef(q; y, X)
    A = rng.randn(n_features, n_features)
    model2 = QuantileRegressor(quantile=quantile, **params)
    model2.fit(X @ A, y)
    assert model2.intercept_ == approx(model1.intercept_, rel=1e-5)
    assert_allclose(model2.coef_, np.linalg.solve(A, model1.coef_), rtol=1e-5)
@pytest.mark.skipif(
    parse_version(sp_version.base_version) >= parse_version("1.11"),
    reason="interior-point solver is not available in SciPy 1.11",
)
@pytest.mark.filterwarnings("ignore:`method='interior-point'` is deprecated")
def test_linprog_failure():
    """A linprog failure (maxiter=1) must surface as a ConvergenceWarning."""
    X = np.linspace(0, 10, num=10).reshape(-1, 1)
    y = np.linspace(0, 10, num=10)
    reg = QuantileRegressor(
        alpha=0, solver="interior-point", solver_options={"maxiter": 1}
    )
    msg = "Linear programming for QuantileRegressor did not succeed."
    with pytest.warns(ConvergenceWarning, match=msg):
        reg.fit(X, y)
@pytest.mark.parametrize(
    "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS
)
@pytest.mark.parametrize("solver", ["highs", "highs-ds", "highs-ipm"])
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_sparse_input(sparse_container, solver, fit_intercept, global_random_seed):
    """Test that sparse and dense X give same results.

    Also checks that the fitted models recover roughly `n_informative`
    non-zero coefficients and stay calibrated at the requested quantile.
    """
    n_informative = 10
    quantile_level = 0.6
    X, y = make_regression(
        n_samples=300,
        n_features=20,
        # Use the named constant instead of duplicating the literal 10, so the
        # support-size assertions below stay consistent with the data.
        n_informative=n_informative,
        random_state=global_random_seed,
        noise=1.0,
    )
    X_sparse = sparse_container(X)
    alpha = 0.1
    quant_dense = QuantileRegressor(
        quantile=quantile_level, alpha=alpha, fit_intercept=fit_intercept
    ).fit(X, y)
    quant_sparse = QuantileRegressor(
        quantile=quantile_level, alpha=alpha, fit_intercept=fit_intercept, solver=solver
    ).fit(X_sparse, y)
    assert_allclose(quant_sparse.coef_, quant_dense.coef_, rtol=1e-2)
    sparse_support = quant_sparse.coef_ != 0
    dense_support = quant_dense.coef_ != 0
    assert dense_support.sum() == pytest.approx(n_informative, abs=1)
    assert sparse_support.sum() == pytest.approx(n_informative, abs=1)
    if fit_intercept:
        assert quant_sparse.intercept_ == approx(quant_dense.intercept_)
        # check that we still predict fraction
        empirical_coverage = np.mean(y < quant_sparse.predict(X_sparse))
        assert empirical_coverage == approx(quantile_level, abs=3e-2)
def test_error_interior_point_future(X_y_data, monkeypatch):
    """Check the error raised for `solver='interior-point'` on SciPy >= 1.11."""
    X, y = X_y_data
    import sklearn.linear_model._quantile

    reg = QuantileRegressor(solver="interior-point")
    with monkeypatch.context() as patched:
        # Simulate running under SciPy 1.11, where interior-point was removed.
        patched.setattr(
            sklearn.linear_model._quantile, "sp_version", parse_version("1.11.0")
        )
        expected_msg = (
            "Solver interior-point is not anymore available in SciPy >= 1.11.0."
        )
        with pytest.raises(ValueError, match=expected_msg):
            reg.fit(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_passive_aggressive.py | sklearn/linear_model/tests/test_passive_aggressive.py | import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.sparse import issparse
from sklearn.base import ClassifierMixin
from sklearn.datasets import load_iris, make_classification, make_regression
from sklearn.linear_model import (
PassiveAggressiveClassifier,
PassiveAggressiveRegressor,
SGDClassifier,
SGDRegressor,
)
from sklearn.linear_model._base import SPARSE_INTERCEPT_DECAY
from sklearn.linear_model._stochastic_gradient import DEFAULT_EPSILON
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
# Shared fixture data: the iris dataset, shuffled with a fixed seed so that
# every test in this module sees the same reproducible sample order.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
# TODO(1.10): Move to test_sgd.py
class MyPassiveAggressive(ClassifierMixin):
    """Minimal reference implementation of the passive-aggressive updates.

    Used below to check PassiveAggressiveClassifier/Regressor against a
    direct transcription of the PA-I ("hinge"/"epsilon_insensitive") and
    PA-II ("squared_*") update rules.
    """

    def __init__(
        self,
        C=1.0,
        epsilon=DEFAULT_EPSILON,
        loss="hinge",
        fit_intercept=True,
        n_iter=1,
        random_state=None,
    ):
        self.C = C
        self.epsilon = epsilon
        self.loss = loss
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter

    def fit(self, X, y):
        """Run ``n_iter`` passive-aggressive epochs over (X, y) in order."""
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0
        # Mimic SGD's behavior for intercept
        intercept_decay = 1.0
        if issparse(X):
            intercept_decay = SPARSE_INTERCEPT_DECAY
            X = X.toarray()
        for t in range(self.n_iter):
            for i in range(n_samples):
                p = self.project(X[i])
                # Hinge losses: classification; epsilon-insensitive: regression.
                if self.loss in ("hinge", "squared_hinge"):
                    loss = max(1 - y[i] * p, 0)
                else:
                    loss = max(np.abs(p - y[i]) - self.epsilon, 0)
                sqnorm = np.dot(X[i], X[i])
                # PA-I caps the step size at C; PA-II regularizes the
                # denominator with 1/(2C) instead.
                if self.loss in ("hinge", "epsilon_insensitive"):
                    step = min(self.C, loss / sqnorm)
                elif self.loss in ("squared_hinge", "squared_epsilon_insensitive"):
                    step = loss / (sqnorm + 1.0 / (2 * self.C))
                # Step direction: label sign for classification, residual
                # sign for regression.
                if self.loss in ("hinge", "squared_hinge"):
                    step *= y[i]
                else:
                    step *= np.sign(y[i] - p)
                self.w += step * X[i]
                if self.fit_intercept:
                    self.b += intercept_decay * step

    def project(self, X):
        """Return the decision value(s) for X under the current weights."""
        return np.dot(X, self.w) + self.b
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("average", [False, True])
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS])
def test_classifier_accuracy(csr_container, fit_intercept, average):
    """The classifier reaches a reasonable accuracy on iris for dense and
    sparse input, with and without intercept and averaging."""
    data = X if csr_container is None else csr_container(X)
    clf = PassiveAggressiveClassifier(
        C=1.0,
        max_iter=30,
        fit_intercept=fit_intercept,
        random_state=1,
        average=average,
        tol=None,
    )
    clf.fit(data, y)
    assert clf.score(data, y) > 0.79
    if average:
        # Averaging must leave behind its bookkeeping attributes.
        for attr in (
            "_average_coef",
            "_average_intercept",
            "_standard_intercept",
            "_standard_coef",
        ):
            assert hasattr(clf, attr)
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("average", [False, True])
@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS])
def test_classifier_partial_fit(csr_container, average):
    """Repeated partial_fit reaches a reasonable accuracy on iris."""
    classes = np.unique(y)
    data = csr_container(X) if csr_container is not None else X
    clf = PassiveAggressiveClassifier(random_state=0, average=average, max_iter=5)
    # 30 incremental passes over the full dataset.
    for t in range(30):
        clf.partial_fit(data, y, classes)
    score = clf.score(data, y)
    assert score > 0.79
    if average:
        # Averaging keeps both averaged and last-seen ("standard") parameters.
        assert hasattr(clf, "_average_coef")
        assert hasattr(clf, "_average_intercept")
        assert hasattr(clf, "_standard_intercept")
        assert hasattr(clf, "_standard_coef")
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_classifier_refit():
    """Refitting on different labels and features resets ``classes_``."""
    clf = PassiveAggressiveClassifier(max_iter=5)
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))
    # Retrain with string labels and one feature dropped.
    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)
# TODO(1.10): Move to test_sgd.py
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS])
@pytest.mark.parametrize("loss", ("hinge", "squared_hinge"))
def test_classifier_correctness(loss, csr_container):
    """Classifier matches the reference MyPassiveAggressive implementation."""
    # Binarize iris: class 1 vs. the rest, encoded as +/-1.
    y_bin = y.copy()
    y_bin[y != 1] = -1
    data = csr_container(X) if csr_container is not None else X
    clf1 = MyPassiveAggressive(loss=loss, n_iter=4)
    clf1.fit(data, y_bin)
    # shuffle=False / tol=None so both implementations see the samples in the
    # same order for the same number of epochs.
    clf2 = PassiveAggressiveClassifier(loss=loss, max_iter=4, shuffle=False, tol=None)
    clf2.fit(data, y_bin)
    assert_allclose(clf1.w, clf2.coef_.ravel())

@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
    "response_method", ["predict_proba", "predict_log_proba", "transform"]
)
def test_classifier_undefined_methods(response_method):
    """Probability/transform accessors are undefined for this classifier."""
    clf = PassiveAggressiveClassifier(max_iter=100)
    with pytest.raises(AttributeError):
        getattr(clf, response_method)
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_class_weights():
    """Down-weighting one class shifts the prediction near the boundary."""
    X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
    y2 = [1, 1, 1, -1, -1]
    clf = PassiveAggressiveClassifier(
        C=0.1, max_iter=100, class_weight=None, random_state=100
    )
    clf.fit(X2, y2)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weights to class 1
    clf = PassiveAggressiveClassifier(
        C=0.1, max_iter=100, class_weight={1: 0.001}, random_state=100
    )
    clf.fit(X2, y2)
    # now the hyperplane should rotate clock-wise and
    # the prediction on this point should shift
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))

@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_partial_fit_weight_class_balanced():
    """partial_fit with class_weight='balanced' is not supported."""
    clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100)
    with pytest.raises(ValueError):
        clf.partial_fit(X, y, classes=np.unique(y))
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_equal_class_weight():
    """Balanced or uniform explicit class weights match no weighting at all."""
    X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
    y2 = [0, 0, 1, 1]
    unweighted = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight=None)
    unweighted.fit(X2, y2)
    # The data is already balanced, so "balanced" should have no effect.
    balanced = PassiveAggressiveClassifier(C=0.1, tol=None, class_weight="balanced")
    balanced.fit(X2, y2)
    uniform = PassiveAggressiveClassifier(
        C=0.1, tol=None, class_weight={0: 0.5, 1: 0.5}
    )
    uniform.fit(X2, y2)
    # Coefficients agree up to a small epsilon due to the learning-rate
    # schedule.
    assert_almost_equal(unweighted.coef_, uniform.coef_, decimal=2)
    assert_almost_equal(unweighted.coef_, balanced.coef_, decimal=2)
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_wrong_class_weight_label():
    """A class_weight key that is not one of the labels raises ValueError."""
    X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
    y2 = [1, 1, 1, -1, -1]
    # Label 0 does not occur in y2.
    clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100)
    with pytest.raises(ValueError):
        clf.fit(X2, y2)
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("average", [False, True])
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS])
def test_regressor_mse(csr_container, fit_intercept, average):
    """The regressor reaches a low MSE on a binarized iris target."""
    # Binarize: class 1 vs. the rest, encoded as +/-1.
    y_bin = y.copy()
    y_bin[y != 1] = -1
    data = csr_container(X) if csr_container is not None else X
    reg = PassiveAggressiveRegressor(
        C=1.0,
        fit_intercept=fit_intercept,
        random_state=0,
        average=average,
        max_iter=5,
    )
    reg.fit(data, y_bin)
    pred = reg.predict(data)
    assert np.mean((pred - y_bin) ** 2) < 1.7
    if average:
        # Averaging keeps both averaged and last-seen ("standard") parameters.
        assert hasattr(reg, "_average_coef")
        assert hasattr(reg, "_average_intercept")
        assert hasattr(reg, "_standard_intercept")
        assert hasattr(reg, "_standard_coef")
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("average", [False, True])
@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS])
def test_regressor_partial_fit(csr_container, average):
    """Repeated partial_fit drives the regression MSE below a loose bound."""
    # Binarize: class 1 vs. the rest, encoded as +/-1.
    y_bin = np.where(y == 1, y, -1)
    data = X if csr_container is None else csr_container(X)
    reg = PassiveAggressiveRegressor(random_state=0, average=average, max_iter=100)
    for _ in range(50):
        reg.partial_fit(data, y_bin)
    mse = np.mean((reg.predict(data) - y_bin) ** 2)
    assert mse < 1.7
    if average:
        # Averaging must leave behind its bookkeeping attributes.
        for attr in (
            "_average_coef",
            "_average_intercept",
            "_standard_intercept",
            "_standard_coef",
        ):
            assert hasattr(reg, attr)
# TODO(1.10): Move to test_sgd.py
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("csr_container", [None, *CSR_CONTAINERS])
@pytest.mark.parametrize("loss", ("epsilon_insensitive", "squared_epsilon_insensitive"))
def test_regressor_correctness(loss, csr_container):
    """Regressor matches the reference MyPassiveAggressive implementation."""
    y_bin = y.copy()
    y_bin[y != 1] = -1
    data = csr_container(X) if csr_container is not None else X
    reg1 = MyPassiveAggressive(loss=loss, n_iter=4)
    reg1.fit(data, y_bin)
    # shuffle=False / tol=None: identical sample order and epoch count.
    reg2 = PassiveAggressiveRegressor(loss=loss, max_iter=4, shuffle=False, tol=None)
    reg2.fit(data, y_bin)
    assert_allclose(reg1.w, reg2.coef_.ravel())

@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_regressor_undefined_methods():
    """The regressor does not expose a transform method."""
    reg = PassiveAggressiveRegressor(max_iter=100)
    with pytest.raises(AttributeError):
        reg.transform(X)
# TODO(1.10): remove
@pytest.mark.parametrize(
    "Estimator", [PassiveAggressiveClassifier, PassiveAggressiveRegressor]
)
def test_class_deprecation(Estimator):
    # Check that we raise the proper deprecation warning.
    with pytest.warns(FutureWarning, match="Class PassiveAggressive.+is deprecated"):
        Estimator()

@pytest.mark.parametrize(["loss", "lr"], [("hinge", "pa1"), ("squared_hinge", "pa2")])
def test_passive_aggressive_classifier_vs_sgd(loss, lr):
    """Test that both are equivalent."""
    X, y = make_classification(
        n_samples=100, n_features=10, n_informative=5, random_state=1234
    )
    # SGDClassifier with learning_rate "pa1"/"pa2" and eta0=C should give the
    # same decision function as the PA classifier with the matching loss.
    pa = PassiveAggressiveClassifier(loss=loss, C=0.987, random_state=42).fit(X, y)
    sgd = SGDClassifier(
        loss="hinge", penalty=None, learning_rate=lr, eta0=0.987, random_state=42
    ).fit(X, y)
    assert_allclose(pa.decision_function(X), sgd.decision_function(X))
@pytest.mark.parametrize(
    ["loss", "lr"],
    [("epsilon_insensitive", "pa1"), ("squared_epsilon_insensitive", "pa2")],
)
def test_passive_aggressive_regressor_vs_sgd(loss, lr):
    """Test that both are equivalent."""
    X, y = make_regression(
        n_samples=100, n_features=10, n_informative=5, random_state=1234
    )
    pa = PassiveAggressiveRegressor(
        loss=loss, epsilon=0.123, C=0.987, random_state=42
    ).fit(X, y)
    # SGDRegressor with learning_rate "pa1"/"pa2", eta0=C and the same epsilon
    # should predict identically to the PA regressor.
    sgd = SGDRegressor(
        loss="epsilon_insensitive",
        epsilon=0.123,
        penalty=None,
        learning_rate=lr,
        eta0=0.987,
        random_state=42,
    ).fit(X, y)
    assert_allclose(pa.predict(X), sgd.predict(X))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_sparse_coordinate_descent.py | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | import numpy as np
import pytest
import scipy.sparse as sp
from numpy.testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import ElasticNet, ElasticNetCV, Lasso, LassoCV
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
create_memmap_backed_data,
ignore_warnings,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, LIL_CONTAINERS
def test_sparse_coef():
    """The sparse_coef_ property mirrors coef_ as a sparse matrix."""
    clf = ElasticNet()
    # Assign coef_ directly (a plain list) without fitting.
    clf.coef_ = [1, 2, 3]
    assert sp.issparse(clf.sparse_coef_)
    assert clf.sparse_coef_.toarray().tolist()[0] == clf.coef_
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_lasso_zero(csc_container):
    """Sparse Lasso handles an all-zero design matrix without crashing."""
    X = csc_container((3, 1))  # empty sparse matrix: every entry is zero
    y = [0, 0, 0]
    T = np.array([[1], [2], [3]])
    model = Lasso()
    model.fit(X, y)
    predictions = model.predict(T)
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_enet_toy_list_input(with_sample_weight, csc_container):
    """ElasticNet on a toy line, for several alpha/l1_ratio combinations."""
    # Test ElasticNet for various values of alpha and l1_ratio with list X
    X = np.array([[-1], [0], [1]])
    X = csc_container(X)
    Y = [-1, 0, 1]  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample
    if with_sample_weight:
        # Constant weights: must not change the solution.
        sw = np.array([2.0, 2, 2])
    else:
        sw = None
    # this should be the same as unregularized least squares
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    # catch warning about alpha=0.
    # this is discouraged but should work.
    ignore_warnings(clf.fit)(X, Y, sample_weight=sw)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3)
    clf.fit(X, Y, sample_weight=sw)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y, sample_weight=sw)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_enet_toy_explicit_sparse_input(lil_container):
    """Same toy-line checks, but with X/T built as explicit sparse matrices."""
    # Test ElasticNet for various values of alpha and l1_ratio with sparse X
    # training samples
    X = lil_container((3, 1))
    X[0, 0] = -1
    # X[1, 0] = 0
    X[2, 0] = 1
    Y = [-1, 0, 1]  # just a straight line (the identity function)
    # test samples
    T = lil_container((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4
    # this should be the same as lasso
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    ignore_warnings(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(
    sparse_container,
    n_samples=100,
    n_features=100,
    n_informative=10,
    seed=42,
    positive=False,
    n_targets=1,
):
    """Build an ill-posed sparse regression problem.

    Many noisy features, comparatively few samples, and a ground-truth
    model in which only the first ``n_informative`` features matter.
    Returns ``(X, y)`` with X wrapped by ``sparse_container``; y is 1-D
    when ``n_targets == 1``.
    """
    rng = np.random.RandomState(seed)
    # Ground-truth weights: only the leading features are informative.
    true_w = rng.randn(n_features, n_targets)
    true_w[n_informative:] = 0.0
    if positive:
        true_w = np.abs(true_w)
    # Design matrix with roughly 50% of its entries zeroed out.
    dense_X = rng.randn(n_samples, n_features)
    zero_mask = rng.uniform(size=(n_samples, n_features)) > 0.5
    dense_X[zero_mask] = 0.0
    # Noise-free targets from the ground-truth model.
    target = np.dot(dense_X, true_w)
    if n_targets == 1:
        target = np.ravel(target)
    return sparse_container(dense_X), target
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize(
    "alpha, fit_intercept, positive",
    [(0.1, False, False), (0.1, True, False), (1e-3, False, True), (1e-3, True, True)],
)
def test_sparse_enet_not_as_toy_dataset(csc_container, alpha, fit_intercept, positive):
    """Sparse ElasticNet matches the dense solver on a non-trivial problem."""
    n_samples, n_features, max_iter = 100, 100, 1000
    n_informative = 10
    X, y = make_sparse_data(
        csc_container, n_samples, n_features, n_informative, positive=positive
    )
    # Second half trains, first half tests.
    X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2]
    y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2]
    s_clf = ElasticNet(
        alpha=alpha,
        l1_ratio=0.8,
        fit_intercept=fit_intercept,
        max_iter=max_iter,
        tol=1e-7,
        positive=positive,
        warm_start=True,
    )
    s_clf.fit(X_train, y_train)
    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert s_clf.score(X_test, y_test) > 0.85
    # check the convergence is the same as the dense version
    d_clf = ElasticNet(
        alpha=alpha,
        l1_ratio=0.8,
        fit_intercept=fit_intercept,
        max_iter=max_iter,
        tol=1e-7,
        positive=positive,
        warm_start=True,
    )
    d_clf.fit(X_train.toarray(), y_train)
    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert d_clf.score(X_test, y_test) > 0.85
    assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
    assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
    # check that the coefs are sparse
    assert np.sum(s_clf.coef_ != 0.0) < 2 * n_informative
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_sparse_lasso_not_as_toy_dataset(csc_container):
    """Sparse Lasso matches the dense solver and recovers the true support."""
    n_samples = 100
    max_iter = 1000
    n_informative = 10
    X, y = make_sparse_data(
        csc_container, n_samples=n_samples, n_informative=n_informative
    )
    # Second half trains, first half tests.
    X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2]
    y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2]
    s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    s_clf.fit(X_train, y_train)
    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert s_clf.score(X_test, y_test) > 0.85
    # check the convergence is the same as the dense version
    d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    d_clf.fit(X_train.toarray(), y_train)
    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert d_clf.score(X_test, y_test) > 0.85
    # check that the coefs are sparse
    assert np.sum(s_clf.coef_ != 0.0) == n_informative
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_enet_multitarget(csc_container):
    """A multi-target fit equals the corresponding per-target fits."""
    n_targets = 3
    X, y = make_sparse_data(csc_container, n_targets=n_targets)
    estimator = ElasticNet(alpha=0.01, precompute=False)
    # XXX: There is a bug when precompute is not False!
    estimator.fit(X, y)
    coef, intercept, dual_gap = (
        estimator.coef_,
        estimator.intercept_,
        estimator.dual_gap_,
    )
    # Refit one target at a time and compare against the joint fit.
    for k in range(n_targets):
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_path_parameters(csc_container):
    """ElasticNetCV path parameters are honored for sparse and dense input."""
    X, y = make_sparse_data(csc_container)
    max_iter = 50
    n_alphas = 10
    # `alphas` given as an integer requests that many automatically chosen
    # alphas along the path.
    clf = ElasticNetCV(
        alphas=n_alphas,
        eps=1e-3,
        max_iter=max_iter,
        l1_ratio=0.5,
        fit_intercept=False,
    )
    clf.fit(X, y)
    assert_almost_equal(0.5, clf.l1_ratio)
    assert clf.alphas == n_alphas
    assert len(clf.alphas_) == n_alphas
    sparse_mse_path = clf.mse_path_
    # compare with dense data
    clf.fit(X.toarray(), y)
    assert_almost_equal(clf.mse_path_, sparse_mse_path)
@pytest.mark.parametrize("Model", [Lasso, ElasticNet, LassoCV, ElasticNetCV])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("l1_ratio", [0.5, 0])
@pytest.mark.parametrize("n_samples, n_features", [(24, 6), (6, 24)])
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_sparse_dense_equality(
    Model,
    fit_intercept,
    l1_ratio,
    n_samples,
    n_features,
    with_sample_weight,
    csc_container,
):
    """Sparse and dense X give identical fits, under- and over-determined."""
    X, y = make_regression(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=n_features // 2,
        n_informative=n_features // 2,
        bias=4 * fit_intercept,
        noise=1,
        random_state=42,
    )
    if with_sample_weight:
        sw = np.abs(np.random.RandomState(42).normal(scale=10, size=y.shape))
    else:
        sw = None
    Xs = csc_container(X)
    params = {"fit_intercept": fit_intercept, "tol": 1e-6}
    # l1_ratio is only passed to ElasticNet; for the other models the
    # l1_ratio=0 parametrization would add nothing, so it is skipped.
    if Model != ElasticNet:
        if l1_ratio == 0:
            return
    else:
        params["l1_ratio"] = l1_ratio
    reg_dense = Model(**params).fit(X, y, sample_weight=sw)
    reg_sparse = Model(**params).fit(Xs, y, sample_weight=sw)
    if fit_intercept:
        assert reg_sparse.intercept_ == pytest.approx(reg_dense.intercept_)
        # balance property: weighted mean of predictions equals that of y
        assert np.average(reg_sparse.predict(X), weights=sw) == pytest.approx(
            np.average(y, weights=sw)
        )
    assert_allclose(reg_sparse.coef_, reg_dense.coef_)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_same_output_sparse_dense_lasso_and_enet_cv(csc_container):
    """ElasticNetCV and LassoCV give the same fit on sparse and dense X."""
    X, y = make_sparse_data(csc_container, n_samples=40, n_features=10)
    clfs = ElasticNetCV(max_iter=100, tol=1e-7)
    clfs.fit(X, y)
    clfd = ElasticNetCV(max_iter=100, tol=1e-7)
    clfd.fit(X.toarray(), y)
    assert_allclose(clfs.alpha_, clfd.alpha_)
    assert_allclose(clfs.intercept_, clfd.intercept_)
    assert_allclose(clfs.mse_path_, clfd.mse_path_)
    assert_allclose(clfs.alphas_, clfd.alphas_)
    # Same comparison for LassoCV with explicit CV folds.
    clfs = LassoCV(max_iter=100, cv=4, tol=1e-8)
    clfs.fit(X, y)
    clfd = LassoCV(max_iter=100, cv=4, tol=1e-8)
    clfd.fit(X.toarray(), y)
    assert_allclose(clfs.alpha_, clfd.alpha_)
    assert_allclose(clfs.intercept_, clfd.intercept_)
    assert_allclose(clfs.mse_path_, clfd.mse_path_)
    assert_allclose(clfs.alphas_, clfd.alphas_)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_same_multiple_output_sparse_dense(coo_container):
    """Multi-output ElasticNet predicts the same for dense and sparse input."""
    X = [
        [0, 1, 2, 3, 4],
        [0, 2, 5, 8, 11],
        [9, 10, 11, 12, 13],
        [10, 11, 12, 13, 14],
    ]
    y = [
        [1, 2, 3, 4, 5],
        [1, 3, 6, 9, 12],
        [10, 11, 12, 13, 14],
        [11, 12, 13, 14, 15],
    ]
    dense_model = ElasticNet()
    dense_model.fit(X, y)
    sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
    predict_dense = dense_model.predict(sample)
    sparse_model = ElasticNet()
    sparse_model.fit(coo_container(X), y)
    predict_sparse = sparse_model.predict(coo_container(sample))
    assert_array_almost_equal(predict_sparse, predict_dense)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_sparse_enet_coordinate_descent(csc_container):
    """Test that a warning is issued if model does not converge"""
    # warm_start=True makes the solver start from the bad coef_ set below;
    # max_iter=2 with tol=1e-10 guarantees it cannot converge.
    clf = Lasso(
        alpha=1e-10, fit_intercept=False, warm_start=True, max_iter=2, tol=1e-10
    )
    # Set initial coefficients to very bad values.
    clf.coef_ = np.array([1, 1, 1, 1000])
    X = np.array([[-1, -1, 1, 1], [1, 1, -1, -1]])
    X = csc_container(X)
    y = np.array([-1, 1])
    warning_message = (
        "Objective did not converge. You might want "
        "to increase the number of iterations."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        clf.fit(X, y)

@pytest.mark.parametrize("copy_X", (True, False))
def test_sparse_read_only_buffer(copy_X):
    """Test that sparse coordinate descent works for read-only buffers"""
    rng = np.random.RandomState(0)
    clf = ElasticNet(alpha=0.1, copy_X=copy_X, random_state=rng)
    X = sp.random(100, 20, format="csc", random_state=rng)
    # Make X.data read-only
    X.data = create_memmap_backed_data(X.data)
    y = rng.rand(100)
    clf.fit(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_omp.py | sklearn/linear_model/tests/test_omp.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import pytest
from sklearn.datasets import make_sparse_coded_signal
from sklearn.linear_model import (
LinearRegression,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
orthogonal_mp,
orthogonal_mp_gram,
)
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
)
# Shared fixture: a sparse-coded signal with known sparse codes `gamma`.
n_samples, n_features, n_nonzero_coefs, n_targets = 25, 35, 5, 3
y, X, gamma = make_sparse_coded_signal(
    n_samples=n_targets,
    n_components=n_features,
    n_features=n_samples,
    n_nonzero_coefs=n_nonzero_coefs,
    random_state=0,
)
# Transpose so that X is (n_samples, n_features) and y is (n_samples, n_targets).
y, X, gamma = y.T, X.T, gamma.T
# Make X not of norm 1 for testing
X *= 10
y *= 10
# Precomputed Gram matrix and X^T y for the *_gram code paths.
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
def test_correct_shapes():
    """orthogonal_mp returns 1-D coefs for a single target, 2-D otherwise."""
    assert orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape == (n_features,)
    assert orthogonal_mp(X, y, n_nonzero_coefs=5).shape == (n_features, 3)

def test_correct_shapes_gram():
    """Same shape contract for the precomputed-Gram variant."""
    assert orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape == (n_features,)
    assert orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape == (n_features, 3)

def test_n_nonzero_coefs():
    """The solution never uses more than n_nonzero_coefs atoms."""
    assert np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)) <= 5
    assert (
        np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5, precompute=True))
        <= 5
    )

def test_tol():
    """Stopping on tol leaves a squared residual no larger than tol."""
    tol = 0.5
    gamma = orthogonal_mp(X, y[:, 0], tol=tol)
    gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
    assert np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol
    assert np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol
def test_with_without_gram():
    """precompute=True must not change the solution (n_nonzero_coefs mode)."""
    assert_array_almost_equal(
        orthogonal_mp(X, y, n_nonzero_coefs=5),
        orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True),
    )

def test_with_without_gram_tol():
    """precompute=True must not change the solution (tol mode)."""
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=1.0), orthogonal_mp(X, y, tol=1.0, precompute=True)
    )

def test_unreachable_accuracy():
    """tol=0 degenerates to selecting every atom."""
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=0), orthogonal_mp(X, y, n_nonzero_coefs=n_features)
    )
    # The precomputed path additionally warns about linear dependence.
    warning_message = (
        "Orthogonal matching pursuit ended prematurely "
        "due to linear dependence in the dictionary. "
        "The requested precision might not have been met."
    )
    with pytest.warns(RuntimeWarning, match=warning_message):
        assert_array_almost_equal(
            orthogonal_mp(X, y, tol=0, precompute=True),
            orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_features),
        )
@pytest.mark.parametrize("positional_params", [(X, y), (G, Xy)])
@pytest.mark.parametrize(
    "keyword_params",
    [{"n_nonzero_coefs": n_features + 1}],
)
def test_bad_input(positional_params, keyword_params):
    """Requesting more atoms than there are features is rejected."""
    with pytest.raises(ValueError):
        orthogonal_mp(*positional_params, **keyword_params)

def test_perfect_signal_recovery():
    """OMP recovers the exact support and coefficients of the true signal."""
    (idx,) = gamma[:, 0].nonzero()
    gamma_rec = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)
    gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5)
    assert_array_equal(idx, np.flatnonzero(gamma_rec))
    assert_array_equal(idx, np.flatnonzero(gamma_gram))
    assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
    assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_orthogonal_mp_gram_readonly():
    """orthogonal_mp_gram accepts read-only inputs even with copy_*=False.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/5956
    """
    (idx,) = gamma[:, 0].nonzero()
    G_readonly = G.copy()
    G_readonly.setflags(write=False)
    Xy_readonly = Xy.copy()
    Xy_readonly.setflags(write=False)
    gamma_gram = orthogonal_mp_gram(
        G_readonly, Xy_readonly[:, 0], n_nonzero_coefs=5, copy_Gram=False, copy_Xy=False
    )
    assert_array_equal(idx, np.flatnonzero(gamma_gram))
    assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
    """OrthogonalMatchingPursuit shapes and sparsity, with/without intercept."""
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    omp.fit(X, y[:, 0])
    assert omp.coef_.shape == (n_features,)
    assert omp.intercept_.shape == ()
    assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs
    # Multi-target fit: one coefficient row and intercept per target.
    omp.fit(X, y)
    assert omp.coef_.shape == (n_targets, n_features)
    assert omp.intercept_.shape == (n_targets,)
    assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs
    coef_normalized = omp.coef_[0].copy()
    omp.set_params(fit_intercept=True)
    omp.fit(X, y[:, 0])
    assert_array_almost_equal(coef_normalized, omp.coef_)
    # Without intercept the intercept_ attribute collapses to 0.
    omp.set_params(fit_intercept=False)
    omp.fit(X, y[:, 0])
    assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs
    assert omp.coef_.shape == (n_features,)
    assert omp.intercept_ == 0
    omp.fit(X, y)
    assert omp.coef_.shape == (n_targets, n_features)
    assert omp.intercept_ == 0
    assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs

def test_estimator_n_nonzero_coefs():
    """Check `n_nonzero_coefs_` correct when `tol` is and isn't set."""
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    omp.fit(X, y[:, 0])
    assert omp.n_nonzero_coefs_ == n_nonzero_coefs
    # When tol is set it takes precedence and n_nonzero_coefs_ is None.
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs, tol=0.5)
    omp.fit(X, y[:, 0])
    assert omp.n_nonzero_coefs_ is None
def test_identical_regressors():
    """Duplicated dictionary atoms trigger the linear-dependence warning."""
    newX = X.copy()
    newX[:, 1] = newX[:, 0]
    gamma = np.zeros(n_features)
    gamma[0] = gamma[1] = 1.0
    newy = np.dot(newX, gamma)
    warning_message = (
        "Orthogonal matching pursuit ended prematurely "
        "due to linear dependence in the dictionary. "
        "The requested precision might not have been met."
    )
    with pytest.warns(RuntimeWarning, match=warning_message):
        orthogonal_mp(newX, newy, n_nonzero_coefs=2)

def test_swapped_regressors():
    """Selection order is independent of internal column swapping."""
    gamma = np.zeros(n_features)
    # X[:, 21] should be selected first, then X[:, 0] selected second,
    # which will take X[:, 21]'s place in case the algorithm does
    # column swapping for optimization (which is the case at the moment)
    gamma[21] = 1.0
    gamma[0] = 0.5
    new_y = np.dot(X, gamma)
    new_Xy = np.dot(X.T, new_y)
    gamma_hat = orthogonal_mp(X, new_y, n_nonzero_coefs=2)
    gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, n_nonzero_coefs=2)
    assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
    assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
    """OMP on an all-zero target must return all-zero coefficients.

    Covers both the plain and the precomputed-Gram code paths.
    """
    y_empty = np.zeros_like(y)
    Xy_empty = np.dot(X.T, y_empty)
    # Warnings about premature stopping are expected and irrelevant here.
    gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, n_nonzero_coefs=1)
    # Use the Gram variant for the Gram inputs. The original passed G/Xy to
    # `orthogonal_mp`, which treats them as X/y; the result is all zeros
    # either way, but this now exercises the intended code path.
    gamma_empty_gram = ignore_warnings(orthogonal_mp_gram)(
        G, Xy_empty, n_nonzero_coefs=1
    )
    assert np.all(gamma_empty == 0)
    assert np.all(gamma_empty_gram == 0)
def test_omp_path():
    """return_path=True yields the whole path; its last step is the solution."""
    path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
    last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
    assert path.shape == (n_features, n_targets, 5)
    assert_array_almost_equal(path[:, :, -1], last)
    # Same property for the precomputed-Gram variant.
    path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
    last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
    assert path.shape == (n_features, n_targets, 5)
    assert_array_almost_equal(path[:, :, -1], last)

def test_omp_return_path_prop_with_gram():
    """The path property also holds when precompute=True."""
    path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True, precompute=True)
    last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False, precompute=True)
    assert path.shape == (n_features, n_targets, 5)
    assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
    """OMP-CV finds the true sparsity level and matches a plain OMP refit."""
    y_ = y[:, 0]
    gamma_ = gamma[:, 0]
    ompcv = OrthogonalMatchingPursuitCV(fit_intercept=False, max_iter=10)
    ompcv.fit(X, y_)
    assert ompcv.n_nonzero_coefs_ == n_nonzero_coefs
    assert_array_almost_equal(ompcv.coef_, gamma_)
    # A plain OMP fit at the CV-selected sparsity reproduces the CV solution.
    omp = OrthogonalMatchingPursuit(
        fit_intercept=False, n_nonzero_coefs=ompcv.n_nonzero_coefs_
    )
    omp.fit(X, y_)
    assert_array_almost_equal(ompcv.coef_, omp.coef_)

def test_omp_reaches_least_squares():
    """With as many atoms as features, OMP equals ordinary least squares."""
    # Use small simple data; it's a sanity check but OMP can stop early
    rng = check_random_state(0)
    n_samples, n_features = (10, 8)
    n_targets = 3
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_targets)
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
    lstsq = LinearRegression()
    omp.fit(X, Y)
    lstsq.fit(X, Y)
    assert_array_almost_equal(omp.coef_, lstsq.coef_)
@pytest.mark.parametrize("data_type", (np.float32, np.float64))
def test_omp_gram_dtype_match(data_type):
    # verify matching input data type and output data type
    coef = orthogonal_mp_gram(
        G.astype(data_type), Xy.astype(data_type), n_nonzero_coefs=5
    )
    assert coef.dtype == data_type

def test_omp_gram_numerical_consistency():
    # verify numerical consistency among np.float32 and np.float64
    # NOTE(review): the "64" branch also casts G to float32 — presumably G
    # should be float64 for a true float32-vs-float64 comparison; confirm
    # whether the mixed-precision input is intentional.
    coef_32 = orthogonal_mp_gram(
        G.astype(np.float32), Xy.astype(np.float32), n_nonzero_coefs=5
    )
    coef_64 = orthogonal_mp_gram(
        G.astype(np.float32), Xy.astype(np.float64), n_nonzero_coefs=5
    )
    assert_allclose(coef_32, coef_64)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_common.py | sklearn/linear_model/tests/test_common.py | # SPDX-License-Identifier: BSD-3-Clause
import inspect
import numpy as np
import pytest
from sklearn.base import clone, is_classifier
from sklearn.datasets import make_classification, make_low_rank_matrix, make_regression
from sklearn.linear_model import (
ARDRegression,
BayesianRidge,
ElasticNet,
ElasticNetCV,
GammaRegressor,
HuberRegressor,
Lars,
LarsCV,
Lasso,
LassoCV,
LassoLars,
LassoLarsCV,
LassoLarsIC,
LinearRegression,
LogisticRegression,
LogisticRegressionCV,
MultiTaskElasticNet,
MultiTaskElasticNetCV,
MultiTaskLasso,
MultiTaskLassoCV,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
PassiveAggressiveClassifier,
PassiveAggressiveRegressor,
Perceptron,
PoissonRegressor,
Ridge,
RidgeClassifier,
RidgeClassifierCV,
RidgeCV,
SGDClassifier,
SGDRegressor,
TheilSenRegressor,
TweedieRegressor,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.utils._testing import assert_allclose, set_random_state
from sklearn.utils.fixes import CSR_CONTAINERS
# Note: GammaRegressor() and TweedieRegressor(power != 1) have a non-canonical link.
@pytest.mark.parametrize(
    "model",
    [
        ARDRegression(),
        BayesianRidge(),
        ElasticNet(),
        ElasticNetCV(),
        Lars(),
        LarsCV(),
        Lasso(),
        LassoCV(),
        LassoLarsCV(),
        LassoLarsIC(),
        LinearRegression(),
        # TODO: Fix SAGA which fails badly with sample_weights.
        # This is a known limitation, see:
        # https://github.com/scikit-learn/scikit-learn/issues/21305
        pytest.param(
            LogisticRegression(l1_ratio=0.5, solver="saga", tol=1e-15),
            marks=pytest.mark.xfail(reason="Missing importance sampling scheme"),
        ),
        LogisticRegressionCV(tol=1e-6, use_legacy_attributes=False, l1_ratios=(0,)),
        MultiTaskElasticNet(),
        MultiTaskElasticNetCV(),
        MultiTaskLasso(),
        MultiTaskLassoCV(),
        OrthogonalMatchingPursuit(),
        OrthogonalMatchingPursuitCV(),
        PoissonRegressor(),
        Ridge(),
        RidgeCV(),
        pytest.param(
            SGDRegressor(tol=1e-15),
            marks=pytest.mark.xfail(reason="Insufficient precision."),
        ),
        SGDRegressor(penalty="elasticnet", max_iter=10_000),
        TweedieRegressor(power=0),  # same as Ridge
    ],
    ids=lambda x: x.__class__.__name__,
)
@pytest.mark.parametrize("with_sample_weight", [False, True])
def test_balance_property(model, with_sample_weight, global_random_seed):
    """Test that sum(y_predicted) == sum(y_observed) on the training set.

    This must hold for all linear models with deviance of an exponential
    dispersion family as loss and the corresponding canonical link if
    fit_intercept=True. Examples:

    - squared error and identity link (most linear models)
    - Poisson deviance with log link
    - log loss with logit link

    This is known as balance property or unconditional
    calibration/unbiasedness. For reference, see Corollary 3.18, 3.20 and
    Chapter 5.1.5 of M.V. Wuthrich and M. Merz, "Statistical Foundations of
    Actuarial Learning and its Applications" (June 3, 2022).
    http://doi.org/10.2139/ssrn.3822407
    """
    model = clone(model)  # Avoid side effects from shared instances.
    if (
        with_sample_weight
        and "sample_weight" not in inspect.signature(model.fit).parameters.keys()
    ):
        pytest.skip("Estimator does not support sample_weight.")
    # test precision: looser tolerances for the less precise solvers
    rel = 2e-4
    if isinstance(model, SGDRegressor):
        rel = 1e-1
    elif hasattr(model, "solver") and model.solver == "saga":
        rel = 1e-2
    rng = np.random.RandomState(global_random_seed)
    n_train, n_features, n_targets = 100, 10, None
    if isinstance(
        model,
        (MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLasso, MultiTaskLassoCV),
    ):
        n_targets = 3
    X = make_low_rank_matrix(n_samples=n_train, n_features=n_features, random_state=rng)
    if n_targets:
        coef = (
            rng.uniform(low=-2, high=2, size=(n_features, n_targets))
            / np.max(X, axis=0)[:, None]
        )
    else:
        coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    expectation = np.exp(X @ coef + 0.5)
    y = rng.poisson(lam=expectation) + 1  # strict positive, i.e. y > 0
    if is_classifier(model):
        y = (y > expectation + 1).astype(np.float64)
    if with_sample_weight:
        sw = rng.uniform(low=1, high=10, size=y.shape[0])
    else:
        sw = None
    model.set_params(fit_intercept=True)  # to be sure
    if with_sample_weight:
        model.fit(X, y, sample_weight=sw)
    else:
        model.fit(X, y)
    # Assert balance property.
    if is_classifier(model):
        assert np.average(model.predict_proba(X)[:, 1], weights=sw) == pytest.approx(
            np.average(y, weights=sw), rel=rel
        )
    else:
        assert np.average(model.predict(X), weights=sw, axis=0) == pytest.approx(
            np.average(y, weights=sw, axis=0), rel=rel
        )
@pytest.mark.filterwarnings("ignore:The default of 'normalize'")
@pytest.mark.filterwarnings("ignore:lbfgs failed to converge")
@pytest.mark.filterwarnings("ignore:A column-vector y was passed when a 1d array.*")
@pytest.mark.parametrize(
    "Regressor",
    [
        ARDRegression,
        BayesianRidge,
        ElasticNet,
        ElasticNetCV,
        GammaRegressor,
        HuberRegressor,
        Lars,
        LarsCV,
        Lasso,
        LassoCV,
        LassoLars,
        LassoLarsCV,
        LassoLarsIC,
        LinearSVR,
        LinearRegression,
        OrthogonalMatchingPursuit,
        OrthogonalMatchingPursuitCV,
        PassiveAggressiveRegressor,
        PoissonRegressor,
        Ridge,
        RidgeCV,
        SGDRegressor,
        TheilSenRegressor,
        TweedieRegressor,
    ],
)
@pytest.mark.parametrize("ndim", [1, 2])
def test_linear_model_regressor_coef_shape(Regressor, ndim):
    """Check the consistency of linear models `coef` shape."""
    if Regressor is LinearRegression:
        pytest.xfail("LinearRegression does not follow `coef_` shape contract!")
    X, y = make_regression(random_state=0, n_samples=200, n_features=20)
    # Rescale targets into [1, 2] so strictly-positive-target models
    # (Poisson/Gamma/Tweedie) can fit the same data.
    y = MinMaxScaler().fit_transform(y.reshape(-1, 1))[:, 0] + 1
    y = y[:, np.newaxis] if ndim == 2 else y
    regressor = Regressor()
    set_random_state(regressor)
    regressor.fit(X, y)
    # Regardless of y's ndim, `coef_` must be 1d of length n_features.
    assert regressor.coef_.shape == (X.shape[1],)
@pytest.mark.parametrize(
    ["Classifier", "params"],
    [
        (LinearSVC, {}),
        (LogisticRegression, {}),
        (
            LogisticRegressionCV,
            {
                "solver": "newton-cholesky",
                "use_legacy_attributes": False,
                "l1_ratios": (0,),
            },
        ),
        (PassiveAggressiveClassifier, {}),
        (Perceptron, {}),
        (RidgeClassifier, {}),
        (RidgeClassifierCV, {}),
        (SGDClassifier, {}),
    ],
)
@pytest.mark.parametrize("n_classes", [2, 3])
def test_linear_model_classifier_coef_shape(Classifier, params, n_classes):
    """Check the consistency of linear classifiers' `coef_` shape."""
    if Classifier in (RidgeClassifier, RidgeClassifierCV):
        pytest.xfail(f"{Classifier} does not follow `coef_` shape contract!")
    X, y = make_classification(n_informative=10, n_classes=n_classes, random_state=0)
    n_features = X.shape[1]
    classifier = Classifier(**params)
    set_random_state(classifier)
    classifier.fit(X, y)
    # Binary problems use a single coefficient row; multiclass one per class.
    expected_shape = (1, n_features) if n_classes == 2 else (n_classes, n_features)
    assert classifier.coef_.shape == expected_shape
@pytest.mark.parametrize(
    "LinearModel, params",
    [
        (Lasso, {"tol": 1e-15, "alpha": 0.01}),
        (LassoCV, {"tol": 1e-15}),
        (ElasticNetCV, {"tol": 1e-15}),
        (RidgeClassifier, {"solver": "sparse_cg", "alpha": 0.1}),
        (ElasticNet, {"tol": 1e-15, "l1_ratio": 1, "alpha": 0.01}),
        (ElasticNet, {"tol": 1e-15, "l1_ratio": 1e-5, "alpha": 0.01}),
        (Ridge, {"solver": "sparse_cg", "tol": 1e-12, "alpha": 0.1}),
        (LinearRegression, {}),
        (RidgeCV, {}),
        (RidgeClassifierCV, {}),
    ],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_model_pipeline_same_dense_and_sparse(LinearModel, params, csr_container):
    """Test that sparse and dense linear models give same results.

    Models use a preprocessing pipeline with a StandardScaler.
    """
    model_dense = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params))
    model_sparse = make_pipeline(StandardScaler(with_mean=False), LinearModel(**params))
    # prepare the data: zero out most entries so the CSR form is truly sparse
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 2
    X = rng.randn(n_samples, n_features)
    X[X < 0.1] = 0.0
    X_sparse = csr_container(X)
    y = rng.rand(n_samples)
    if is_classifier(model_dense):
        y = np.sign(y)
    model_dense.fit(X, y)
    model_sparse.fit(X_sparse, y)
    assert_allclose(model_sparse[1].coef_, model_dense[1].coef_, atol=1e-15)
    y_pred_dense = model_dense.predict(X)
    y_pred_sparse = model_sparse.predict(X_sparse)
    assert_allclose(y_pred_dense, y_pred_sparse)
    assert_allclose(model_dense[1].intercept_, model_sparse[1].intercept_)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_least_angle.py | sklearn/linear_model/tests/test_least_angle.py | import warnings
import numpy as np
import pytest
from scipy import linalg
from sklearn import datasets, linear_model
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import (
Lars,
LarsCV,
LassoLars,
LassoLarsCV,
LassoLarsIC,
lars_path,
)
from sklearn.linear_model._least_angle import _lars_path_residues
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils._testing import (
TempMemmap,
assert_allclose,
assert_array_almost_equal,
ignore_warnings,
)
# TODO: use another dataset that has multiple drops
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Precomputed Gram matrix (X.T @ X) and covariance (X.T @ y), shared by the
# precompute-based tests below.
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
n_samples = y.size
def test_simple():
    """LARS keeps absolute covariances tied and decreasing along the path.

    Also exercises the verbose code path by temporarily capturing stdout.
    """
    # Principle of Lars is to keep covariances tied and decreasing
    # also test verbose output
    import sys
    from io import StringIO

    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        _, _, coef_path_ = linear_model.lars_path(X, y, method="lar", verbose=10)
        sys.stdout = old_stdout
        for i, coef_ in enumerate(coef_path_.T):
            res = y - np.dot(X, coef_)
            cov = np.dot(X.T, res)
            C = np.max(abs(cov))
            eps = 1e-3
            # number of covariances tied (within eps) to the maximum
            ocur = len(cov[C - eps < abs(cov)])
            if i < X.shape[1]:
                assert ocur == i + 1
            else:
                # no more than max_pred variables can go into the active set
                assert ocur == X.shape[1]
    finally:
        sys.stdout = old_stdout
def test_simple_precomputed():
    """Same tied-and-decreasing covariance check, with a precomputed Gram."""
    _, _, coef_path_ = linear_model.lars_path(X, y, Gram=G, method="lar")
    n_features = X.shape[1]
    eps = 1e-3
    for step, coef_ in enumerate(coef_path_.T):
        residual = y - np.dot(X, coef_)
        cov = np.dot(X.T, residual)
        c_max = np.max(abs(cov))
        tied = len(cov[c_max - eps < abs(cov)])
        # One variable enters the active set per step, capped at the total
        # number of predictors.
        expected = step + 1 if step < n_features else n_features
        assert tied == expected
def _assert_same_lars_path_result(output1, output2):
assert len(output1) == len(output2)
for o1, o2 in zip(output1, output2):
assert_allclose(o1, o2)
@pytest.mark.parametrize("method", ["lar", "lasso"])
@pytest.mark.parametrize("return_path", [True, False])
def test_lars_path_gram_equivalent(method, return_path):
    """lars_path_gram must match lars_path given equivalent Gram/Xy inputs."""
    _assert_same_lars_path_result(
        linear_model.lars_path_gram(
            Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path
        ),
        linear_model.lars_path(X, y, Gram=G, method=method, return_path=return_path),
    )
def test_x_none_gram_none_raises_value_error():
    """lars_path must refuse a call where both X and Gram are missing."""
    xy_cov = np.dot(X.T, y)
    with pytest.raises(ValueError, match="X and Gram cannot both be unspecified"):
        linear_model.lars_path(None, y, Gram=None, Xy=xy_cov)
def test_all_precomputed():
    """Precomputed Gram and Xy must reproduce the from-scratch lars_path."""
    gram = np.dot(X.T, X)
    xy_cov = np.dot(X.T, y)
    for method in ("lar", "lasso"):
        reference = linear_model.lars_path(X, y, method=method)
        precomputed = linear_model.lars_path(X, y, Gram=gram, Xy=xy_cov, method=method)
        for expected, got in zip(reference, precomputed):
            assert_array_almost_equal(expected, got)
# TODO: remove warning filter when numpy min version >= 2.0.0
@pytest.mark.filterwarnings("ignore: `rcond` parameter will change")
def test_lars_lstsq():
    """Lars with alpha=0 must reach the least-squares solution at path end."""
    # Test that Lars gives least square solution at the end
    # of the path
    X1 = 3 * X  # use un-normalized dataset
    clf = linear_model.LassoLars(alpha=0.0)
    clf.fit(X1, y)
    coef_lstsq = np.linalg.lstsq(X1, y)[0]
    assert_array_almost_equal(clf.coef_, coef_lstsq)
# TODO: remove warning filter when numpy min version >= 2.0.0
@pytest.mark.filterwarnings("ignore: `rcond` parameter will change")
def test_lasso_gives_lstsq_solution():
    """The lasso lars path must end at the least-squares solution."""
    # Test that Lars Lasso gives least square solution at the end
    # of the path
    _, _, coef_path_ = linear_model.lars_path(X, y, method="lasso")
    coef_lstsq = np.linalg.lstsq(X, y)[0]
    assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
    """lars_path must be robust to collinearity in the input."""
    # Check that lars_path is robust to collinearity in input
    X = np.array([[3.0, 3.0, 1.0], [2.0, 2.0, 0.0], [1.0, 1.0, 0]])
    y = np.array([1.0, 0.0, 0])
    rng = np.random.RandomState(0)

    f = ignore_warnings
    _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
    assert not np.isnan(coef_path_).any()
    residual = np.dot(X, coef_path_[:, -1]) - y
    assert (residual**2).sum() < 1.0  # just make sure it's bounded

    # A zero target must yield an all-zero coefficient path.
    n_samples = 10
    X = rng.rand(n_samples, 5)
    y = np.zeros(n_samples)
    _, _, coef_path_ = linear_model.lars_path(
        X,
        y,
        Gram="auto",
        copy_X=False,
        copy_Gram=False,
        alpha_min=0.0,
        method="lasso",
        verbose=0,
        max_iter=500,
    )
    assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
    """`return_path=False` must yield the last point of the full path."""
    full_alphas, _, full_coefs = linear_model.lars_path(X, y, method="lar")
    final_alpha, _, final_coef = linear_model.lars_path(
        X, y, method="lar", return_path=False
    )
    assert_array_almost_equal(final_coef, full_coefs[:, -1])
    assert final_alpha == full_alphas[-1]
def test_no_path_precomputed():
    """`return_path=False` with a precomputed Gram still matches path end."""
    # Test that the ``return_path=False`` option with Gram remains correct
    alphas_, _, coef_path_ = linear_model.lars_path(X, y, method="lar", Gram=G)
    alpha_, _, coef = linear_model.lars_path(
        X, y, method="lar", Gram=G, return_path=False
    )
    assert_array_almost_equal(coef, coef_path_[:, -1])
    assert alpha_ == alphas_[-1]
def test_no_path_all_precomputed():
    """`return_path=False` with precomputed Gram and Xy matches path end."""
    # Test that the ``return_path=False`` option with Gram and Xy remains
    # correct
    X, y = 3 * diabetes.data, diabetes.target
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    alphas_, _, coef_path_ = linear_model.lars_path(
        X, y, method="lasso", Xy=Xy, Gram=G, alpha_min=0.9
    )
    alpha_, _, coef = linear_model.lars_path(
        X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False
    )
    assert_array_almost_equal(coef, coef_path_[:, -1])
    assert alpha_ == alphas_[-1]
@pytest.mark.parametrize(
    "classifier", [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]
)
def test_lars_precompute(classifier):
    """All `precompute` settings must yield the same coefficients."""
    # Check for different values of precompute
    G = np.dot(X.T, X)
    clf = classifier(precompute=G)
    output_1 = ignore_warnings(clf.fit)(X, y).coef_
    for precompute in [True, False, "auto", None]:
        clf = classifier(precompute=precompute)
        output_2 = clf.fit(X, y).coef_
        assert_array_almost_equal(output_1, output_2, decimal=8)
def test_singular_matrix():
    """lars_path must cope with a singular (rank-deficient) design matrix."""
    design = np.array([[1, 1.0], [1.0, 1.0]])
    target = np.array([1, 1])
    _, _, path = linear_model.lars_path(design, target)
    assert_array_almost_equal(path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
    """LARS Lasso handles rank-deficient designs as well as CD Lasso.

    Consistency test: for rank-deficient input data (with n_features < rank)
    the LARS Lasso objective must not be worse than coordinate descent's.
    """
    y = [5, 0, 5]
    for X in ([[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]]):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(0.1)
        coef_lars_ = lars.fit(X, y).coef_
        obj_lars = 1.0 / (2.0 * 3.0) * linalg.norm(
            y - np.dot(X, coef_lars_)
        ) ** 2 + 0.1 * linalg.norm(coef_lars_, 1)
        coord_descent = linear_model.Lasso(0.1, tol=1e-6)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = (1.0 / (2.0 * 3.0)) * linalg.norm(
            y - np.dot(X, coef_cd_)
        ) ** 2 + 0.1 * linalg.norm(coef_cd_, 1)
        assert obj_lars < obj_cd * (1.0 + 1e-8)
def test_lasso_lars_vs_lasso_cd():
    """LassoLars and coordinate-descent Lasso must give the same results.

    Checked along the lars path alphas, via the estimator classes on a grid
    of alphas, and again on centered/normalized data.
    """
    X = 3 * diabetes.data

    alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso")
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01

    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(alpha=alpha).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert err < 1e-3

    # same test, with normalized data
    X = diabetes.data
    X = X - X.sum(axis=0)
    X /= np.linalg.norm(X, axis=0)
    alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso")
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
def test_lasso_lars_vs_lasso_cd_early_stopping():
    """LassoLars and CD Lasso agree when early stopping via alpha_min.

    Tested before, in the middle, and in the last part of the path.
    """
    alphas_min = [10, 0.9, 1e-4]
    X = diabetes.data
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(
            X, y, method="lasso", alpha_min=alpha_min
        )
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert error < 0.01

    # same test, with normalization
    X = diabetes.data - diabetes.data.sum(axis=0)
    X /= np.linalg.norm(X, axis=0)
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(
            X, y, method="lasso", alpha_min=alpha_min
        )
        lasso_cd = linear_model.Lasso(tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert error < 0.01
def test_lasso_lars_path_length():
    """Stopping at an intermediate alpha must truncate the path exactly."""
    full_fit = linear_model.LassoLars()
    full_fit.fit(X, y)
    truncated_fit = linear_model.LassoLars(alpha=full_fit.alphas_[2])
    truncated_fit.fit(X, y)
    assert_array_almost_equal(full_fit.alphas_[:3], truncated_fit.alphas_)
    # The sequence of regularization strengths is strictly decreasing.
    assert np.all(np.diff(full_fit.alphas_) < 0)
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
    """Lars stays bounded and close to CD on an ill-conditioned design.

    Also test that lasso_path (using lars_path output style) gives the same
    result as lars_path and previous lasso output style under these
    conditions.
    """
    rng = np.random.RandomState(42)

    # Generate data: sparse ground truth with k active features
    n, m = 70, 100
    k = 5
    X = rng.randn(n, m)
    w = np.zeros((m, 1))
    i = np.arange(0, m)
    rng.shuffle(i)
    supp = i[:k]
    w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
    y = np.dot(X, w)
    sigma = 0.2
    y += sigma * rng.rand(*y.shape)
    y = y.squeeze()

    lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method="lasso")
    _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6)

    assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
    """LARS and CD agree even when LARS must go far along the path.

    Create an ill-conditioned situation in which the LARS has to go far in
    the path to converge, and check that LARS and coordinate descent give
    the same answers.

    Note it used to be the case that Lars had to use the drop for good
    strategy for this but this is no longer the case with the
    equality_tolerance checks.
    """
    X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]]
    y = [10, 10, 1]
    alpha = 0.0001

    def objective_function(coef):
        # Standard Lasso objective: 0.5/n * ||y - Xw||^2 + alpha * ||w||_1
        return 1.0 / (2.0 * len(X)) * linalg.norm(
            y - np.dot(X, coef)
        ) ** 2 + alpha * linalg.norm(coef, 1)

    lars = linear_model.LassoLars(alpha=alpha)
    warning_message = "Regressors in active set degenerate."
    with pytest.warns(ConvergenceWarning, match=warning_message):
        lars.fit(X, y)
    lars_coef_ = lars.coef_
    lars_obj = objective_function(lars_coef_)

    coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4)
    cd_coef_ = coord_descent.fit(X, y).coef_
    cd_obj = objective_function(cd_coef_)

    assert lars_obj < cd_obj * (1.0 + 1e-8)
def test_lars_add_features():
    """Lars stays finite on a badly conditioned (Hilbert) design.

    Assures that at least some features get added if necessary; regression
    test for commit 6d2b4c.
    """
    size = 5
    hilbert = 1.0 / (np.arange(1, size + 1) + np.arange(size)[:, np.newaxis])
    model = linear_model.Lars(fit_intercept=False).fit(hilbert, np.arange(size))
    assert np.all(np.isfinite(model.coef_))
def test_lars_n_nonzero_coefs(verbose=False):
    """`n_nonzero_coefs` must cap the active set and the path length."""
    lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
    lars.fit(X, y)
    assert len(lars.coef_.nonzero()[0]) == 6
    # The path should be of length 6 + 1 in a Lars going down to 6
    # non-zero coefs
    assert len(lars.alphas_) == 7
def test_multitarget():
    """Estimators fitted on 2d y must match per-column 1d fits."""
    # Assure that estimators receiving multidimensional y do the right thing
    Y = np.vstack([y, y**2]).T
    n_targets = Y.shape[1]
    estimators = [
        linear_model.LassoLars(),
        linear_model.Lars(),
        # regression test for gh-1615
        linear_model.LassoLars(fit_intercept=False),
        linear_model.Lars(fit_intercept=False),
    ]

    for estimator in estimators:
        estimator.fit(X, Y)
        Y_pred = estimator.predict(X)
        alphas, active, coef, path = (
            estimator.alphas_,
            estimator.active_,
            estimator.coef_,
            estimator.coef_path_,
        )
        for k in range(n_targets):
            # Refit on a single column; attributes must equal the k-th slice.
            estimator.fit(X, Y[:, k])
            y_pred = estimator.predict(X)
            assert_array_almost_equal(alphas[k], estimator.alphas_)
            assert_array_almost_equal(active[k], estimator.active_)
            assert_array_almost_equal(coef[k], estimator.coef_)
            assert_array_almost_equal(path[k], estimator.coef_path_)
            assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
    """Optimal alpha of LassoLarsCV grows as the training set shrinks.

    The loop fits on 400, 200 then 100 samples and checks alpha_ increases
    each time. This property is not actually guaranteed in general and is
    just a property of the given dataset, with the given steps chosen.
    """
    old_alpha = 0
    lars_cv = linear_model.LassoLarsCV()
    for length in (400, 200, 100):
        X = diabetes.data[:length]
        y = diabetes.target[:length]
        lars_cv.fit(X, y)
        np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
        old_alpha = lars_cv.alpha_
    assert not hasattr(lars_cv, "n_nonzero_coefs")
def test_lars_cv_max_iter(recwarn):
    """LassoLarsCV with a small max_iter must not emit any warnings."""
    warnings.simplefilter("always")
    with np.errstate(divide="raise", invalid="raise"):
        X = diabetes.data
        y = diabetes.target
        rng = np.random.RandomState(42)
        x = rng.randn(len(y))
        X = diabetes.data
        X = np.c_[X, x, x]  # add correlated features
        X = StandardScaler().fit_transform(X)
        lars_cv = linear_model.LassoLarsCV(max_iter=5, cv=5)
        lars_cv.fit(X, y)
    # Check that there is no warning in general and no ConvergenceWarning
    # in particular.
    # Materialize the string representation of the warning to get a more
    # informative error message in case of AssertionError.
    recorded_warnings = [str(w) for w in recwarn]
    assert len(recorded_warnings) == 0
def test_lasso_lars_ic():
    """Check LassoLarsIC feature selection under AIC vs BIC.

    Verifies that:
    - some good features are selected.
    - alpha_bic > alpha_aic
    - n_nonzero_bic < n_nonzero_aic
    """
    lars_bic = linear_model.LassoLarsIC("bic")
    lars_aic = linear_model.LassoLarsIC("aic")
    rng = np.random.RandomState(42)
    X = diabetes.data
    X = np.c_[X, rng.randn(X.shape[0], 5)]  # add 5 bad features
    X = StandardScaler().fit_transform(X)
    lars_bic.fit(X, y)
    lars_aic.fit(X, y)
    nonzero_bic = np.where(lars_bic.coef_)[0]
    nonzero_aic = np.where(lars_aic.coef_)[0]
    assert lars_bic.alpha_ > lars_aic.alpha_
    assert len(nonzero_bic) < len(nonzero_aic)
    # None of the appended noise features may be selected by BIC.
    assert np.max(nonzero_bic) < diabetes.data.shape[1]
def test_lars_path_readonly_data():
    """lars_path must accept read-only (memmapped) fold data.

    When using automated memory mapping on large input, the fold data is in
    read-only mode. This is a non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/4597
    """
    splitted_data = train_test_split(X, y, random_state=42)
    with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
        # The following should not fail despite copy=False
        _lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
    """Main test for the `positive` parameter of lars_path.

    The estimator classes just make use of this function. On the diabetes
    dataset, ensure that we get negative coefficients when positive=False
    and all positive when positive=True, and that method 'lar' rejects the
    constraint.
    """
    err_msg = "Positive constraint not supported for 'lar' coding method."
    with pytest.raises(ValueError, match=err_msg):
        linear_model.lars_path(
            diabetes["data"], diabetes["target"], method="lar", positive=True
        )

    method = "lasso"
    _, _, coefs = linear_model.lars_path(
        X, y, return_path=True, method=method, positive=False
    )
    assert coefs.min() < 0

    _, _, coefs = linear_model.lars_path(
        X, y, return_path=True, method=method, positive=True
    )
    assert coefs.min() >= 0
# now we gonna test the positive option for all estimator classes
# NOTE(review): these module-level dicts appear unused — the function
# test_estimatorclasses_positive_constraint below redefines identical
# locals. They look like removal candidates; verify nothing imports them
# before deleting.
default_parameter = {"fit_intercept": False}
estimator_parameter_map = {
    "LassoLars": {"alpha": 0.1},
    "LassoLarsCV": {},
    "LassoLarsIC": {},
}
def test_estimatorclasses_positive_constraint():
    """Check that the `positive` option is forwarded by the LassoLars*
    estimator classes: positive=False may produce negative coefficients,
    positive=True must force all coefficients non-negative."""
    default_parameter = {"fit_intercept": False}

    estimator_parameter_map = {
        "LassoLars": {"alpha": 0.1},
        "LassoLarsCV": {},
        "LassoLarsIC": {},
    }
    for estname in estimator_parameter_map:
        params = default_parameter.copy()
        params.update(estimator_parameter_map[estname])
        estimator = getattr(linear_model, estname)(positive=False, **params)
        estimator.fit(X, y)
        assert estimator.coef_.min() < 0
        estimator = getattr(linear_model, estname)(positive=True, **params)
        estimator.fit(X, y)
        # Consistency fix: use ndarray.min() as in the negative branch above
        # (was the equivalent but inconsistent `min(estimator.coef_)`).
        assert estimator.coef_.min() >= 0
def test_lasso_lars_vs_lasso_cd_positive():
    """LassoLars and CD Lasso must agree when `positive=True`.

    This test is basically a copy of test_lasso_lars_vs_lasso_cd with the
    additional positive option. However for the middle part, the comparison
    of coefficient values for a range of alphas, we had to make an
    adaptation. See below.
    """
    # not normalized data
    X = 3 * diabetes.data

    alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso", positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01

    # The range of alphas chosen for coefficient comparison here is restricted
    # as compared with the above test without the positive option. This is due
    # to the circumstance that the Lars-Lasso algorithm does not converge to
    # the least-squares-solution for small alphas, see 'Least Angle Regression'
    # by Efron et al 2004. The coefficients are typically in congruence up to
    # the smallest alpha reached by the Lars-Lasso algorithm and start to
    # diverge thereafter. See
    # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
    for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(
            fit_intercept=False, alpha=alpha, positive=True
        ).fit(X, y)
        clf2 = linear_model.Lasso(
            fit_intercept=False, alpha=alpha, tol=1e-8, positive=True
        ).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert err < 1e-3

    # normalized data
    X = diabetes.data - diabetes.data.sum(axis=0)
    X /= np.linalg.norm(X, axis=0)
    alphas, _, lasso_path = linear_model.lars_path(X, y, method="lasso", positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
    for c, a in zip(lasso_path.T[:-1], alphas[:-1]):  # don't include alpha=0
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
def test_lasso_lars_vs_R_implementation():
    """sklearn LassoLars agrees with the R `lars` package.

    Test that the sklearn LassoLars implementation agrees with the LassoLars
    implementation available in R (lars library) when fit_intercept=False,
    using the data from bug report 7778.
    """
    y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366])
    x = np.array(
        [
            [0.47299829, 0, 0, 0, 0],
            [0.08239882, 0.85784863, 0, 0, 0],
            [0.30114139, -0.07501577, 0.80895216, 0, 0],
            [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
            [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291],
        ]
    )

    X = x.T

    # The R result was obtained using the following code:
    #
    # library(lars)
    # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
    #                         trace=TRUE, normalize=FALSE)
    # r = t(model_lasso_lars$beta)
    #
    r = np.array(
        [
            [
                0,
                0,
                0,
                0,
                0,
                -79.810362809499026,
                -83.528788732782829,
                -83.777653739190711,
                -83.784156932888934,
                -84.033390591756657,
            ],
            [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936],
            [
                0,
                -3.577397088285891,
                -4.702795355871871,
                -7.016748621359461,
                -7.614898471899412,
                -0.336938391359179,
                0,
                0,
                0.001213370600853,
                0.048162321585148,
            ],
            [
                0,
                0,
                0,
                2.231558436628169,
                2.723267514525966,
                2.811549786389614,
                2.813766976061531,
                2.817462468949557,
                2.817368178703816,
                2.816221090636795,
            ],
            [
                0,
                0,
                -1.218422599914637,
                -3.457726183014808,
                -4.021304522060710,
                -45.827461592423745,
                -47.776608869312305,
                -47.911561610746404,
                -47.914845922736234,
                -48.039562334265717,
            ],
        ]
    )

    model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False)
    model_lasso_lars.fit(X, y)
    skl_betas = model_lasso_lars.coef_path_

    assert_array_almost_equal(r, skl_betas, decimal=12)
@pytest.mark.parametrize("copy_X", [True, False])
def test_lasso_lars_copyX_behaviour(copy_X):
    """
    Test that user input regarding copy_X is not being overridden (it was until
    at least version 0.21)
    """
    lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False)
    rng = np.random.RandomState(0)
    X = rng.normal(0, 1, (100, 5))
    X_copy = X.copy()
    y = X[:, 2]
    lasso_lars.fit(X, y)
    # X is left untouched iff copy_X=True
    assert copy_X == np.array_equal(X, X_copy)
@pytest.mark.parametrize("copy_X", [True, False])
def test_lasso_lars_fit_copyX_behaviour(copy_X):
    """
    Test that user input to .fit for copy_X overrides default __init__ value
    """
    lasso_lars = LassoLarsIC(precompute=False)
    rng = np.random.RandomState(0)
    X = rng.normal(0, 1, (100, 5))
    X_copy = X.copy()
    y = X[:, 2]
    lasso_lars.fit(X, y, copy_X=copy_X)
    # X is left untouched iff copy_X=True
    assert copy_X == np.array_equal(X, X_copy)
@pytest.mark.parametrize("est", (LassoLars(alpha=1e-3), Lars()))
def test_lars_with_jitter(est):
    """A small amount of jitter helps stability (example from issue #2746)."""
    est = clone(est)  # Avoid side effects from previous tests.
    X = np.array([[0.0, 0.0, 0.0, -1.0, 0.0], [0.0, -1.0, 0.0, 0.0, 0.0]])
    y = [-2.5, -2.5]
    expected_coef = [0, 2.5, 0, 2.5, 0]

    # set to fit_intercept to False since target is constant and we want check
    # the value of coef. coef would be all zeros otherwise.
    est.set_params(fit_intercept=False)
    est_jitter = clone(est).set_params(jitter=10e-8, random_state=0)

    est.fit(X, y)
    est_jitter.fit(X, y)

    # Jitter changes the solution noticeably, and the jittered fit recovers
    # the expected coefficients.
    assert np.mean((est.coef_ - est_jitter.coef_) ** 2) > 0.1
    np.testing.assert_allclose(est_jitter.coef_, expected_coef, rtol=1e-3)
def test_X_none_gram_not_none():
    """Passing Gram while X is None must raise a ValueError."""
    expected_msg = "X cannot be None if Gram is not None"
    with pytest.raises(ValueError, match=expected_msg):
        lars_path(X=None, y=np.array([1]), Gram=True)
def test_copy_X_with_auto_gram():
    """`copy_X=True` together with Gram='auto' must not overwrite X.

    Non-regression test for #17789.
    """
    rng = np.random.RandomState(42)
    X = rng.rand(6, 6)
    y = rng.rand(6)
    X_before = X.copy()
    linear_model.lars_path(X, y, Gram="auto", copy_X=True, method="lasso")
    # X did not change
    assert_allclose(X, X_before)
@pytest.mark.parametrize(
    "LARS, has_coef_path, args",
    (
        (Lars, True, {}),
        (LassoLars, True, {}),
        (LassoLarsIC, False, {}),
        (LarsCV, True, {}),
        # max_iter=5 is for avoiding ConvergenceWarning
        (LassoLarsCV, True, {"max_iter": 5}),
    ),
)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_lars_dtype_match(LARS, has_coef_path, args, dtype):
    """The fit method must preserve the input dtype in fitted attributes."""
    # The test ensures that the fit method preserves input dtype
    rng = np.random.RandomState(0)
    X = rng.rand(20, 6).astype(dtype)
    y = rng.rand(20).astype(dtype)

    model = LARS(**args)
    model.fit(X, y)
    assert model.coef_.dtype == dtype
    if has_coef_path:
        assert model.coef_path_.dtype == dtype
    assert model.intercept_.dtype == dtype
@pytest.mark.parametrize(
    "LARS, has_coef_path, args",
    (
        (Lars, True, {}),
        (LassoLars, True, {}),
        (LassoLarsIC, False, {}),
        (LarsCV, True, {}),
        # max_iter=5 is for avoiding ConvergenceWarning
        (LassoLarsCV, True, {"max_iter": 5}),
    ),
)
def test_lars_numeric_consistency(LARS, has_coef_path, args):
    """Trained float32 and float64 coefficients must agree numerically."""
    # The test ensures numerical consistency between trained coefficients
    # of float32 and float64.
    rtol = 1e-5
    atol = 1e-5

    rng = np.random.RandomState(0)
    X_64 = rng.rand(10, 6)
    y_64 = rng.rand(10)

    model_64 = LARS(**args).fit(X_64, y_64)
    model_32 = LARS(**args).fit(X_64.astype(np.float32), y_64.astype(np.float32))

    assert_allclose(model_64.coef_, model_32.coef_, rtol=rtol, atol=atol)
    if has_coef_path:
        assert_allclose(model_64.coef_path_, model_32.coef_path_, rtol=rtol, atol=atol)
    assert_allclose(model_64.intercept_, model_32.intercept_, rtol=rtol, atol=atol)
@pytest.mark.parametrize("criterion", ["aic", "bic"])
def test_lassolarsic_alpha_selection(criterion):
    """Check that we properly compute the AIC and BIC score.

    In this test, we reproduce the example of the Fig. 2 of Zou et al.
    (reference [1] in LassoLarsIC) In this example, only 7 features should be
    selected.
    """
    # NOTE(review): X and y here are module-level fixtures defined earlier in
    # this file (not visible in this chunk) — presumably the diabetes data;
    # verify against the file header.
    model = make_pipeline(StandardScaler(), LassoLarsIC(criterion=criterion))
    model.fit(X, y)

    # The index of the minimal criterion value identifies the selected alpha.
    best_alpha_selected = np.argmin(model[-1].criterion_)
    assert best_alpha_selected == 7
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_lassolarsic_noise_variance(fit_intercept):
    """Check the behaviour when `n_samples` < `n_features` and that one needs
    to provide the noise variance."""
    rng = np.random.RandomState(0)
    # n_features is adjusted by fit_intercept so that the problem is
    # under-determined in both parametrizations.
    X, y = datasets.make_regression(
        n_samples=10, n_features=11 - fit_intercept, random_state=rng
    )
    model = make_pipeline(StandardScaler(), LassoLarsIC(fit_intercept=fit_intercept))

    # Without an explicit noise variance the fit must fail with a clear error.
    err_msg = (
        "You are using LassoLarsIC in the case where the number of samples is smaller"
        " than the number of features"
    )
    with pytest.raises(ValueError, match=err_msg):
        model.fit(X, y)

    # Providing the noise variance makes fit and predict work.
    model.set_params(lassolarsic__noise_variance=1.0)
    model.fit(X, y).predict(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_base.py | sklearn/linear_model/tests/test_base.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
import pytest
from scipy import linalg, sparse
from sklearn.datasets import load_iris, make_regression, make_sparse_uncorrelated
from sklearn.linear_model import LinearRegression
from sklearn.linear_model._base import (
_preprocess_data,
_rescale_data,
make_dataset,
)
from sklearn.preprocessing import add_dummy_feature
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
LIL_CONTAINERS,
)
rtol = 1e-6
def test_linear_regression():
    """Fit LinearRegression on trivially small datasets and check the
    recovered coefficients, intercept and predictions."""
    # Perfectly linear two-point dataset: y = x.
    reg = LinearRegression().fit([[1], [2]], [1, 2])
    assert_array_almost_equal(reg.coef_, [1])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict([[1], [2]]), [1, 2])

    # Degenerate single-sample dataset.
    reg = LinearRegression().fit([[1]], [0])
    assert_array_almost_equal(reg.coef_, [0])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict([[1]]), [0])
@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_linear_regression_sample_weights(
    sparse_container, fit_intercept, global_random_seed
):
    """Compare a weighted fit against the closed-form weighted least squares
    solution, for dense and CSR X, with and without intercept."""
    rng = np.random.RandomState(global_random_seed)

    # It would not work with under-determined systems
    n_samples, n_features = 6, 5

    X = rng.normal(size=(n_samples, n_features))
    if sparse_container is not None:
        X = sparse_container(X)
    y = rng.normal(size=n_samples)

    sample_weight = 1.0 + rng.uniform(size=n_samples)

    # LinearRegression with explicit sample_weight
    reg = LinearRegression(fit_intercept=fit_intercept, tol=1e-16)
    reg.fit(X, y, sample_weight=sample_weight)
    coefs1 = reg.coef_
    inter1 = reg.intercept_

    assert reg.coef_.shape == (X.shape[1],)  # sanity checks

    # Closed form of the weighted least square
    # theta = (X^T W X)^(-1) @ X^T W y
    W = np.diag(sample_weight)
    # add_dummy_feature prepends a column of ones to model the intercept
    X_aug = X if not fit_intercept else add_dummy_feature(X)

    Xw = X_aug.T @ W @ X_aug
    yw = X_aug.T @ W @ y
    coefs2 = linalg.solve(Xw, yw)

    if not fit_intercept:
        assert_allclose(coefs1, coefs2)
    else:
        # the first closed-form coefficient corresponds to the dummy column
        assert_allclose(coefs1, coefs2[1:])
        assert_allclose(inter1, coefs2[0])
def test_raises_value_error_if_positive_and_sparse():
    # `positive=True` requires dense X, so sparse input must be rejected.
    # NOTE: despite the test name, the estimator raises TypeError here.
    X = sparse.eye(10)
    y = np.ones(10)
    reg = LinearRegression(positive=True)

    expected_msg = "Sparse data was passed for X, but dense data is required."
    with pytest.raises(TypeError, match=expected_msg):
        reg.fit(X, y)
@pytest.mark.parametrize("n_samples, n_features", [(2, 3), (3, 2)])
def test_raises_value_error_if_sample_weights_greater_than_1d(n_samples, n_features):
    """Sample weights must be either scalar or 1D: valid weights are accepted
    and 2D weights are rejected."""
    rng = np.random.RandomState(0)

    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    sample_weights_OK = rng.randn(n_samples) ** 2 + 1
    sample_weights_OK_1 = 1.0
    sample_weights_OK_2 = 2.0

    reg = LinearRegression()

    # make sure the "OK" sample weights actually work
    reg.fit(X, y, sample_weights_OK)
    reg.fit(X, y, sample_weights_OK_1)
    reg.fit(X, y, sample_weights_OK_2)

    # 2D sample weights must be rejected; this assertion was missing even
    # though the test name promises it.
    sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
    with pytest.raises(ValueError):
        reg.fit(X, y, sample_weights_not_OK)
def test_fit_intercept():
    # The shape of coef_ must not depend on whether an intercept is fitted,
    # and must be consistent across feature counts.
    X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]])
    X3 = np.array(
        [[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]]
    )
    y = np.array([1, 1])

    fits = {
        (2, False): LinearRegression(fit_intercept=False).fit(X2, y),
        (2, True): LinearRegression().fit(X2, y),
        (3, False): LinearRegression(fit_intercept=False).fit(X3, y),
        (3, True): LinearRegression().fit(X3, y),
    }

    assert fits[(2, True)].coef_.shape == fits[(2, False)].coef_.shape
    assert fits[(3, True)].coef_.shape == fits[(3, False)].coef_.shape
    assert fits[(2, False)].coef_.ndim == fits[(3, False)].coef_.ndim
def test_linear_regression_sparse(global_random_seed):
    # Test that linear regression also works with sparse data
    rng = np.random.RandomState(global_random_seed)
    n = 100
    # Identity design matrix: each sample activates exactly one coefficient.
    X = sparse.eye(n, n)
    beta = rng.rand(n)
    y = X @ beta

    ols = LinearRegression()
    ols.fit(X, y.ravel())
    # With X = I, the fit must recover beta up to the fitted intercept shift.
    assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)

    assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_linear_regression_sparse_equal_dense(fit_intercept, csr_container):
    # Test that linear regression agrees between sparse and dense
    rng = np.random.RandomState(0)
    n_samples = 200
    n_features = 2
    X = rng.randn(n_samples, n_features)
    # Sparsify the design so the CSR representation is actually sparse.
    X[X < 0.1] = 0.0
    Xcsr = csr_container(X)
    y = rng.rand(n_samples)
    params = dict(fit_intercept=fit_intercept)
    clf_dense = LinearRegression(**params)
    clf_sparse = LinearRegression(**params)
    clf_dense.fit(X, y)
    clf_sparse.fit(Xcsr, y)
    # Both paths must produce the same model.
    assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_)
    assert_allclose(clf_dense.coef_, clf_sparse.coef_)
def test_linear_regression_multiple_outcome():
    # Multi-output fit: stacking y twice must give one coefficient row per
    # output and the same predictions as the single-output fit.
    rng = np.random.RandomState(0)
    X, y = make_regression(random_state=rng)

    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    multi = LinearRegression().fit(X, Y)
    assert multi.coef_.shape == (2, n_features)
    Y_pred = multi.predict(X)

    single = LinearRegression().fit(X, y)
    y_pred = single.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_linear_regression_sparse_multiple_outcome(global_random_seed, coo_container):
    # Test multiple-outcome linear regressions with sparse data
    rng = np.random.RandomState(global_random_seed)
    X, y = make_sparse_uncorrelated(random_state=rng)
    X = coo_container(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    # one row of coefficients per output
    assert ols.coef_.shape == (2, n_features)
    Y_pred = ols.predict(X)

    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    # multi-output predictions must match the duplicated single-output ones
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_positive():
    # Nonnegative least squares on trivially small datasets.
    reg = LinearRegression(positive=True).fit([[1], [2]], [1, 2])
    assert_array_almost_equal(reg.coef_, [1])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict([[1], [2]]), [1, 2])

    # test it also for degenerate input
    reg = LinearRegression(positive=True).fit([[1]], [0])
    assert_allclose(reg.coef_, [0])
    assert_allclose(reg.intercept_, [0])
    assert_allclose(reg.predict([[1]]), [0])
def test_linear_regression_positive_multiple_outcome(global_random_seed):
    # Test multiple-outcome nonnegative linear regressions
    rng = np.random.RandomState(global_random_seed)
    X, y = make_sparse_uncorrelated(random_state=rng)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression(positive=True)
    ols.fit(X, Y)
    assert ols.coef_.shape == (2, n_features)
    # the positivity constraint must hold for every output
    assert np.all(ols.coef_ >= 0.0)
    Y_pred = ols.predict(X)

    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_allclose(np.vstack((y_pred, y_pred)).T, Y_pred)
def test_linear_regression_positive_vs_nonpositive(global_random_seed):
    # On this dataset, constraining coefficients to be nonnegative must
    # lead to a markedly different solution than the unconstrained fit.
    rng = np.random.RandomState(global_random_seed)
    X, y = make_sparse_uncorrelated(random_state=rng)

    reg_pos = LinearRegression(positive=True).fit(X, y)
    reg_free = LinearRegression(positive=False).fit(X, y)

    assert np.mean((reg_pos.coef_ - reg_free.coef_) ** 2) > 1e-3
def test_linear_regression_positive_vs_nonpositive_when_positive(global_random_seed):
    # When the true coefficients are all positive, the constrained and
    # unconstrained fits must agree.
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features = 200, 4
    X = rng.rand(n_samples, n_features)
    y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 1.5 * X[:, 3]

    reg_pos = LinearRegression(positive=True).fit(X, y)
    reg_free = LinearRegression(positive=False).fit(X, y)

    assert np.mean((reg_pos.coef_ - reg_free.coef_) ** 2) < 1e-6
@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
@pytest.mark.parametrize("use_sw", [True, False])
def test_inplace_data_preprocessing(sparse_container, use_sw, global_random_seed):
    """Check that fit never modifies y or sample_weight inplace, and modifies
    X inplace only when `copy_X=False` is explicitly requested (dense X only).

    Fixes the `orginal_sw_data` variable-name typo and the "Note hat" comment
    typo from the previous version; behavior is unchanged.
    """
    rng = np.random.RandomState(global_random_seed)
    original_X_data = rng.randn(10, 12)
    original_y_data = rng.randn(10, 2)
    original_sw_data = rng.rand(10)
    if sparse_container is not None:
        X = sparse_container(original_X_data)
    else:
        X = original_X_data.copy()
    y = original_y_data.copy()
    # XXX: Note that y_sparse is not supported (broken?) in the current
    # implementation of LinearRegression.
    if use_sw:
        sample_weight = original_sw_data.copy()
    else:
        sample_weight = None

    # Do not allow inplace preprocessing of X and y:
    reg = LinearRegression()
    reg.fit(X, y, sample_weight=sample_weight)
    if sparse_container is not None:
        assert_allclose(X.toarray(), original_X_data)
    else:
        assert_allclose(X, original_X_data)

    assert_allclose(y, original_y_data)

    if use_sw:
        assert_allclose(sample_weight, original_sw_data)

    # Allow inplace preprocessing of X and y
    reg = LinearRegression(copy_X=False)
    reg.fit(X, y, sample_weight=sample_weight)
    if sparse_container is not None:
        # No optimization relying on the inplace modification of sparse input
        # data has been implemented at this time.
        assert_allclose(X.toarray(), original_X_data)
    else:
        # X has been offset (and optionally rescaled by sample weights)
        # inplace. The 0.42 threshold is arbitrary and has been found to be
        # robust to any random seed in the admissible range.
        assert np.linalg.norm(X - original_X_data) > 0.42

    # y should not have been modified inplace by LinearRegression.fit.
    assert_allclose(y, original_y_data)

    if use_sw:
        # Sample weights have no reason to ever be modified inplace.
        assert_allclose(sample_weight, original_sw_data)
def test_linear_regression_pd_sparse_dataframe_warning():
    """A DataFrame that mixes dense and sparse columns must trigger a
    UserWarning, while an all-sparse DataFrame must not.

    The previous `if col != 0` guard inside `range(1, 4)` was always true
    (dead branch) and has been removed; behavior is unchanged.
    """
    pd = pytest.importorskip("pandas")

    # Column "0" is dense; columns "1".."3" are sparse by construction.
    df = pd.DataFrame({"0": np.random.randn(10)})
    for col in range(1, 4):
        arr = np.random.randn(10)
        arr[:8] = 0
        df[str(col)] = pd.arrays.SparseArray(arr, fill_value=0)

    msg = "pandas.DataFrame with sparse columns found."

    reg = LinearRegression()
    with pytest.warns(UserWarning, match=msg):
        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])

    # does not warn when the whole dataframe is sparse
    df["0"] = pd.arrays.SparseArray(df["0"], fill_value=0)
    assert hasattr(df, "sparse")

    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])
def test_preprocess_data(global_random_seed):
    """Check the centering statistics returned by `_preprocess_data` with and
    without intercept fitting (dense X, no sample weights)."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    expected_y_mean = np.mean(y, axis=0)

    # Without intercept: no centering, means reported as zero.
    Xt, yt, X_mean, y_mean, X_scale, sqrt_sw = _preprocess_data(
        X, y, fit_intercept=False
    )
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert sqrt_sw is None
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)

    # With intercept: X and y are centered by their column/overall means.
    Xt, yt, X_mean, y_mean, X_scale, sqrt_sw = _preprocess_data(
        X, y, fit_intercept=True
    )
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert sqrt_sw is None
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS)
def test_preprocess_data_multioutput(global_random_seed, sparse_container):
    """`_preprocess_data` must center a 2d y per output column."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 200
    n_features = 3
    n_outputs = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)

    if sparse_container is not None:
        X = sparse_container(X)

    # Without intercept: y is left untouched.
    _, yt, _, y_mean, _, _ = _preprocess_data(X, y, fit_intercept=False)
    assert_array_almost_equal(y_mean, np.zeros(n_outputs))
    assert_array_almost_equal(yt, y)

    # With intercept: y is centered column-wise.
    _, yt, _, y_mean, _, _ = _preprocess_data(X, y, fit_intercept=True)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(yt, y - y_mean)
@pytest.mark.parametrize("rescale_with_sw", [False, True])
@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
def test_preprocess_data_weighted(
    rescale_with_sw, sparse_container, global_random_seed
):
    """Check weighted centering (and optional sqrt(sample_weight) rescaling)
    performed by `_preprocess_data` for dense and sparse X."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 200
    n_features = 4
    # Generate random data with 50% of zero values to make sure
    # that the sparse variant of this test is actually sparse. This also
    # shifts the mean value for each columns in X further away from
    # zero.
    X = rng.rand(n_samples, n_features)
    X[X < 0.5] = 0.0

    # Scale the first feature of X to be 10 larger than the other to
    # better check the impact of feature scaling.
    X[:, 0] *= 10

    # Constant non-zero feature.
    X[:, 2] = 1.0

    # Constant zero feature (non-materialized in the sparse case)
    X[:, 3] = 0.0
    y = rng.rand(n_samples)

    sample_weight = np.abs(rng.rand(n_samples)) + 1
    # Weighted column means are the expected centering statistics.
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)

    X_sample_weight_avg = np.average(X, weights=sample_weight, axis=0)
    X_sample_weight_var = np.average(
        (X - X_sample_weight_avg) ** 2, weights=sample_weight, axis=0
    )
    constant_mask = X_sample_weight_var < 10 * np.finfo(X.dtype).eps
    assert_array_equal(constant_mask, [0, 0, 1, 1])
    # NOTE(review): expected_X_scale is computed but never asserted against
    # below (X_scale is checked to be all ones) — confirm whether this is a
    # leftover from an earlier version of the test.
    expected_X_scale = np.sqrt(X_sample_weight_var) * np.sqrt(sample_weight.sum())

    # near constant features should not be scaled
    expected_X_scale[constant_mask] = 1

    if sparse_container is not None:
        X = sparse_container(X)

    Xt, yt, X_mean, y_mean, X_scale, sqrt_sw = _preprocess_data(
        X,
        y,
        fit_intercept=True,
        sample_weight=sample_weight,
        rescale_with_sw=rescale_with_sw,
    )

    if sparse_container is not None:
        # Simplifies asserts
        X = X.toarray()
        Xt = Xt.toarray()

    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_scale, np.ones(n_features))
    if rescale_with_sw:
        assert_allclose(sqrt_sw, np.sqrt(sample_weight))
        if sparse_container is not None:
            # sparse X is rescaled by sqrt_sw but not centered
            assert_allclose(Xt, sqrt_sw[:, None] * X)
        else:
            assert_allclose(Xt, sqrt_sw[:, None] * (X - expected_X_mean))
        assert_allclose(yt, sqrt_sw * (y - expected_y_mean))
    else:
        assert sqrt_sw is None
        if sparse_container is not None:
            assert_allclose(Xt, X)
        else:
            assert_allclose(Xt, X - expected_X_mean)
        assert_allclose(yt, y - expected_y_mean)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_sparse_preprocess_data_offsets(global_random_seed, lil_container):
    """For sparse X, `_preprocess_data` reports the means but must not center
    X itself (the returned sparse matrix keeps the original values)."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 200
    n_features = 2
    X = sparse.rand(n_samples, n_features, density=0.5, random_state=rng)
    X = lil_container(X)
    y = rng.rand(n_samples)
    XA = X.toarray()

    # Without intercept: nothing is centered.
    Xt, yt, X_mean, y_mean, X_scale, sqrt_sw = _preprocess_data(
        X, y, fit_intercept=False
    )
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert sqrt_sw is None
    assert_array_almost_equal(Xt.toarray(), XA)
    assert_array_almost_equal(yt, y)

    # With intercept: means are computed, X stays uncentered, y is centered.
    Xt, yt, X_mean, y_mean, X_scale, sqrt_sw = _preprocess_data(
        X, y, fit_intercept=True
    )
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_scale, np.ones(n_features))
    assert sqrt_sw is None
    assert_array_almost_equal(Xt.toarray(), XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_csr_preprocess_data(csr_container):
    """`_preprocess_data` must keep CSR input in CSR format."""
    X, y = make_regression()
    X[X < 2.5] = 0.0
    X_csr = csr_container(X)
    X_out, y, _, _, _, _ = _preprocess_data(X_csr, y, fit_intercept=True)
    assert X_out.format == "csr"
@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
@pytest.mark.parametrize("to_copy", (True, False))
def test_preprocess_copy_data_no_checks(sparse_container, to_copy):
    # With check_input=False, the `copy` flag alone decides whether the
    # returned X shares memory with the input (dense array or sparse .data).
    X, y = make_regression()
    X[X < 2.5] = 0.0
    if sparse_container is not None:
        X = sparse_container(X)

    X_, y_, _, _, _, _ = _preprocess_data(
        X, y, fit_intercept=True, copy=to_copy, check_input=False
    )

    if sparse_container is not None:
        data_in, data_out = X.data, X_.data
    else:
        data_in, data_out = X, X_
    assert np.may_share_memory(data_out, data_in) == (not to_copy)
@pytest.mark.parametrize("rescale_with_sw", [False, True])
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_dtype_preprocess_data(rescale_with_sw, fit_intercept, global_random_seed):
    """Check that `_preprocess_data` preserves the dtype of X: every returned
    array is float32 for float32 X and float64 for float64 X, regardless of
    the dtype of y, and float32/float64 results agree numerically."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sw = rng.rand(n_samples) + 1

    X_32 = np.asarray(X, dtype=np.float32)
    y_32 = np.asarray(y, dtype=np.float32)
    sw_32 = np.asarray(sw, dtype=np.float32)
    X_64 = np.asarray(X, dtype=np.float64)
    y_64 = np.asarray(y, dtype=np.float64)
    sw_64 = np.asarray(sw, dtype=np.float64)

    Xt_32, yt_32, X_mean_32, y_mean_32, X_scale_32, sqrt_sw_32 = _preprocess_data(
        X_32,
        y_32,
        fit_intercept=fit_intercept,
        sample_weight=sw_32,
        rescale_with_sw=rescale_with_sw,
    )

    Xt_64, yt_64, X_mean_64, y_mean_64, X_scale_64, sqrt_sw_64 = _preprocess_data(
        X_64,
        y_64,
        fit_intercept=fit_intercept,
        sample_weight=sw_64,
        rescale_with_sw=rescale_with_sw,
    )

    Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_scale_3264, sqrt_sw_3264 = (
        _preprocess_data(
            X_32,
            y_64,
            fit_intercept=fit_intercept,
            sample_weight=sw_32,  # sample_weight must have same dtype as X
            rescale_with_sw=rescale_with_sw,
        )
    )

    Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_scale_6432, sqrt_sw_6432 = (
        _preprocess_data(
            X_64,
            y_32,
            fit_intercept=fit_intercept,
            sample_weight=sw_64,  # sample_weight must have same dtype as X
            rescale_with_sw=rescale_with_sw,
        )
    )

    assert Xt_32.dtype == np.float32
    assert yt_32.dtype == np.float32
    assert X_mean_32.dtype == np.float32
    assert y_mean_32.dtype == np.float32
    assert X_scale_32.dtype == np.float32
    if rescale_with_sw:
        assert sqrt_sw_32.dtype == np.float32

    assert Xt_64.dtype == np.float64
    assert yt_64.dtype == np.float64
    assert X_mean_64.dtype == np.float64
    assert y_mean_64.dtype == np.float64
    assert X_scale_64.dtype == np.float64
    if rescale_with_sw:
        assert sqrt_sw_64.dtype == np.float64

    # Mixed dtypes follow the dtype of X (float32 here).
    assert Xt_3264.dtype == np.float32
    assert yt_3264.dtype == np.float32
    assert X_mean_3264.dtype == np.float32
    assert y_mean_3264.dtype == np.float32
    assert X_scale_3264.dtype == np.float32
    if rescale_with_sw:
        assert sqrt_sw_3264.dtype == np.float32

    assert Xt_6432.dtype == np.float64
    assert yt_6432.dtype == np.float64
    assert X_mean_6432.dtype == np.float64
    assert y_mean_6432.dtype == np.float64
    # Bug fix: this line previously re-checked X_scale_3264 (float32) instead
    # of asserting the float64 dtype of X_scale_6432.
    assert X_scale_6432.dtype == np.float64
    if rescale_with_sw:
        assert sqrt_sw_6432.dtype == np.float64

    # The input arrays keep their original dtypes.
    assert X_32.dtype == np.float32
    assert y_32.dtype == np.float32
    assert X_64.dtype == np.float64
    assert y_64.dtype == np.float64

    # float32 and float64 results must agree up to float32 precision.
    assert_allclose(Xt_32, Xt_64, rtol=1e-3, atol=1e-6)
    assert_allclose(yt_32, yt_64, rtol=1e-3, atol=1e-6)
    assert_allclose(X_mean_32, X_mean_64, rtol=1e-6)
    assert_allclose(y_mean_32, y_mean_64, rtol=1e-6)
    assert_allclose(X_scale_32, X_scale_64)
    if rescale_with_sw:
        assert_allclose(sqrt_sw_32, sqrt_sw_64, rtol=1e-6)
@pytest.mark.parametrize("n_targets", [None, 2])
@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
def test_rescale_data(n_targets, sparse_container, global_random_seed):
    """`_rescale_data` must multiply X and y row-wise by sqrt(sample_weight)
    and return that sqrt vector, for dense/sparse X and 1d/2d y."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 200
    n_features = 2

    sample_weight = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    if n_targets is None:
        y = rng.rand(n_samples)
    else:
        y = rng.rand(n_samples, n_targets)

    expected_sqrt_sw = np.sqrt(sample_weight)
    expected_rescaled_X = X * expected_sqrt_sw[:, np.newaxis]

    if n_targets is None:
        expected_rescaled_y = y * expected_sqrt_sw
    else:
        expected_rescaled_y = y * expected_sqrt_sw[:, np.newaxis]

    if sparse_container is not None:
        X = sparse_container(X)
        if n_targets is None:
            # sparse matrices must be 2d, hence the reshape
            y = sparse_container(y.reshape(-1, 1))
        else:
            y = sparse_container(y)

    rescaled_X, rescaled_y, sqrt_sw = _rescale_data(X, y, sample_weight)

    assert_allclose(sqrt_sw, expected_sqrt_sw)

    if sparse_container is not None:
        rescaled_X = rescaled_X.toarray()
        rescaled_y = rescaled_y.toarray()
        if n_targets is None:
            rescaled_y = rescaled_y.ravel()

    assert_allclose(rescaled_X, expected_rescaled_X)
    assert_allclose(rescaled_y, expected_rescaled_y)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_fused_types_make_dataset(csr_container):
    """`make_dataset` must preserve float32/float64 dtypes and yield the same
    samples for dense and CSR inputs."""
    iris = load_iris()

    X_32 = iris.data.astype(np.float32)
    y_32 = iris.target.astype(np.float32)
    X_csr_32 = csr_container(X_32)
    sample_weight_32 = np.arange(y_32.size, dtype=np.float32)

    X_64 = iris.data.astype(np.float64)
    y_64 = iris.target.astype(np.float64)
    X_csr_64 = csr_container(X_64)
    sample_weight_64 = np.arange(y_64.size, dtype=np.float64)

    # array
    dataset_32, _ = make_dataset(X_32, y_32, sample_weight_32)
    dataset_64, _ = make_dataset(X_64, y_64, sample_weight_64)
    # _next_py yields ((data, indices, indptr), y_i, sw_i, idx)
    xi_32, yi_32, _, _ = dataset_32._next_py()
    xi_64, yi_64, _, _ = dataset_64._next_py()
    xi_data_32, _, _ = xi_32
    xi_data_64, _, _ = xi_64

    assert xi_data_32.dtype == np.float32
    assert xi_data_64.dtype == np.float64
    assert_allclose(yi_64, yi_32, rtol=rtol)

    # csr
    datasetcsr_32, _ = make_dataset(X_csr_32, y_32, sample_weight_32)
    datasetcsr_64, _ = make_dataset(X_csr_64, y_64, sample_weight_64)
    xicsr_32, yicsr_32, _, _ = datasetcsr_32._next_py()
    xicsr_64, yicsr_64, _, _ = datasetcsr_64._next_py()
    xicsr_data_32, _, _ = xicsr_32
    xicsr_data_64, _, _ = xicsr_64

    assert xicsr_data_32.dtype == np.float32
    assert xicsr_data_64.dtype == np.float64

    assert_allclose(xicsr_data_64, xicsr_data_32, rtol=rtol)
    assert_allclose(yicsr_64, yicsr_32, rtol=rtol)

    # dense and csr datasets must yield identical samples
    assert_array_equal(xi_data_32, xicsr_data_32)
    assert_array_equal(xi_data_64, xicsr_data_64)
    assert_array_equal(yi_32, yicsr_32)
    assert_array_equal(yi_64, yicsr_64)
@pytest.mark.parametrize("X_shape", [(10, 5), (10, 20), (100, 100)])
@pytest.mark.parametrize(
    "sparse_container",
    [None]
    + [
        pytest.param(
            container,
            marks=pytest.mark.xfail(
                reason="Known to fail for CSR arrays, see issue #30131."
            ),
        )
        for container in CSR_CONTAINERS
    ],
)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_linear_regression_sample_weight_consistency(
    X_shape, sparse_container, fit_intercept, global_random_seed
):
    """Test that the impact of sample_weight is consistent.

    Note that this test is stricter than the common test
    check_sample_weight_equivalence alone and also tests sparse X.
    It is very similar to test_enet_sample_weight_consistency.
    """
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features = X_shape

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    if sparse_container is not None:
        X = sparse_container(X)
    params = dict(fit_intercept=fit_intercept)

    # reference fit without sample weights
    reg = LinearRegression(**params).fit(X, y, sample_weight=None)
    coef = reg.coef_.copy()
    if fit_intercept:
        intercept = reg.intercept_

    # 1) sample_weight=np.ones(..) must be equivalent to sample_weight=None,
    # a special case of check_sample_weight_equivalence(name, reg), but we also
    # test with sparse input.
    sample_weight = np.ones_like(y)
    reg.fit(X, y, sample_weight=sample_weight)
    assert_allclose(reg.coef_, coef, rtol=1e-6)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept)

    # 2) sample_weight=None should be equivalent to sample_weight = number
    sample_weight = 123.0
    reg.fit(X, y, sample_weight=sample_weight)
    assert_allclose(reg.coef_, coef, rtol=1e-6)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept)

    # 3) scaling of sample_weight should have no effect, cf. np.average()
    sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0])
    reg = reg.fit(X, y, sample_weight=sample_weight)
    coef = reg.coef_.copy()
    if fit_intercept:
        intercept = reg.intercept_

    reg.fit(X, y, sample_weight=np.pi * sample_weight)
    assert_allclose(reg.coef_, coef, rtol=1e-6 if sparse_container is None else 1e-5)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept)

    # 4) setting elements of sample_weight to 0 is equivalent to removing these samples
    sample_weight_0 = sample_weight.copy()
    sample_weight_0[-5:] = 0
    y[-5:] *= 1000  # to make excluding those samples important
    reg.fit(X, y, sample_weight=sample_weight_0)
    coef_0 = reg.coef_.copy()
    if fit_intercept:
        intercept_0 = reg.intercept_
    reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5])
    assert_allclose(reg.coef_, coef_0, rtol=1e-5)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept_0)

    # 5) check that multiplying sample_weight by 2 is equivalent to repeating
    # corresponding samples twice
    if sparse_container is not None:
        X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc")
    else:
        X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
    y2 = np.concatenate([y, y[: n_samples // 2]])
    sample_weight_1 = sample_weight.copy()
    sample_weight_1[: n_samples // 2] *= 2
    sample_weight_2 = np.concatenate(
        [sample_weight, sample_weight[: n_samples // 2]], axis=0
    )

    reg1 = LinearRegression(**params).fit(X, y, sample_weight=sample_weight_1)
    reg2 = LinearRegression(**params).fit(X2, y2, sample_weight=sample_weight_2)
    assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6)
    if fit_intercept:
        assert_allclose(reg1.intercept_, reg2.intercept_)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_sgd.py | sklearn/linear_model/tests/test_sgd.py | import pickle
import warnings
from unittest.mock import Mock
import joblib
import numpy as np
import pytest
import scipy.sparse as sp
from scipy.optimize import minimize
from sklearn import datasets, linear_model, metrics
from sklearn.base import clone, is_classifier
from sklearn.datasets import make_blobs
from sklearn.exceptions import ConvergenceWarning
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import _sgd_fast as sgd_fast
from sklearn.linear_model import _stochastic_gradient
from sklearn.model_selection import (
RandomizedSearchCV,
ShuffleSplit,
StratifiedShuffleSplit,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, scale
from sklearn.svm import OneClassSVM
from sklearn.utils import get_tags
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
def _update_kwargs(kwargs):
    """Fill in the default SGD test parameters shared by this module's
    factory helpers, leaving caller-provided values untouched."""
    kwargs.setdefault("random_state", 42)
    kwargs.setdefault("tol", None)
    kwargs.setdefault("max_iter", 5)
class _SparseSGDClassifier(linear_model.SGDClassifier):
    """SGDClassifier variant that converts X to CSR before delegating, so the
    sparse code paths are exercised by the shared tests."""

    def fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super().fit(X, y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super().partial_fit(X, y, *args, **kw)

    def decision_function(self, X):
        X = sp.csr_matrix(X)
        return super().decision_function(X)

    def predict_proba(self, X):
        X = sp.csr_matrix(X)
        return super().predict_proba(X)
class _SparseSGDRegressor(linear_model.SGDRegressor):
    """SGDRegressor variant that converts X to CSR before delegating, so the
    sparse code paths are exercised by the shared tests."""

    def fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return linear_model.SGDRegressor.fit(self, X, y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return linear_model.SGDRegressor.partial_fit(self, X, y, *args, **kw)

    def decision_function(self, X, *args, **kw):
        # XXX untested as of v0.22
        X = sp.csr_matrix(X)
        return linear_model.SGDRegressor.decision_function(self, X, *args, **kw)
class _SparseSGDOneClassSVM(linear_model.SGDOneClassSVM):
    """SGDOneClassSVM variant that converts X to CSR before delegating, so
    the sparse code paths are exercised by the shared tests."""

    def fit(self, X, *args, **kw):
        X = sp.csr_matrix(X)
        return linear_model.SGDOneClassSVM.fit(self, X, *args, **kw)

    def partial_fit(self, X, *args, **kw):
        X = sp.csr_matrix(X)
        return linear_model.SGDOneClassSVM.partial_fit(self, X, *args, **kw)

    def decision_function(self, X, *args, **kw):
        X = sp.csr_matrix(X)
        return linear_model.SGDOneClassSVM.decision_function(self, X, *args, **kw)
def SGDClassifier(**kwargs):
    """`linear_model.SGDClassifier` factory with this module's test defaults."""
    _update_kwargs(kwargs)
    return linear_model.SGDClassifier(**kwargs)
def SGDRegressor(**kwargs):
    """`linear_model.SGDRegressor` factory with this module's test defaults."""
    _update_kwargs(kwargs)
    return linear_model.SGDRegressor(**kwargs)
def SGDOneClassSVM(**kwargs):
    """`linear_model.SGDOneClassSVM` factory with this module's test defaults."""
    _update_kwargs(kwargs)
    return linear_model.SGDOneClassSVM(**kwargs)
def SparseSGDClassifier(**kwargs):
    """CSR-converting `_SparseSGDClassifier` factory with test defaults."""
    _update_kwargs(kwargs)
    return _SparseSGDClassifier(**kwargs)
def SparseSGDRegressor(**kwargs):
    """CSR-converting `_SparseSGDRegressor` factory with test defaults."""
    _update_kwargs(kwargs)
    return _SparseSGDRegressor(**kwargs)
def SparseSGDOneClassSVM(**kwargs):
    """CSR-converting `_SparseSGDOneClassSVM` factory with test defaults."""
    _update_kwargs(kwargs)
    return _SparseSGDOneClassSVM(**kwargs)
# Test Data

# test sample 1: negative points labeled 1, positive points labeled 2.
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]

# test sample 2; string class labels
X2 = np.array(
    [
        [-1, 1],
        [-0.75, 0.5],
        [-1.5, 1.5],
        [1, 1],
        [0.75, 0.5],
        [1.5, 1.5],
        [-1, -1],
        [0, -0.5],
        [1, -1],
    ]
)
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]

# test sample 3: indicator-style features, binary labels.
X3 = np.array(
    [
        [1, 1, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 1, 1],
        [0, 0, 0, 0, 1, 1],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0, 0],
    ]
)
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# test sample 4 - two more or less redundant feature groups
X4 = np.array(
    [
        [1, 0.9, 0.8, 0, 0, 0],
        [1, 0.84, 0.98, 0, 0, 0],
        [1, 0.96, 0.88, 0, 0, 0],
        [1, 0.91, 0.99, 0, 0, 0],
        [0, 0, 0, 0.89, 0.91, 1],
        [0, 0, 0, 0.79, 0.84, 1],
        [0, 0, 0, 0.91, 0.95, 1],
        [0, 0, 0, 0.93, 1, 1],
    ]
)
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# shared real-world dataset for the tests below
iris = datasets.load_iris()

# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common Test Case to classification and regression

# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
    """Reference averaged-SGD implementation (squared loss, L2 penalty).

    Performs one pass over (X, y) with constant learning rate ``eta`` and
    penalty strength ``alpha``, and returns the running averages of the
    weight vector and intercept over all iterates.
    """
    if weight_init is None:
        weights = np.zeros(X.shape[1])
    else:
        weights = weight_init

    average_weights = np.zeros(X.shape[1])
    intercept = intercept_init
    average_intercept = 0.0
    decay = 1.0

    # sparse data has a fixed decay of .01
    if klass in (SparseSGDClassifier, SparseSGDRegressor):
        decay = 0.01

    for i, entry in enumerate(X):
        p = np.dot(entry, weights)
        p += intercept
        gradient = p - y[i]
        # L2 shrinkage followed by the squared-loss gradient step.
        weights *= 1.0 - (eta * alpha)
        weights += -(eta * gradient * entry)
        # the intercept update is additionally scaled by the decay factor
        intercept += -(eta * gradient) * decay

        # incremental update of the running mean over iterates 0..i
        average_weights *= i
        average_weights += weights
        average_weights /= i + 1.0

        average_intercept *= i
        average_intercept += intercept
        average_intercept /= i + 1.0

    return average_weights, average_intercept
def _test_warm_start(klass, X, Y, lr):
    """Check that explicit and implicit warm restarts produce the same model."""
    # Explicit warm restart: pass the previous coef_/intercept_ to fit.
    first = klass(alpha=0.01, eta0=0.01, shuffle=False, learning_rate=lr)
    first.fit(X, Y)
    explicit = klass(alpha=0.001, eta0=0.01, shuffle=False, learning_rate=lr)
    explicit.fit(
        X, Y, coef_init=first.coef_.copy(), intercept_init=first.intercept_.copy()
    )
    # Implicit warm restart: warm_start=True reuses the fitted state.
    implicit = klass(
        alpha=0.01, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr
    )
    implicit.fit(X, Y)
    assert implicit.t_ == first.t_
    assert_array_almost_equal(implicit.coef_, first.coef_)
    implicit.set_params(alpha=0.001)
    implicit.fit(X, Y)
    assert implicit.t_ == explicit.t_
    assert_array_almost_equal(implicit.coef_, explicit.coef_)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
    # Run the shared warm-start equivalence check on the small toy problem.
    _test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_input_format(klass):
    """A two-column target matrix must be rejected by fit."""
    est = klass(alpha=0.01, shuffle=False)
    est.fit(X, Y)
    col = np.array(Y)[:, np.newaxis]
    bad_target = np.c_[col, col]
    with pytest.raises(ValueError):
        est.fit(X, bad_target)
@pytest.mark.parametrize("lr", ["pa1", "pa2"])
@pytest.mark.parametrize(
    ["est", "loss"], [(SGDClassifier, "squared_hinge"), (SGDRegressor, "squared_error")]
)
def test_learning_rate_PA_raises(lr, est, loss):
    """Test that SGD raises with forbidden loss for passive-aggressive algo."""
    model = est(loss=loss, learning_rate=lr)
    with pytest.raises(ValueError):
        model.fit(X, Y)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_clone(klass):
    """Cloning then re-parameterizing must match a freshly built estimator."""
    cloned = clone(klass(alpha=0.01, penalty="l1"))
    cloned.set_params(penalty="l2")
    cloned.fit(X, Y)
    reference = klass(alpha=0.01, penalty="l2")
    reference.fit(X, Y)
    assert_array_equal(cloned.coef_, reference.coef_)
@pytest.mark.parametrize(
    "klass",
    [
        SGDClassifier,
        SparseSGDClassifier,
        SGDRegressor,
        SparseSGDRegressor,
        SGDOneClassSVM,
        SparseSGDOneClassSVM,
    ],
)
def test_plain_has_no_average_attr(klass):
    # With average=True the private averaging buffers must exist after fit...
    private_attrs = (
        "_average_coef",
        "_average_intercept",
        "_standard_intercept",
        "_standard_coef",
    )
    averaged = klass(average=True, eta0=0.01)
    averaged.fit(X, Y)
    for attr in private_attrs:
        assert hasattr(averaged, attr)
    # ... and with the default (no averaging) none of them may be set.
    plain = klass()
    plain.fit(X, Y)
    for attr in private_attrs:
        assert not hasattr(plain, attr)
@pytest.mark.parametrize(
    "klass",
    [
        SGDClassifier,
        SparseSGDClassifier,
        SGDRegressor,
        SparseSGDRegressor,
        SGDOneClassSVM,
        SparseSGDOneClassSVM,
    ],
)
def test_late_onset_averaging_not_reached(klass):
    # If the averaging threshold (average=600) is never reached, the model
    # must be indistinguishable from its non-averaged counterpart.
    deferred = klass(average=600)
    plain = klass()
    for _ in range(100):
        if is_classifier(deferred):
            deferred.partial_fit(X, Y, classes=np.unique(Y))
            plain.partial_fit(X, Y, classes=np.unique(Y))
        else:
            deferred.partial_fit(X, Y)
            plain.partial_fit(X, Y)
    assert_array_almost_equal(deferred.coef_, plain.coef_, decimal=16)
    if klass in [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]:
        assert_almost_equal(deferred.intercept_, plain.intercept_, decimal=16)
    elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
        assert_allclose(deferred.offset_, plain.offset_)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_late_onset_averaging_reached(klass):
    # Averaging that kicks in mid-training (average=7, i.e. after the first
    # 6-sample epoch) must equal the reference: one plain-SGD epoch (clf2),
    # then one averaged epoch computed by `asgd` warm-started from it.
    eta0 = 0.001
    alpha = 0.0001
    # encode labels as -1/+1 so the squared-error reference applies
    Y_encode = np.array(Y)
    Y_encode[Y_encode == 1] = -1.0
    Y_encode[Y_encode == 2] = 1.0
    clf1 = klass(
        average=7,
        learning_rate="constant",
        loss="squared_error",
        eta0=eta0,
        alpha=alpha,
        max_iter=2,
        shuffle=False,
    )
    clf2 = klass(
        average=False,
        learning_rate="constant",
        loss="squared_error",
        eta0=eta0,
        alpha=alpha,
        max_iter=1,
        shuffle=False,
    )
    clf1.fit(X, Y_encode)
    clf2.fit(X, Y_encode)
    # second-epoch reference, warm-started from clf2's one-epoch solution
    average_weights, average_intercept = asgd(
        klass,
        X,
        Y_encode,
        eta0,
        alpha,
        weight_init=clf2.coef_.ravel(),
        intercept_init=clf2.intercept_,
    )
    assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16)
    assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_early_stopping(klass):
    # With tol set, training must converge before exhausting max_iter,
    # whether validation-based early stopping is enabled or not.
    X = iris.data[iris.target > 0]
    Y = iris.target[iris.target > 0]
    max_iter = 1000
    for early_stopping in (True, False):
        est = klass(early_stopping=early_stopping, tol=1e-3, max_iter=max_iter)
        est.fit(X, Y)
        assert est.n_iter_ < max_iter
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_adaptive_longer_than_constant(klass):
    # The adaptive schedule should need more epochs than a constant rate to
    # trigger the tol-based stopping criterion on the same data.
    adaptive = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3, max_iter=100)
    adaptive.fit(iris.data, iris.target)
    constant = klass(learning_rate="constant", eta0=0.01, tol=1e-3, max_iter=100)
    constant.fit(iris.data, iris.target)
    assert adaptive.n_iter_ > constant.n_iter_
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_validation_set_not_used_for_training(klass):
    # With early_stopping=True, only the training split may influence the
    # weights: fitting on the equivalent explicit split (same seed and
    # fraction) without early stopping must give identical coefficients.
    X, Y = iris.data, iris.target
    validation_fraction = 0.4
    seed = 42
    shuffle = False
    max_iter = 10
    clf1 = klass(
        early_stopping=True,
        random_state=np.random.RandomState(seed),
        validation_fraction=validation_fraction,
        learning_rate="constant",
        eta0=0.01,
        tol=None,
        max_iter=max_iter,
        shuffle=shuffle,
    )
    clf1.fit(X, Y)
    # tol=None: both runs are expected to use all max_iter epochs
    assert clf1.n_iter_ == max_iter
    clf2 = klass(
        early_stopping=False,
        random_state=np.random.RandomState(seed),
        learning_rate="constant",
        eta0=0.01,
        tol=None,
        max_iter=max_iter,
        shuffle=shuffle,
    )
    # reproduce the internal train/validation split (stratified for
    # classifiers) with the same seed and fraction
    if is_classifier(clf2):
        cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=seed)
    else:
        cv = ShuffleSplit(test_size=validation_fraction, random_state=seed)
    idx_train, idx_val = next(cv.split(X, Y))
    idx_train = np.sort(idx_train)  # remove shuffling
    clf2.fit(X[idx_train], Y[idx_train])
    assert clf2.n_iter_ == max_iter
    assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_n_iter_no_change(klass):
    # n_iter_ must grow monotonically with the patience n_iter_no_change.
    X, Y = iris.data, iris.target
    for early_stopping in (True, False):
        n_iters = []
        for patience in (2, 3, 10):
            est = klass(
                early_stopping=early_stopping,
                n_iter_no_change=patience,
                tol=1e-4,
                max_iter=1000,
            )
            n_iters.append(est.fit(X, Y).n_iter_)
        assert_array_equal(n_iters, sorted(n_iters))
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_not_enough_sample_for_early_stopping(klass):
    # A validation fraction that leaves (almost) no training samples on the
    # tiny X3/Y3 dataset must raise.
    est = klass(early_stopping=True, validation_fraction=0.99)
    with pytest.raises(ValueError):
        est.fit(X3, Y3)
@pytest.mark.parametrize("Estimator", [SGDClassifier, SGDRegressor])
@pytest.mark.parametrize("l1_ratio", [0, 0.7, 1])
def test_sgd_l1_ratio_not_used(Estimator, l1_ratio):
    """Check that l1_ratio is not used when penalty is not 'elasticnet'"""
    model_ratio_none = Estimator(penalty="l1", l1_ratio=None, random_state=0).fit(X, Y)
    model_ratio_set = Estimator(penalty="l1", l1_ratio=l1_ratio, random_state=0).fit(
        X, Y
    )
    assert_allclose(model_ratio_none.coef_, model_ratio_set.coef_)
@pytest.mark.parametrize(
    "Estimator", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_sgd_failing_penalty_validation(Estimator):
    # elasticnet without an explicit l1_ratio is invalid and must raise.
    model = Estimator(penalty="elasticnet", l1_ratio=None)
    with pytest.raises(
        ValueError, match="l1_ratio must be set when penalty is 'elasticnet'"
    ):
        model.fit(X, Y)
# TODO(1.10): remove this test
@pytest.mark.parametrize(
    "klass",
    [
        SGDClassifier,
        SparseSGDClassifier,
        SGDRegressor,
        SparseSGDRegressor,
        SGDOneClassSVM,
        SparseSGDOneClassSVM,
    ],
)
def test_power_t_limits(klass):
    """Check that a warning is raised when `power_t` is negative."""
    negative = klass(power_t=-1.0)
    with pytest.warns(
        FutureWarning, match="Negative values for `power_t` are deprecated"
    ):
        negative.fit(X, Y)
    # A non-negative power_t must fit without emitting any warning.
    with warnings.catch_warnings(record=True) as caught:
        non_negative = klass(power_t=0.5)
        non_negative.fit(X, Y)
        assert len(caught) == 0
###############################################################################
# Classification Test Case
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
    # Smoke test: every classification loss should separate the toy problem.
    for loss in ("hinge", "squared_hinge", "log_loss", "modified_huber"):
        est = klass(
            penalty="l2",
            alpha=0.01,
            fit_intercept=True,
            loss=loss,
            max_iter=10,
            shuffle=True,
        )
        est.fit(X, Y)
        assert_array_equal(est.predict(T), true_result)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_provide_coef(klass):
    """Check that the shape of `coef_init` is validated."""
    wrongly_shaped = np.zeros((3,))
    with pytest.raises(ValueError, match="Provided coef_init does not match dataset"):
        klass().fit(X, Y, coef_init=wrongly_shaped)
@pytest.mark.parametrize(
    "klass, fit_params",
    [
        (SGDClassifier, {"intercept_init": np.zeros((3,))}),
        (SparseSGDClassifier, {"intercept_init": np.zeros((3,))}),
        (SGDOneClassSVM, {"offset_init": np.zeros((3,))}),
        (SparseSGDOneClassSVM, {"offset_init": np.zeros((3,))}),
    ],
)
def test_set_intercept_offset(klass, fit_params):
    """Check that `intercept_init` or `offset_init` is validated."""
    # A length-3 init on a binary/one-class problem has the wrong shape.
    estimator = klass()
    with pytest.raises(ValueError, match="does not match dataset"):
        estimator.fit(X, Y, **fit_params)
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_sgd_early_stopping_with_partial_fit(klass):
    """Check that we raise an error for `early_stopping` used with
    `partial_fit`.
    """
    expected_msg = "early_stopping should be False with partial_fit"
    with pytest.raises(ValueError, match=expected_msg):
        klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize(
    "klass, fit_params",
    [
        (SGDClassifier, {"intercept_init": 0}),
        (SparseSGDClassifier, {"intercept_init": 0}),
        (SGDOneClassSVM, {"offset_init": 0}),
        (SparseSGDOneClassSVM, {"offset_init": 0}),
    ],
)
def test_set_intercept_offset_binary(klass, fit_params):
    """Check that a scalar `intercept_init`/`offset_init` is accepted for a
    binary classification problem."""
    klass().fit(X5, Y5, **fit_params)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
    # Checks the SGDClassifier correctly computes the average weights by
    # comparing one epoch of average=True fitting against the pure NumPy
    # reference implementation in `asgd`.
    eta = 0.1
    alpha = 2.0
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    # simple linear function without noise; sign() yields -1/+1 labels
    y = np.dot(X, w)
    y = np.sign(y)
    clf.fit(X, y)
    average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
    average_weights = average_weights.reshape(1, -1)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=14)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
    # Refitting with a previously learned intercept_ must accept its shape
    # on both toy datasets (warm-start shape consistency).
    fitted_a = klass().fit(X5, Y5)
    klass().fit(X5, Y5, intercept_init=fitted_a.intercept_)
    fitted_b = klass().fit(X, Y)
    klass().fit(X, Y, intercept_init=fitted_b.intercept_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
    # Fitting a classifier on a single-class target must raise.
    est = klass(alpha=0.01, max_iter=20)
    single_class_target = np.ones(9)
    with pytest.raises(ValueError):
        est.fit(X2, single_class_target)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
    # class_weight='balanced' is rejected by partial_fit: class frequencies
    # cannot be estimated from a single mini-batch.
    expected_msg = (
        r"class_weight 'balanced' is not supported for "
        r"partial_fit\. In order to use 'balanced' weights, "
        r"use compute_class_weight\('balanced', classes=classes, y=y\). "
        r"In place of y you can use a large enough sample "
        r"of the full training set target to properly "
        r"estimate the class frequency distributions\. "
        r"Pass the resulting weights as the class_weight "
        r"parameter\."
    )
    with pytest.raises(ValueError, match=expected_msg):
        klass(class_weight="balanced").partial_fit(X, Y, classes=np.unique(Y))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
    # Three-class problem: one coef_ row and one intercept per class.
    est = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
    assert est.coef_.shape == (3, 2)
    assert est.intercept_.shape == (3,)
    assert est.decision_function([[0, 0]]).shape == (1, 3)
    assert_array_equal(est.predict(T2), true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
    eta = 0.001
    alpha = 0.01
    # Multi-class average test case: each one-vs-all binary sub-problem must
    # match the pure NumPy reference implementation in `asgd`.
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    np_Y2 = np.array(Y2)
    clf.fit(X2, np_Y2)
    classes = np.unique(np_Y2)
    for i, cl in enumerate(classes):
        # one-vs-all encoding: +1 for the current class, -1 for the rest
        y_i = np.ones(np_Y2.shape[0])
        y_i[np_Y2 != cl] = -1
        average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
        assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
        assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
    """Multi-class fit with explicit zero coef_init/intercept_init.

    The fitted shapes must match the 3-class, 2-feature problem and the
    predictions on T2 must still be correct.
    """
    clf = klass(alpha=0.01, max_iter=20)
    clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3))
    assert clf.coef_.shape == (3, 2)
    # Bug fix: the original `assert clf.intercept_.shape, (3,)` asserted a
    # non-empty tuple (always true, `(3,)` was treated as the message) and
    # could never fail; compare the shape for equality instead.
    assert clf.intercept_.shape == (3,)
    pred = clf.predict(T2)
    assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
    # Same multiclass checks, with the per-class fits run on two jobs.
    est = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
    assert est.coef_.shape == (3, 2)
    assert est.intercept_.shape == (3,)
    assert est.decision_function([[0, 0]]).shape == (1, 3)
    assert_array_equal(est.predict(T2), true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
    # coef_init / intercept_init shape validation on a 3-class problem.
    with pytest.raises(ValueError):
        # wrong number of rows in coef_init
        klass().fit(X2, Y2, coef_init=np.zeros((2, 2)))
    # a correctly shaped coef_init is accepted
    klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
    with pytest.raises(ValueError):
        # wrong length of intercept_init
        klass().fit(X2, Y2, intercept_init=np.zeros((1,)))
    # a correctly shaped intercept_init is accepted
    klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
    # Checks that SGDClassifier predict_proba and predict_log_proba methods
    # can either be accessed or raise an appropriate error message
    # otherwise. See
    # https://github.com/scikit-learn/scikit-learn/issues/10938 for more
    # details.
    for loss in linear_model.SGDClassifier.loss_functions:
        clf = SGDClassifier(loss=loss)
        if loss in ("log_loss", "modified_huber"):
            # only these losses expose probability estimates
            assert hasattr(clf, "predict_proba")
            assert hasattr(clf, "predict_log_proba")
        else:
            inner_msg = "probability estimates are not available for loss={!r}".format(
                loss
            )
            assert not hasattr(clf, "predict_proba")
            assert not hasattr(clf, "predict_log_proba")
            # accessing the attribute raises AttributeError whose __cause__
            # carries the loss-specific explanation
            with pytest.raises(
                AttributeError, match="has no attribute 'predict_proba'"
            ) as exec_info:
                clf.predict_proba
            assert isinstance(exec_info.value.__cause__, AttributeError)
            assert inner_msg in str(exec_info.value.__cause__)
            with pytest.raises(
                AttributeError, match="has no attribute 'predict_log_proba'"
            ) as exec_info:
                clf.predict_log_proba
            assert isinstance(exec_info.value.__cause__, AttributeError)
            assert inner_msg in str(exec_info.value.__cause__)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
    # Check SGD.predict_proba
    # Hinge loss does not allow for conditional prob estimate.
    # We cannot use the factory here, because it defines predict_proba
    # anyway.
    clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=10, tol=None).fit(X, Y)
    assert not hasattr(clf, "predict_proba")
    assert not hasattr(clf, "predict_log_proba")
    # log and modified_huber losses can output probability estimates
    # binary case: class index 1 should be likely at [3, 2] and unlikely
    # at [-1, -1]
    for loss in ["log_loss", "modified_huber"]:
        clf = klass(loss=loss, alpha=0.01, max_iter=10)
        clf.fit(X, Y)
        p = clf.predict_proba([[3, 2]])
        assert p[0, 1] > 0.5
        p = clf.predict_proba([[-1, -1]])
        assert p[0, 1] < 0.5
        # If predict_proba is 0, we get "RuntimeWarning: divide by zero
        # encountered in log". We avoid it here.
        with np.errstate(divide="ignore"):
            p = clf.predict_log_proba([[3, 2]])
            assert p[0, 1] > p[0, 0]
            p = clf.predict_log_proba([[-1, -1]])
            assert p[0, 1] < p[0, 0]
    # log loss multiclass probability estimates: argmax of the probabilities
    # must agree with argmax of the decision function, rows sum to one
    clf = klass(loss="log_loss", alpha=0.01, max_iter=10).fit(X2, Y2)
    d = clf.decision_function([[0.1, -0.1], [0.3, 0.2]])
    p = clf.predict_proba([[0.1, -0.1], [0.3, 0.2]])
    assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
    assert_almost_equal(p[0].sum(), 1)
    assert np.all(p[0] >= 0)
    p = clf.predict_proba([[-1, -1]])
    d = clf.decision_function([[-1, -1]])
    assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
    # predict_log_proba must equal the log of predict_proba
    lp = clf.predict_log_proba([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    assert_array_almost_equal(np.log(p), lp)
    lp = clf.predict_log_proba([[-1, -1]])
    p = clf.predict_proba([[-1, -1]])
    assert_array_almost_equal(np.log(p), lp)
    # Modified Huber multiclass probability estimates; requires a separate
    # test because the hard zero/one probabilities may destroy the
    # ordering present in decision_function output.
    clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
    clf.fit(X2, Y2)
    d = clf.decision_function([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    if klass != SparseSGDClassifier:
        assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
    else:  # XXX the sparse test gets a different X2 (?)
        assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
    # the following sample produces decision_function values < -1,
    # which would cause naive normalization to fail (see comment
    # in SGDClassifier.predict_proba)
    x = X.mean(axis=0)
    d = clf.decision_function([x])
    if np.all(d < -1):  # XXX not true in sparse test case (why?)
        p = clf.predict_proba([x])
        assert_array_almost_equal(p[0], [1 / 3.0] * 3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
    # Test L1 regularization: with a strong L1 penalty the four middle,
    # mostly redundant features of X4 should be driven to exactly zero.
    n = len(X4)
    rng = np.random.RandomState(13)
    idx = np.arange(n)
    rng.shuffle(idx)
    X = X4[idx, :]
    Y = Y4[idx]
    clf = klass(
        penalty="l1",
        alpha=0.2,
        fit_intercept=False,
        max_iter=2000,
        tol=None,
        shuffle=False,
    )
    clf.fit(X, Y)
    # the interior columns (1..4) must be pruned by the L1 penalty
    assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # test sparsify with dense inputs
    clf.sparsify()
    assert sp.issparse(clf.coef_)
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # pickle and unpickle with sparse coef_
    clf = pickle.loads(pickle.dumps(clf))
    assert sp.issparse(clf.coef_)
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
    # Down-weighting class 1 should rotate the decision boundary enough to
    # flip the prediction for a point close to it.
    features = np.array(
        [[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]
    )
    labels = [1, 1, 1, -1, -1]
    unweighted = klass(
        alpha=0.1, max_iter=1000, fit_intercept=False, class_weight=None
    )
    unweighted.fit(features, labels)
    assert_array_equal(unweighted.predict([[0.2, -1.0]]), np.array([1]))
    # give class 1 a tiny weight: the same point now predicts the other class
    downweighted = klass(
        alpha=0.1, max_iter=1000, fit_intercept=False, class_weight={1: 0.001}
    )
    downweighted.fit(features, labels)
    assert_array_equal(downweighted.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
    # Equal explicit class weights should approximately match no weights.
    plain = klass(alpha=0.1, max_iter=1000, class_weight=None)
    plain.fit([[1, 0], [1, 0], [0, 1], [0, 1]], [0, 0, 1, 1])
    weighted = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5, 1: 0.5})
    weighted.fit([[1, 0], [0, 1]], [0, 1])
    # similar only up to some epsilon due to the learning rate schedule
    assert_almost_equal(plain.coef_, weighted.coef_, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
    # A class_weight key that is not an existing label must raise.
    est = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
    with pytest.raises(ValueError):
        est.fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
    # class_weight and sample_weight must combine multiplicatively: fitting
    # with both equals fitting with their pre-multiplied product.
    class_weights = {1: 0.6, 2: 0.3}
    rng = np.random.RandomState(0)
    sample_weights = rng.random_sample(Y4.shape[0])
    combined = np.copy(sample_weights)
    for label, weight in class_weights.items():
        combined[Y4 == label] *= weight
    with_class_weight = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
    with_product = klass(alpha=0.1, max_iter=20)
    with_class_weight.fit(X4, Y4, sample_weight=sample_weights)
    with_product.fit(X4, Y4, sample_weight=combined)
    assert_almost_equal(with_class_weight.coef_, with_product.coef_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
    # Test class weights for imbalanced data"""
    # compute reference metrics on iris dataset that is quite balanced by
    # default
    X, y = iris.data, iris.target
    X = scale(X)
    idx = np.arange(X.shape[0])
    rng = np.random.RandomState(6)
    rng.shuffle(idx)
    X = X[idx]
    y = y[idx]
    clf = klass(alpha=0.0001, max_iter=1000, class_weight=None, shuffle=False).fit(X, y)
    f1 = metrics.f1_score(y, clf.predict(X), average="weighted")
    assert_almost_equal(f1, 0.96, decimal=1)
    # make the same prediction using balanced class_weight
    clf_balanced = klass(
        alpha=0.0001, max_iter=1000, class_weight="balanced", shuffle=False
    ).fit(X, y)
    f1 = metrics.f1_score(y, clf_balanced.predict(X), average="weighted")
    assert_almost_equal(f1, 0.96, decimal=1)
    # Make sure that in the balanced case it does not change anything
    # to use "balanced"
    assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
    # build a very very imbalanced dataset out of iris data by replicating
    # the class-0 samples ten extra times
    X_0 = X[y == 0, :]
    y_0 = y[y == 0]
    X_imbalanced = np.vstack([X] + [X_0] * 10)
    y_imbalanced = np.concatenate([y] + [y_0] * 10)
    # fit a model on the imbalanced data without class weight info:
    # the minority classes should drag the weighted f1 down
    clf = klass(max_iter=1000, class_weight=None, shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average="weighted") < 0.96
    # fit a model with balanced class_weight enabled: f1 should recover
    clf = klass(max_iter=1000, class_weight="balanced", shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average="weighted") > 0.96
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.