repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/tests/__init__.py
sklearn/feature_extraction/tests/__init__.py
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/tests/test_dict_vectorizer.py
sklearn/feature_extraction/tests/test_dict_vectorizer.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from random import Random import numpy as np import pytest import scipy.sparse as sp from numpy.testing import assert_allclose, assert_array_equal from sklearn.exceptions import NotFittedError from sklearn.feature_extraction import DictVectorizer from sklearn.feature_selection import SelectKBest, chi2 @pytest.mark.parametrize("sparse", (True, False)) @pytest.mark.parametrize("dtype", (int, np.float32, np.int16)) @pytest.mark.parametrize("sort", (True, False)) @pytest.mark.parametrize("iterable", (True, False)) def test_dictvectorizer(sparse, dtype, sort, iterable): D = [{"foo": 1, "bar": 3}, {"bar": 4, "baz": 2}, {"bar": 1, "quux": 1, "quuux": 2}] v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort) X = v.fit_transform(iter(D) if iterable else D) assert sp.issparse(X) == sparse assert X.shape == (3, 5) assert X.sum() == 14 assert v.inverse_transform(X) == D if sparse: # CSR matrices can't be compared for equality assert_array_equal( X.toarray(), v.transform(iter(D) if iterable else D).toarray() ) else: assert_array_equal(X, v.transform(iter(D) if iterable else D)) if sort: assert v.feature_names_ == sorted(v.feature_names_) def test_feature_selection(): # make two feature dicts with two useful features and a bunch of useless # ones, in terms of chi2 d1 = dict([("useless%d" % i, 10) for i in range(20)], useful1=1, useful2=20) d2 = dict([("useless%d" % i, 10) for i in range(20)], useful1=20, useful2=1) for indices in (True, False): v = DictVectorizer().fit([d1, d2]) X = v.transform([d1, d2]) sel = SelectKBest(chi2, k=2).fit(X, [0, 1]) v.restrict(sel.get_support(indices=indices), indices=indices) assert_array_equal(v.get_feature_names_out(), ["useful1", "useful2"]) def test_one_of_k(): D_in = [ {"version": "1", "ham": 2}, {"version": "2", "spam": 0.3}, {"version=3": True, "spam": -1}, ] v = DictVectorizer() X = v.fit_transform(D_in) assert X.shape == (3, 5) D_out = 
v.inverse_transform(X) assert D_out[0] == {"version=1": 1, "ham": 2} names = v.get_feature_names_out() assert "version=2" in names assert "version" not in names def test_iterable_value(): D_names = ["ham", "spam", "version=1", "version=2", "version=3"] X_expected = [ [2.0, 0.0, 2.0, 1.0, 0.0], [0.0, 0.3, 0.0, 1.0, 0.0], [0.0, -1.0, 0.0, 0.0, 1.0], ] D_in = [ {"version": ["1", "2", "1"], "ham": 2}, {"version": "2", "spam": 0.3}, {"version=3": True, "spam": -1}, ] v = DictVectorizer() X = v.fit_transform(D_in) X = X.toarray() assert_array_equal(X, X_expected) D_out = v.inverse_transform(X) assert D_out[0] == {"version=1": 2, "version=2": 1, "ham": 2} names = v.get_feature_names_out() assert_array_equal(names, D_names) def test_iterable_not_string_error(): error_value = ( "Unsupported type <class 'int'> in iterable value. " "Only iterables of string are supported." ) D2 = [{"foo": "1", "bar": "2"}, {"foo": "3", "baz": "1"}, {"foo": [1, "three"]}] v = DictVectorizer(sparse=False) with pytest.raises(TypeError) as error: v.fit(D2) assert str(error.value) == error_value def test_mapping_error(): error_value = ( "Unsupported value type <class 'dict'> " "for foo: {'one': 1, 'three': 3}.\n" "Mapping objects are not supported." 
) D2 = [ {"foo": "1", "bar": "2"}, {"foo": "3", "baz": "1"}, {"foo": {"one": 1, "three": 3}}, ] v = DictVectorizer(sparse=False) with pytest.raises(TypeError) as error: v.fit(D2) assert str(error.value) == error_value def test_unseen_or_no_features(): D = [{"camelot": 0, "spamalot": 1}] for sparse in [True, False]: v = DictVectorizer(sparse=sparse).fit(D) X = v.transform({"push the pram a lot": 2}) if sparse: X = X.toarray() assert_array_equal(X, np.zeros((1, 2))) X = v.transform({}) if sparse: X = X.toarray() assert_array_equal(X, np.zeros((1, 2))) with pytest.raises(ValueError, match="empty"): v.transform([]) def test_deterministic_vocabulary(global_random_seed): # Generate equal dictionaries with different memory layouts items = [("%03d" % i, i) for i in range(1000)] rng = Random(global_random_seed) d_sorted = dict(items) rng.shuffle(items) d_shuffled = dict(items) # check that the memory layout does not impact the resulting vocabulary v_1 = DictVectorizer().fit([d_sorted]) v_2 = DictVectorizer().fit([d_shuffled]) assert v_1.vocabulary_ == v_2.vocabulary_ def test_n_features_in(): # For vectorizers, n_features_in_ does not make sense and does not exist. dv = DictVectorizer() assert not hasattr(dv, "n_features_in_") d = [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}] dv.fit(d) assert not hasattr(dv, "n_features_in_") def test_dictvectorizer_dense_sparse_equivalence(): """Check the equivalence between between sparse and dense DictVectorizer. 
Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/19978 """ movie_entry_fit = [ {"category": ["thriller", "drama"], "year": 2003}, {"category": ["animation", "family"], "year": 2011}, {"year": 1974}, ] movie_entry_transform = [{"category": ["thriller"], "unseen_feature": "3"}] dense_vectorizer = DictVectorizer(sparse=False) sparse_vectorizer = DictVectorizer(sparse=True) dense_vector_fit = dense_vectorizer.fit_transform(movie_entry_fit) sparse_vector_fit = sparse_vectorizer.fit_transform(movie_entry_fit) assert not sp.issparse(dense_vector_fit) assert sp.issparse(sparse_vector_fit) assert_allclose(dense_vector_fit, sparse_vector_fit.toarray()) dense_vector_transform = dense_vectorizer.transform(movie_entry_transform) sparse_vector_transform = sparse_vectorizer.transform(movie_entry_transform) assert not sp.issparse(dense_vector_transform) assert sp.issparse(sparse_vector_transform) assert_allclose(dense_vector_transform, sparse_vector_transform.toarray()) dense_inverse_transform = dense_vectorizer.inverse_transform(dense_vector_transform) sparse_inverse_transform = sparse_vectorizer.inverse_transform( sparse_vector_transform ) expected_inverse = [{"category=thriller": 1.0}] assert dense_inverse_transform == expected_inverse assert sparse_inverse_transform == expected_inverse def test_dict_vectorizer_unsupported_value_type(): """Check that we raise an error when the value associated to a feature is not supported. 
Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/19489 """ class A: pass vectorizer = DictVectorizer(sparse=True) X = [{"foo": A()}] err_msg = "Unsupported value Type" with pytest.raises(TypeError, match=err_msg): vectorizer.fit_transform(X) def test_dict_vectorizer_get_feature_names_out(): """Check that integer feature names are converted to strings in feature_names_out.""" X = [{1: 2, 3: 4}, {2: 4}] dv = DictVectorizer(sparse=False).fit(X) feature_names = dv.get_feature_names_out() assert isinstance(feature_names, np.ndarray) assert feature_names.dtype == object assert_array_equal(feature_names, ["1", "2", "3"]) @pytest.mark.parametrize( "method, input", [ ("transform", [{1: 2, 3: 4}, {2: 4}]), ("inverse_transform", [{1: 2, 3: 4}, {2: 4}]), ("restrict", [True, False, True]), ], ) def test_dict_vectorizer_not_fitted_error(method, input): """Check that unfitted DictVectorizer instance raises NotFittedError. This should be part of the common test but currently they test estimator accepting text input. """ dv = DictVectorizer(sparse=False) with pytest.raises(NotFittedError): getattr(dv, method)(input)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/_variance_threshold.py
sklearn/feature_selection/_variance_threshold.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Real import numpy as np from sklearn.base import BaseEstimator, _fit_context from sklearn.feature_selection._base import SelectorMixin from sklearn.utils._param_validation import Interval from sklearn.utils.sparsefuncs import mean_variance_axis, min_max_axis from sklearn.utils.validation import check_is_fitted, validate_data class VarianceThreshold(SelectorMixin, BaseEstimator): """Feature selector that removes all low-variance features. This feature selection algorithm looks only at the features (X), not the desired outputs (y), and can thus be used for unsupervised learning. Read more in the :ref:`User Guide <variance_threshold>`. Parameters ---------- threshold : float, default=0 Features with a training-set variance lower than this threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples. Attributes ---------- variances_ : array, shape (n_features,) Variances of individual features. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- SelectFromModel: Meta-transformer for selecting features based on importance weights. SelectPercentile : Select features according to a percentile of the highest scores. SequentialFeatureSelector : Transformer that performs Sequential Feature Selection. Notes ----- Allows NaN in the input. Raises ValueError if no feature in X meets the variance threshold. Examples -------- The following dataset has integer features, two of which are the same in every sample. 
These are removed with the default setting for threshold:: >>> from sklearn.feature_selection import VarianceThreshold >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]] >>> selector = VarianceThreshold() >>> selector.fit_transform(X) array([[2, 0], [1, 4], [1, 1]]) """ _parameter_constraints: dict = { "threshold": [Interval(Real, 0, None, closed="left")] } def __init__(self, threshold=0.0): self.threshold = threshold @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Learn empirical variances from X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data from which to compute variances, where `n_samples` is the number of samples and `n_features` is the number of features. y : any, default=None Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- self : object Returns the instance itself. """ X = validate_data( self, X, accept_sparse=("csr", "csc"), dtype=np.float64, ensure_all_finite="allow-nan", ) if hasattr(X, "toarray"): # sparse matrix _, self.variances_ = mean_variance_axis(X, axis=0) if self.threshold == 0: mins, maxes = min_max_axis(X, axis=0) peak_to_peaks = maxes - mins else: self.variances_ = np.nanvar(X, axis=0) if self.threshold == 0: peak_to_peaks = np.ptp(X, axis=0) if self.threshold == 0: # Use peak-to-peak to avoid numeric precision issues # for constant features compare_arr = np.array([self.variances_, peak_to_peaks]) self.variances_ = np.nanmin(compare_arr, axis=0) if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)): msg = "No feature in X meets the variance threshold {0:.5f}" if X.shape[0] == 1: msg += " (X contains only one sample)" raise ValueError(msg.format(self.threshold)) return self def _get_support_mask(self): check_is_fitted(self) return self.variances_ > self.threshold def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = True tags.input_tags.sparse = True return 
tags
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/_from_model.py
sklearn/feature_selection/_from_model.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from copy import deepcopy from numbers import Integral, Real import numpy as np from sklearn.base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone from sklearn.exceptions import NotFittedError from sklearn.feature_selection._base import SelectorMixin, _get_feature_importances from sklearn.utils._param_validation import HasMethods, Interval, Options from sklearn.utils._tags import get_tags from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, _routing_enabled, process_routing, ) from sklearn.utils.metaestimators import available_if from sklearn.utils.validation import ( _check_feature_names, _estimator_has, check_is_fitted, check_scalar, ) def _calculate_threshold(estimator, importances, threshold): """Interpret the threshold value""" if threshold is None: # determine default from estimator est_name = estimator.__class__.__name__ is_l1_penalized = hasattr(estimator, "penalty") and estimator.penalty == "l1" is_lasso = "Lasso" in est_name is_elasticnet_l1_penalized = est_name == "ElasticNet" and ( hasattr(estimator, "l1_ratio") and np.isclose(estimator.l1_ratio, 1.0) ) is_elasticnetcv_l1_penalized = est_name == "ElasticNetCV" and ( hasattr(estimator, "l1_ratio_") and np.isclose(estimator.l1_ratio_, 1.0) ) is_logreg_l1_penalized = est_name == "LogisticRegression" and ( hasattr(estimator, "l1_ratio") and np.isclose(estimator.l1_ratio, 1.0) ) is_logregcv_l1_penalized = est_name == "LogisticRegressionCV" and ( hasattr(estimator, "l1_ratio_") and np.all(np.isclose(estimator.l1_ratio_, 1.0)) ) if ( is_l1_penalized or is_lasso or is_elasticnet_l1_penalized or is_elasticnetcv_l1_penalized or is_logreg_l1_penalized or is_logregcv_l1_penalized ): # the natural default threshold is 0 when l1 penalty was used threshold = 1e-5 else: threshold = "mean" if isinstance(threshold, str): if "*" in threshold: scale, reference = threshold.split("*") scale = 
float(scale.strip()) reference = reference.strip() if reference == "median": reference = np.median(importances) elif reference == "mean": reference = np.mean(importances) else: raise ValueError("Unknown reference: " + reference) threshold = scale * reference elif threshold == "median": threshold = np.median(importances) elif threshold == "mean": threshold = np.mean(importances) else: raise ValueError( "Expected threshold='mean' or threshold='median' got %s" % threshold ) else: threshold = float(threshold) return threshold class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator): """Meta-transformer for selecting features based on importance weights. .. versionadded:: 0.17 Read more in the :ref:`User Guide <select_from_model>`. Parameters ---------- estimator : object The base estimator from which the transformer is built. This can be both a fitted (if ``prefit`` is set to True) or a non-fitted estimator. The estimator should have a ``feature_importances_`` or ``coef_`` attribute after fitting. Otherwise, the ``importance_getter`` parameter should be used. threshold : str or float, default=None The threshold value to use for feature selection. Features whose absolute importance value is greater or equal are kept while the others are discarded. If "median" (resp. "mean"), then the ``threshold`` value is the median (resp. the mean) of the feature importances. A scaling factor (e.g., "1.25*mean") may also be used. If None and if the estimator has a parameter penalty set to l1, either explicitly or implicitly (e.g, Lasso), the threshold used is 1e-5. Otherwise, "mean" is used by default. prefit : bool, default=False Whether a prefit model is expected to be passed into the constructor directly or not. If `True`, `estimator` must be a fitted estimator. If `False`, `estimator` is fitted and updated by calling `fit` and `partial_fit`, respectively. 
norm_order : non-zero int, inf, -inf, default=1 Order of the norm used to filter the vectors of coefficients below ``threshold`` in the case where the ``coef_`` attribute of the estimator is of dimension 2. max_features : int, callable, default=None The maximum number of features to select. - If an integer, then it specifies the maximum number of features to allow. - If a callable, then it specifies how to calculate the maximum number of features allowed. The callable will receive `X` as input: `max_features(X)`. - If `None`, then all features are kept. To only select based on ``max_features``, set ``threshold=-np.inf``. .. versionadded:: 0.20 .. versionchanged:: 1.1 `max_features` accepts a callable. importance_getter : str or callable, default='auto' If 'auto', uses the feature importance either through a ``coef_`` attribute or ``feature_importances_`` attribute of estimator. Also accepts a string that specifies an attribute name/path for extracting feature importance (implemented with `attrgetter`). For example, give `regressor_.coef_` in case of :class:`~sklearn.compose.TransformedTargetRegressor` or `named_steps.clf.feature_importances_` in case of :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. If `callable`, overrides the default feature importance getter. The callable is passed with the fitted estimator and it should return importance for each feature. .. versionadded:: 0.24 Attributes ---------- estimator_ : estimator The base estimator from which the transformer is built. This attribute exist only when `fit` has been called. - If `prefit=True`, it is a deep copy of `estimator`. - If `prefit=False`, it is a clone of `estimator` and fit on the data passed to `fit` or `partial_fit`. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 0.24 max_features_ : int Maximum number of features calculated during :term:`fit`. 
Only defined if the ``max_features`` is not `None`. - If `max_features` is an `int`, then `max_features_ = max_features`. - If `max_features` is a callable, then `max_features_ = max_features(X)`. .. versionadded:: 1.1 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 threshold_ : float The threshold value used for feature selection. See Also -------- RFE : Recursive feature elimination based on importance weights. RFECV : Recursive feature elimination with built-in cross-validated selection of the best number of features. SequentialFeatureSelector : Sequential cross-validation based feature selection. Does not rely on importance weights. Notes ----- Allows NaN/Inf in the input if the underlying estimator does as well. Examples -------- >>> from sklearn.feature_selection import SelectFromModel >>> from sklearn.linear_model import LogisticRegression >>> X = [[ 0.87, -1.34, 0.31 ], ... [-2.79, -0.02, -0.85 ], ... [-1.34, -0.48, -2.55 ], ... [ 1.92, 1.48, 0.65 ]] >>> y = [0, 1, 0, 1] >>> selector = SelectFromModel(estimator=LogisticRegression()).fit(X, y) >>> selector.estimator_.coef_ array([[-0.3252, 0.8345, 0.4976]]) >>> selector.threshold_ np.float64(0.55249) >>> selector.get_support() array([False, True, False]) >>> selector.transform(X) array([[-1.34], [-0.02], [-0.48], [ 1.48]]) Using a callable to create a selector that can use no more than half of the input features. >>> def half_callable(X): ... return round(len(X[0]) / 2) >>> half_selector = SelectFromModel(estimator=LogisticRegression(), ... 
max_features=half_callable) >>> _ = half_selector.fit(X, y) >>> half_selector.max_features_ 2 """ _parameter_constraints: dict = { "estimator": [HasMethods("fit")], "threshold": [Interval(Real, None, None, closed="both"), str, None], "prefit": ["boolean"], "norm_order": [ Interval(Integral, None, -1, closed="right"), Interval(Integral, 1, None, closed="left"), Options(Real, {np.inf, -np.inf}), ], "max_features": [Interval(Integral, 0, None, closed="left"), callable, None], "importance_getter": [str, callable], } def __init__( self, estimator, *, threshold=None, prefit=False, norm_order=1, max_features=None, importance_getter="auto", ): self.estimator = estimator self.threshold = threshold self.prefit = prefit self.importance_getter = importance_getter self.norm_order = norm_order self.max_features = max_features def _get_support_mask(self): estimator = getattr(self, "estimator_", self.estimator) max_features = getattr(self, "max_features_", self.max_features) if self.prefit: try: check_is_fitted(self.estimator) except NotFittedError as exc: raise NotFittedError( "When `prefit=True`, `estimator` is expected to be a fitted " "estimator." ) from exc if callable(max_features): # This branch is executed when `transform` is called directly and thus # `max_features_` is not set and we fallback using `self.max_features` # that is not validated raise NotFittedError( "When `prefit=True` and `max_features` is a callable, call `fit` " "before calling `transform`." ) elif max_features is not None and not isinstance(max_features, Integral): raise ValueError( f"`max_features` must be an integer. Got `max_features={max_features}` " "instead." 
) scores = _get_feature_importances( estimator=estimator, getter=self.importance_getter, transform_func="norm", norm_order=self.norm_order, ) threshold = _calculate_threshold(estimator, scores, self.threshold) if self.max_features is not None: mask = np.zeros_like(scores, dtype=bool) candidate_indices = np.argsort(-scores, kind="mergesort")[:max_features] mask[candidate_indices] = True else: mask = np.ones_like(scores, dtype=bool) mask[scores < threshold] = False return mask def _check_max_features(self, X): if self.max_features is not None: if callable(self.max_features): max_features = self.max_features(X) else: # int max_features = self.max_features check_scalar( max_features, "max_features", Integral, min_val=0, max_val=None, ) self.max_features_ = max_features @_fit_context( # SelectFromModel.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y=None, **fit_params): """Fit the SelectFromModel meta-transformer. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,), default=None The target values (integers that correspond to classes in classification, real numbers in regression). **fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `fit` method of the sub-estimator. They are ignored if `prefit=True`. - If `enable_metadata_routing=True`: Parameters safely routed to the `fit` method of the sub-estimator. They are ignored if `prefit=True`. .. versionchanged:: 1.4 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ self._check_max_features(X) if self.prefit: try: check_is_fitted(self.estimator) except NotFittedError as exc: raise NotFittedError( "When `prefit=True`, `estimator` is expected to be a fitted " "estimator." 
) from exc self.estimator_ = deepcopy(self.estimator) else: if _routing_enabled(): routed_params = process_routing(self, "fit", **fit_params) self.estimator_ = clone(self.estimator) self.estimator_.fit(X, y, **routed_params.estimator.fit) else: # TODO(SLEP6): remove when metadata routing cannot be disabled. self.estimator_ = clone(self.estimator) self.estimator_.fit(X, y, **fit_params) if hasattr(self.estimator_, "feature_names_in_"): self.feature_names_in_ = self.estimator_.feature_names_in_ else: _check_feature_names(self, X, reset=True) return self @property def threshold_(self): """Threshold value used for feature selection.""" scores = _get_feature_importances( estimator=self.estimator_, getter=self.importance_getter, transform_func="norm", norm_order=self.norm_order, ) return _calculate_threshold(self.estimator, scores, self.threshold) @available_if(_estimator_has("partial_fit")) @_fit_context( # SelectFromModel.estimator is not validated yet prefer_skip_nested_validation=False ) def partial_fit(self, X, y=None, **partial_fit_params): """Fit the SelectFromModel meta-transformer only once. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,), default=None The target values (integers that correspond to classes in classification, real numbers in regression). **partial_fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `partial_fit` method of the sub-estimator. - If `enable_metadata_routing=True`: Parameters passed to the `partial_fit` method of the sub-estimator. They are ignored if `prefit=True`. .. versionchanged:: 1.4 `**partial_fit_params` are routed to the sub-estimator, if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`, which allows for aliasing. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. 
""" first_call = not hasattr(self, "estimator_") if first_call: self._check_max_features(X) if self.prefit: if first_call: try: check_is_fitted(self.estimator) except NotFittedError as exc: raise NotFittedError( "When `prefit=True`, `estimator` is expected to be a fitted " "estimator." ) from exc self.estimator_ = deepcopy(self.estimator) return self if first_call: self.estimator_ = clone(self.estimator) if _routing_enabled(): routed_params = process_routing(self, "partial_fit", **partial_fit_params) self.estimator_ = clone(self.estimator) self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit) else: # TODO(SLEP6): remove when metadata routing cannot be disabled. self.estimator_.partial_fit(X, y, **partial_fit_params) if hasattr(self.estimator_, "feature_names_in_"): self.feature_names_in_ = self.estimator_.feature_names_in_ else: _check_feature_names(self, X, reset=first_call) return self @property def n_features_in_(self): """Number of features seen during `fit`.""" # For consistency with other estimators we raise an AttributeError so # that hasattr() fails if the estimator isn't fitted. try: check_is_fitted(self) except NotFittedError as nfe: raise AttributeError( "{} object has no n_features_in_ attribute.".format( self.__class__.__name__ ) ) from nfe return self.estimator_.n_features_in_ def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. 
""" router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping() .add(caller="partial_fit", callee="partial_fit") .add(caller="fit", callee="fit"), ) return router def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse tags.input_tags.allow_nan = get_tags(self.estimator).input_tags.allow_nan return tags
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/_base.py
sklearn/feature_selection/_base.py
"""Generic feature selection mixin""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from abc import ABCMeta, abstractmethod from operator import attrgetter import numpy as np from scipy.sparse import csc_matrix, issparse from sklearn.base import TransformerMixin from sklearn.utils import _safe_indexing, check_array, safe_sqr from sklearn.utils._dataframe import is_pandas_df from sklearn.utils._set_output import _get_output_config from sklearn.utils._tags import get_tags from sklearn.utils.validation import ( _check_feature_names_in, check_is_fitted, validate_data, ) class SelectorMixin(TransformerMixin, metaclass=ABCMeta): """ Transformer mixin that performs feature selection given a support mask This mixin provides a feature selector implementation with `transform` and `inverse_transform` functionality given an implementation of `_get_support_mask`. Examples -------- >>> import numpy as np >>> from sklearn.datasets import load_iris >>> from sklearn.base import BaseEstimator >>> from sklearn.feature_selection import SelectorMixin >>> class FeatureSelector(SelectorMixin, BaseEstimator): ... def fit(self, X, y=None): ... self.n_features_in_ = X.shape[1] ... return self ... def _get_support_mask(self): ... mask = np.zeros(self.n_features_in_, dtype=bool) ... mask[:2] = True # select the first two features ... return mask >>> X, y = load_iris(return_X_y=True) >>> FeatureSelector().fit_transform(X, y).shape (150, 2) """ def get_support(self, indices=False): """ Get a mask, or integer index, of the features selected. Parameters ---------- indices : bool, default=False If True, the return value will be an array of integers, rather than a boolean mask. Returns ------- support : array An index that selects the retained features from a feature vector. If `indices` is False, this is a boolean array of shape [# input features], in which an element is True iff its corresponding feature is selected for retention. 
If `indices` is True, this is an integer array of shape [# output features] whose values are indices into the input feature vector. """ mask = self._get_support_mask() return mask if not indices else np.nonzero(mask)[0] @abstractmethod def _get_support_mask(self): """ Get the boolean mask indicating which features are selected Returns ------- support : boolean array of shape [# input features] An element is True iff its corresponding feature is selected for retention. """ def transform(self, X): """Reduce X to the selected features. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. Returns ------- X_r : array of shape [n_samples, n_selected_features] The input samples with only the selected features. """ # Preserve X when X is a dataframe and the output is configured to # be pandas. output_config_dense = _get_output_config("transform", estimator=self)["dense"] preserve_X = output_config_dense != "default" and is_pandas_df(X) # note: we use get_tags instead of __sklearn_tags__ because this is a # public Mixin. X = validate_data( self, X, dtype=None, accept_sparse="csr", ensure_all_finite=not get_tags(self).input_tags.allow_nan, skip_check_array=preserve_X, reset=False, ) return self._transform(X) def _transform(self, X): """Reduce X to the selected features.""" mask = self.get_support() if not mask.any(): warnings.warn( ( "No features were selected: either the data is" " too noisy or the selection test too strict." ), UserWarning, ) if hasattr(X, "iloc"): return X.iloc[:, :0] return np.empty(0, dtype=X.dtype).reshape((X.shape[0], 0)) return _safe_indexing(X, mask, axis=1) def inverse_transform(self, X): """Reverse the transformation operation. Parameters ---------- X : array of shape [n_samples, n_selected_features] The input samples. Returns ------- X_original : array of shape [n_samples, n_original_features] `X` with columns of zeros inserted where features would have been removed by :meth:`transform`. 
""" if issparse(X): X = X.tocsc() # insert additional entries in indptr: # e.g. if transform changed indptr from [0 2 6 7] to [0 2 3] # col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3] it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1)) col_nonzeros = it.ravel() indptr = np.concatenate([[0], np.cumsum(col_nonzeros)]) Xt = csc_matrix( (X.data, X.indices, indptr), shape=(X.shape[0], len(indptr) - 1), dtype=X.dtype, ) return Xt support = self.get_support() X = check_array(X, dtype=None) if support.sum() != X.shape[1]: raise ValueError("X has a different shape than during fitting.") if X.ndim == 1: X = X[None, :] Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype) Xt[:, support] = X return Xt def get_feature_names_out(self, input_features=None): """Mask feature names according to selected features. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self) input_features = _check_feature_names_in(self, input_features) return input_features[self.get_support()] def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1): """ Retrieve and aggregate (ndim > 1) the feature importances from an estimator. Also optionally applies transformation. Parameters ---------- estimator : estimator A scikit-learn estimator from which we want to get the feature importances. getter : "auto", str or callable An attribute or a callable to get the feature importance. If `"auto"`, `estimator` is expected to expose `coef_` or `feature_importances`. 
transform_func : {"norm", "square"}, default=None The transform to apply to the feature importances. By default (`None`) no transformation is applied. norm_order : int, default=1 The norm order to apply when `transform_func="norm"`. Only applied when `importances.ndim > 1`. Returns ------- importances : ndarray of shape (n_features,) The features importances, optionally transformed. """ if isinstance(getter, str): if getter == "auto": if hasattr(estimator, "coef_"): getter = attrgetter("coef_") elif hasattr(estimator, "feature_importances_"): getter = attrgetter("feature_importances_") else: raise ValueError( "when `importance_getter=='auto'`, the underlying " f"estimator {estimator.__class__.__name__} should have " "`coef_` or `feature_importances_` attribute. Either " "pass a fitted estimator to feature selector or call fit " "before calling transform." ) else: getter = attrgetter(getter) elif not callable(getter): raise ValueError("`importance_getter` has to be a string or `callable`") importances = getter(estimator) if transform_func is None: return importances elif transform_func == "norm": if importances.ndim == 1: importances = np.abs(importances) else: importances = np.linalg.norm(importances, axis=0, ord=norm_order) elif transform_func == "square": if importances.ndim == 1: importances = safe_sqr(importances) else: importances = safe_sqr(importances).sum(axis=0) else: raise ValueError( "Valid values for `transform_func` are " "None, 'norm' and 'square'. Those two " "transformation are only supported now" ) return importances
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/_sequential.py
sklearn/feature_selection/_sequential.py
""" Sequential feature selection """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Integral, Real import numpy as np from sklearn.base import ( BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier, ) from sklearn.feature_selection._base import SelectorMixin from sklearn.metrics import check_scoring, get_scorer_names from sklearn.model_selection import check_cv, cross_val_score from sklearn.utils._metadata_requests import ( MetadataRouter, MethodMapping, _raise_for_params, _routing_enabled, process_routing, ) from sklearn.utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions from sklearn.utils._tags import get_tags from sklearn.utils.validation import check_is_fitted, validate_data class SequentialFeatureSelector(SelectorMixin, MetaEstimatorMixin, BaseEstimator): """Transformer that performs Sequential Feature Selection. This Sequential Feature Selector adds (forward selection) or removes (backward selection) features to form a feature subset in a greedy fashion. At each stage, this estimator chooses the best feature to add or remove based on the cross-validation score of an estimator. In the case of unsupervised learning, this Sequential Feature Selector looks only at the features (X), not the desired outputs (y). Read more in the :ref:`User Guide <sequential_feature_selection>`. .. versionadded:: 0.24 Parameters ---------- estimator : estimator instance An unfitted estimator. n_features_to_select : "auto", int or float, default="auto" If `"auto"`, the behaviour depends on the `tol` parameter: - if `tol` is not `None`, then features are selected while the score change does not exceed `tol`. - otherwise, half of the features are selected. If integer, the parameter is the absolute number of features to select. If float between 0 and 1, it is the fraction of features to select. .. versionadded:: 1.1 The option `"auto"` was added in version 1.1. .. 
versionchanged:: 1.3 The default changed from `"warn"` to `"auto"` in 1.3. tol : float, default=None If the score is not incremented by at least `tol` between two consecutive feature additions or removals, stop adding or removing. `tol` can be negative when removing features using `direction="backward"`. `tol` is required to be strictly positive when doing forward selection. It can be useful to reduce the number of features at the cost of a small decrease in the score. `tol` is enabled only when `n_features_to_select` is `"auto"`. .. versionadded:: 1.1 direction : {'forward', 'backward'}, default='forward' Whether to perform forward selection or backward selection. scoring : str or callable, default=None Scoring method to use for cross-validation. Options: - str: see :ref:`scoring_string_names` for options. - callable: a scorer callable object (e.g., function) with signature ``scorer(estimator, X, y)`` that returns a single value. See :ref:`scoring_callable` for details. - `None`: the `estimator`'s :ref:`default evaluation criterion <scoring_api_overview>` is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other cases, :class:`~sklearn.model_selection.KFold` is used. These splitters are instantiated with `shuffle=False` so the splits will be the same across calls. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : int, default=None Number of jobs to run in parallel. 
When evaluating a new feature to add or remove, the cross-validation procedure is parallel over the folds. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Attributes ---------- n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_features_to_select_ : int The number of features that were selected. support_ : ndarray of shape (n_features,), dtype=bool The mask of selected features. See Also -------- GenericUnivariateSelect : Univariate feature selector with configurable strategy. RFE : Recursive feature elimination based on importance weights. RFECV : Recursive feature elimination based on importance weights, with automatic selection of the number of features. SelectFromModel : Feature selection based on thresholds of importance weights. 
Examples -------- >>> from sklearn.feature_selection import SequentialFeatureSelector >>> from sklearn.neighbors import KNeighborsClassifier >>> from sklearn.datasets import load_iris >>> X, y = load_iris(return_X_y=True) >>> knn = KNeighborsClassifier(n_neighbors=3) >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3) >>> sfs.fit(X, y) SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3), n_features_to_select=3) >>> sfs.get_support() array([ True, False, True, True]) >>> sfs.transform(X).shape (150, 3) """ _parameter_constraints: dict = { "estimator": [HasMethods(["fit"])], "n_features_to_select": [ StrOptions({"auto"}), Interval(RealNotInt, 0, 1, closed="right"), Interval(Integral, 0, None, closed="neither"), ], "tol": [None, Interval(Real, None, None, closed="neither")], "direction": [StrOptions({"forward", "backward"})], "scoring": [None, StrOptions(set(get_scorer_names())), callable], "cv": ["cv_object"], "n_jobs": [None, Integral], } def __init__( self, estimator, *, n_features_to_select="auto", tol=None, direction="forward", scoring=None, cv=5, n_jobs=None, ): self.estimator = estimator self.n_features_to_select = n_features_to_select self.tol = tol self.direction = direction self.scoring = scoring self.cv = cv self.n_jobs = n_jobs @_fit_context( # SequentialFeatureSelector.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y=None, **params): """Learn the features to select from X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of predictors. y : array-like of shape (n_samples,), default=None Target values. This parameter may be ignored for unsupervised learning. **params : dict, default=None Parameters to be passed to the underlying `estimator`, `cv` and `scorer` objects. .. 
versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns the instance itself. """ _raise_for_params(params, self, "fit") tags = self.__sklearn_tags__() X = validate_data( self, X, accept_sparse="csc", ensure_min_features=2, ensure_all_finite=not tags.input_tags.allow_nan, ) n_features = X.shape[1] if self.n_features_to_select == "auto": if self.tol is not None: # With auto feature selection, `n_features_to_select_` will be updated # to `support_.sum()` after features are selected. self.n_features_to_select_ = n_features - 1 else: self.n_features_to_select_ = n_features // 2 elif isinstance(self.n_features_to_select, Integral): if self.n_features_to_select >= n_features: raise ValueError("n_features_to_select must be < n_features.") self.n_features_to_select_ = self.n_features_to_select elif isinstance(self.n_features_to_select, Real): self.n_features_to_select_ = int(n_features * self.n_features_to_select) if self.tol is not None and self.tol < 0 and self.direction == "forward": raise ValueError( "tol must be strictly positive when doing forward selection" ) cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) cloned_estimator = clone(self.estimator) # the current mask corresponds to the set of features: # - that we have already *selected* if we do forward selection # - that we have already *excluded* if we do backward selection current_mask = np.zeros(shape=n_features, dtype=bool) n_iterations = ( self.n_features_to_select_ if self.n_features_to_select == "auto" or self.direction == "forward" else n_features - self.n_features_to_select_ ) old_score = -np.inf is_auto_select = self.tol is not None and self.n_features_to_select == "auto" # We only need to verify the routing here and not use the routed params # because internally the actual routing 
will also take place inside the # `cross_val_score` function. if _routing_enabled(): process_routing(self, "fit", **params) for _ in range(n_iterations): new_feature_idx, new_score = self._get_best_new_feature_score( cloned_estimator, X, y, cv, current_mask, **params ) if is_auto_select and ((new_score - old_score) < self.tol): break old_score = new_score current_mask[new_feature_idx] = True if self.direction == "backward": current_mask = ~current_mask self.support_ = current_mask self.n_features_to_select_ = self.support_.sum() return self def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask, **params): # Return the best new feature and its score to add to the current_mask, # i.e. return the best new feature and its score to add (resp. remove) # when doing forward selection (resp. backward selection). # Feature will be added if the current score and past score are greater # than tol when n_feature is auto, candidate_feature_indices = np.flatnonzero(~current_mask) scores = {} for feature_idx in candidate_feature_indices: candidate_mask = current_mask.copy() candidate_mask[feature_idx] = True if self.direction == "backward": candidate_mask = ~candidate_mask X_new = X[:, candidate_mask] scores[feature_idx] = cross_val_score( estimator, X_new, y, cv=cv, scoring=self.scoring, n_jobs=self.n_jobs, params=params, ).mean() new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx]) return new_feature_idx, scores[new_feature_idx] def _get_support_mask(self): check_is_fitted(self) return self.support_ def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = get_tags(self.estimator).input_tags.allow_nan tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse return tags def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. 
versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = MetadataRouter(owner=self) router.add( estimator=self.estimator, method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) router.add( splitter=check_cv(self.cv, classifier=is_classifier(self.estimator)), method_mapping=MethodMapping().add(caller="fit", callee="split"), ) router.add( scorer=check_scoring(self.estimator, scoring=self.scoring), method_mapping=MethodMapping().add(caller="fit", callee="score"), ) return router
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/_mutual_info.py
sklearn/feature_selection/_mutual_info.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Integral import numpy as np from scipy.sparse import issparse from scipy.special import digamma from sklearn.metrics.cluster import mutual_info_score from sklearn.neighbors import KDTree, NearestNeighbors from sklearn.preprocessing import scale from sklearn.utils import check_random_state from sklearn.utils._param_validation import Interval, StrOptions, validate_params from sklearn.utils.multiclass import check_classification_targets from sklearn.utils.parallel import Parallel, delayed from sklearn.utils.validation import check_array, check_X_y def _compute_mi_cc(x, y, n_neighbors): """Compute mutual information between two continuous variables. Parameters ---------- x, y : ndarray, shape (n_samples,) Samples of two continuous random variables, must have an identical shape. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information in nat units. If it turned out to be negative it is replaced by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual information". Phys. Rev. E 69, 2004. """ n_samples = x.size x = x.reshape((-1, 1)) y = y.reshape((-1, 1)) xy = np.hstack((x, y)) # Here we rely on NearestNeighbors to select the fastest algorithm. 
nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors) nn.fit(xy) radius = nn.kneighbors()[0] radius = np.nextafter(radius[:, -1], 0) # KDTree is explicitly fit to allow for the querying of number of # neighbors within a specified radius kd = KDTree(x, metric="chebyshev") nx = kd.query_radius(x, radius, count_only=True, return_distance=False) nx = np.array(nx) - 1.0 kd = KDTree(y, metric="chebyshev") ny = kd.query_radius(y, radius, count_only=True, return_distance=False) ny = np.array(ny) - 1.0 mi = ( digamma(n_samples) + digamma(n_neighbors) - np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)) ) return max(0, mi) def _compute_mi_cd(c, d, n_neighbors): """Compute mutual information between continuous and discrete variables. Parameters ---------- c : ndarray, shape (n_samples,) Samples of a continuous random variable. d : ndarray, shape (n_samples,) Samples of a discrete random variable. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information in nat units. If it turned out to be negative it is replaced by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014. """ n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with unique labels. 
mask = label_counts > 1 n_samples = np.sum(mask) label_counts = label_counts[mask] k_all = k_all[mask] c = c[mask] radius = radius[mask] kd = KDTree(c) m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) m_all = np.array(m_all) mi = ( digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all)) ) return max(0, mi) def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3): """Compute mutual information between two variables. This is a simple wrapper which selects a proper function to call based on whether `x` and `y` are discrete or not. """ if x_discrete and y_discrete: return mutual_info_score(x, y) elif x_discrete and not y_discrete: return _compute_mi_cd(y, x, n_neighbors) elif not x_discrete and y_discrete: return _compute_mi_cd(x, y, n_neighbors) else: return _compute_mi_cc(x, y, n_neighbors) def _iterate_columns(X, columns=None): """Iterate over columns of a matrix. Parameters ---------- X : ndarray or csc_matrix, shape (n_samples, n_features) Matrix over which to iterate. columns : iterable or None, default=None Indices of columns to iterate over. If None, iterate over all columns. Yields ------ x : ndarray, shape (n_samples,) Columns of `X` in dense format. """ if columns is None: columns = range(X.shape[1]) if issparse(X): for i in columns: x = np.zeros(X.shape[0]) start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1] x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr] yield x else: for i in columns: yield X[:, i] def _estimate_mi( X, y, *, discrete_features="auto", discrete_target=False, n_neighbors=3, copy=True, random_state=None, n_jobs=None, ): """Estimate mutual information between the features and the target. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Feature matrix. y : array-like of shape (n_samples,) Target vector. 
discrete_features : {'auto', bool, array-like}, default='auto' If bool, then determines whether to consider all features discrete or continuous. If array, then it should be either a boolean mask with shape (n_features,) or array with indices of discrete features. If 'auto', it is assigned to False for dense `X` and to True for sparse `X`. discrete_target : bool, default=False Whether to consider `y` as a discrete variable. n_neighbors : int, default=3 Number of neighbors to use for MI estimation for continuous variables, see [1]_ and [2]_. Higher values reduce variance of the estimation, but could introduce a bias. copy : bool, default=True Whether to make a copy of the given data. If set to False, the initial data will be overwritten. random_state : int, RandomState instance or None, default=None Determines random number generation for adding small noise to continuous variables in order to remove repeated values. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. n_jobs : int, default=None The number of jobs to use for computing the mutual information. The parallelization is done on the columns of `X`. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 1.5 Returns ------- mi : ndarray, shape (n_features,) Estimated mutual information between each feature and the target in nat units. A negative value will be replaced by 0. References ---------- .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual information". Phys. Rev. E 69, 2004. .. [2] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014. 
""" X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target) n_samples, n_features = X.shape if isinstance(discrete_features, (str, bool)): if isinstance(discrete_features, str): if discrete_features == "auto": discrete_features = issparse(X) else: raise ValueError("Invalid string value for discrete_features.") discrete_mask = np.empty(n_features, dtype=bool) discrete_mask.fill(discrete_features) else: discrete_features = check_array(discrete_features, ensure_2d=False) if discrete_features.dtype != "bool": discrete_mask = np.zeros(n_features, dtype=bool) discrete_mask[discrete_features] = True else: discrete_mask = discrete_features continuous_mask = ~discrete_mask if np.any(continuous_mask) and issparse(X): raise ValueError("Sparse matrix `X` can't have continuous features.") rng = check_random_state(random_state) if np.any(continuous_mask): X = X.astype(np.float64, copy=copy) X[:, continuous_mask] = scale( X[:, continuous_mask], with_mean=False, copy=False ) # Add small noise to continuous features as advised in Kraskov et. al. 
means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0)) X[:, continuous_mask] += ( 1e-10 * means * rng.standard_normal(size=(n_samples, np.sum(continuous_mask))) ) if not discrete_target: y = scale(y, with_mean=False) y += ( 1e-10 * np.maximum(1, np.mean(np.abs(y))) * rng.standard_normal(size=n_samples) ) mi = Parallel(n_jobs=n_jobs)( delayed(_compute_mi)(x, y, discrete_feature, discrete_target, n_neighbors) for x, discrete_feature in zip(_iterate_columns(X), discrete_mask) ) return np.array(mi) @validate_params( { "X": ["array-like", "sparse matrix"], "y": ["array-like"], "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"], "n_neighbors": [Interval(Integral, 1, None, closed="left")], "copy": ["boolean"], "random_state": ["random_state"], "n_jobs": [Integral, None], }, prefer_skip_nested_validation=True, ) def mutual_info_regression( X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None, n_jobs=None, ): """Estimate mutual information for a continuous target variable. Mutual information (MI) [1]_ between two random variables is a non-negative value, which measures the dependency between the variables. It is equal to zero if and only if two random variables are independent, and higher values mean higher dependency. The function relies on nonparametric methods based on entropy estimation from k-nearest neighbors distances as described in [2]_ and [3]_. Both methods are based on the idea originally proposed in [4]_. It can be used for univariate features selection, read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Feature matrix. y : array-like of shape (n_samples,) Target vector. discrete_features : {'auto', bool, array-like}, default='auto' If bool, then determines whether to consider all features discrete or continuous. 
If array, then it should be either a boolean mask with shape (n_features,) or array with indices of discrete features. If 'auto', it is assigned to False for dense `X` and to True for sparse `X`. n_neighbors : int, default=3 Number of neighbors to use for MI estimation for continuous variables, see [2]_ and [3]_. Higher values reduce variance of the estimation, but could introduce a bias. copy : bool, default=True Whether to make a copy of the given data. If set to False, the initial data will be overwritten. random_state : int, RandomState instance or None, default=None Determines random number generation for adding small noise to continuous variables in order to remove repeated values. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. n_jobs : int, default=None The number of jobs to use for computing the mutual information. The parallelization is done on the columns of `X`. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 1.5 Returns ------- mi : ndarray, shape (n_features,) Estimated mutual information between each feature and the target in nat units. Notes ----- 1. The term "discrete features" is used instead of naming them "categorical", because it describes the essence more accurately. For example, pixel intensities of an image are discrete features (but hardly categorical) and you will get better results if mark them as such. Also note, that treating a continuous variable as discrete and vice versa will usually give incorrect results, so be attentive about that. 2. True mutual information can't be negative. If its estimate turns out to be negative, it is replaced by zero. References ---------- .. [1] `Mutual Information <https://en.wikipedia.org/wiki/Mutual_information>`_ on Wikipedia. .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual information". Phys. Rev. 
E 69, 2004. .. [3] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014. .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16 Examples -------- >>> from sklearn.datasets import make_regression >>> from sklearn.feature_selection import mutual_info_regression >>> X, y = make_regression( ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 ... ) >>> mutual_info_regression(X, y) array([0.117, 2.645, 0.0287]) """ return _estimate_mi( X, y, discrete_features=discrete_features, discrete_target=False, n_neighbors=n_neighbors, copy=copy, random_state=random_state, n_jobs=n_jobs, ) @validate_params( { "X": ["array-like", "sparse matrix"], "y": ["array-like"], "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"], "n_neighbors": [Interval(Integral, 1, None, closed="left")], "copy": ["boolean"], "random_state": ["random_state"], "n_jobs": [Integral, None], }, prefer_skip_nested_validation=True, ) def mutual_info_classif( X, y, *, discrete_features="auto", n_neighbors=3, copy=True, random_state=None, n_jobs=None, ): """Estimate mutual information for a discrete target variable. Mutual information (MI) [1]_ between two random variables is a non-negative value, which measures the dependency between the variables. It is equal to zero if and only if two random variables are independent, and higher values mean higher dependency. The function relies on nonparametric methods based on entropy estimation from k-nearest neighbors distances as described in [2]_ and [3]_. Both methods are based on the idea originally proposed in [4]_. It can be used for univariate features selection, read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Feature matrix. y : array-like of shape (n_samples,) Target vector. 
discrete_features : 'auto', bool or array-like, default='auto' If bool, then determines whether to consider all features discrete or continuous. If array, then it should be either a boolean mask with shape (n_features,) or array with indices of discrete features. If 'auto', it is assigned to False for dense `X` and to True for sparse `X`. n_neighbors : int, default=3 Number of neighbors to use for MI estimation for continuous variables, see [2]_ and [3]_. Higher values reduce variance of the estimation, but could introduce a bias. copy : bool, default=True Whether to make a copy of the given data. If set to False, the initial data will be overwritten. random_state : int, RandomState instance or None, default=None Determines random number generation for adding small noise to continuous variables in order to remove repeated values. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. n_jobs : int, default=None The number of jobs to use for computing the mutual information. The parallelization is done on the columns of `X`. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 1.5 Returns ------- mi : ndarray, shape (n_features,) Estimated mutual information between each feature and the target in nat units. Notes ----- 1. The term "discrete features" is used instead of naming them "categorical", because it describes the essence more accurately. For example, pixel intensities of an image are discrete features (but hardly categorical) and you will get better results if mark them as such. Also note, that treating a continuous variable as discrete and vice versa will usually give incorrect results, so be attentive about that. 2. True mutual information can't be negative. If its estimate turns out to be negative, it is replaced by zero. References ---------- .. 
[1] `Mutual Information <https://en.wikipedia.org/wiki/Mutual_information>`_ on Wikipedia. .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual information". Phys. Rev. E 69, 2004. .. [3] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014. .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16 Examples -------- >>> from sklearn.datasets import make_classification >>> from sklearn.feature_selection import mutual_info_classif >>> X, y = make_classification( ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1, ... shuffle=False, random_state=42 ... ) >>> mutual_info_classif(X, y) array([0.589, 0.107, 0.196, 0.0968 , 0., 0. , 0. , 0. , 0. , 0.]) """ check_classification_targets(y) return _estimate_mi( X, y, discrete_features=discrete_features, discrete_target=True, n_neighbors=n_neighbors, copy=copy, random_state=random_state, n_jobs=n_jobs, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/_rfe.py
sklearn/feature_selection/_rfe.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause """Recursive feature elimination for feature ranking""" import warnings from copy import deepcopy from numbers import Integral import numpy as np from joblib import effective_n_jobs from sklearn.base import ( BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier, ) from sklearn.feature_selection._base import SelectorMixin, _get_feature_importances from sklearn.metrics import get_scorer from sklearn.model_selection import check_cv from sklearn.model_selection._validation import _score from sklearn.utils import Bunch, metadata_routing from sklearn.utils._metadata_requests import ( MetadataRouter, MethodMapping, _raise_for_params, _routing_enabled, process_routing, ) from sklearn.utils._param_validation import HasMethods, Interval, RealNotInt from sklearn.utils._tags import get_tags from sklearn.utils.metaestimators import _safe_split, available_if from sklearn.utils.parallel import Parallel, delayed from sklearn.utils.validation import ( _check_method_params, _estimator_has, check_is_fitted, validate_data, ) def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer, routed_params): """ Return the score and n_features per step for a fit across one fold. """ X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) fit_params = _check_method_params( X, params=routed_params.estimator.fit, indices=train ) score_params = _check_method_params( X=X, params=routed_params.scorer.score, indices=test ) rfe._fit( X_train, y_train, lambda estimator, features: _score( estimator, X_test[:, features], y_test, scorer, score_params=score_params, ), **fit_params, ) return rfe.step_scores_, rfe.step_support_, rfe.step_ranking_, rfe.step_n_features_ class RFE(SelectorMixin, MetaEstimatorMixin, BaseEstimator): """Feature ranking with recursive feature elimination. 
Given an external estimator that assigns weights to features (e.g., the coefficients of a linear model), the goal of recursive feature elimination (RFE) is to select features by recursively considering smaller and smaller sets of features. First, the estimator is trained on the initial set of features and the importance of each feature is obtained either through any specific attribute or callable. Then, the least important features are pruned from current set of features. That procedure is recursively repeated on the pruned set until the desired number of features to select is eventually reached. Read more in the :ref:`User Guide <rfe>`. Parameters ---------- estimator : ``Estimator`` instance A supervised learning estimator with a ``fit`` method that provides information about feature importance (e.g. `coef_`, `feature_importances_`). n_features_to_select : int or float, default=None The number of features to select. If `None`, half of the features are selected. If integer, the parameter is the absolute number of features to select. If float between 0 and 1, it is the fraction of features to select. .. versionchanged:: 0.24 Added float values for fractions. step : int or float, default=1 If greater than or equal to 1, then ``step`` corresponds to the (integer) number of features to remove at each iteration. If within (0.0, 1.0), then ``step`` corresponds to the percentage (rounded down) of features to remove at each iteration. verbose : int, default=0 Controls verbosity of output. importance_getter : str or callable, default='auto' If 'auto', uses the feature importance either through a `coef_` or `feature_importances_` attributes of estimator. Also accepts a string that specifies an attribute name/path for extracting feature importance (implemented with `attrgetter`). 
For example, give `regressor_.coef_` in case of :class:`~sklearn.compose.TransformedTargetRegressor` or `named_steps.clf.feature_importances_` in case of class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. If `callable`, overrides the default feature importance getter. The callable is passed with the fitted estimator and it should return importance for each feature. .. versionadded:: 0.24 Attributes ---------- classes_ : ndarray of shape (n_classes,) The classes labels. Only available when `estimator` is a classifier. estimator_ : ``Estimator`` instance The fitted estimator used to select features. n_features_ : int The number of selected features. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 ranking_ : ndarray of shape (n_features,) The feature ranking, such that ``ranking_[i]`` corresponds to the ranking position of the i-th feature. Selected (i.e., estimated best) features are assigned rank 1. support_ : ndarray of shape (n_features,) The mask of selected features. See Also -------- RFECV : Recursive feature elimination with built-in cross-validated selection of the best number of features. SelectFromModel : Feature selection based on thresholds of importance weights. SequentialFeatureSelector : Sequential cross-validation based feature selection. Does not rely on importance weights. Notes ----- Allows NaN/Inf in the input if the underlying estimator does as well. References ---------- .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection for cancer classification using support vector machines", Mach. Learn., 46(1-3), 389--422, 2002. 
Examples -------- The following example shows how to retrieve the 5 most informative features in the Friedman #1 dataset. >>> from sklearn.datasets import make_friedman1 >>> from sklearn.feature_selection import RFE >>> from sklearn.svm import SVR >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) >>> estimator = SVR(kernel="linear") >>> selector = RFE(estimator, n_features_to_select=5, step=1) >>> selector = selector.fit(X, y) >>> selector.support_ array([ True, True, True, True, True, False, False, False, False, False]) >>> selector.ranking_ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) """ _parameter_constraints: dict = { "estimator": [HasMethods(["fit"])], "n_features_to_select": [ None, Interval(RealNotInt, 0, 1, closed="right"), Interval(Integral, 0, None, closed="neither"), ], "step": [ Interval(Integral, 0, None, closed="neither"), Interval(RealNotInt, 0, 1, closed="neither"), ], "verbose": ["verbose"], "importance_getter": [str, callable], } def __init__( self, estimator, *, n_features_to_select=None, step=1, verbose=0, importance_getter="auto", ): self.estimator = estimator self.n_features_to_select = n_features_to_select self.step = step self.importance_getter = importance_getter self.verbose = verbose @property def classes_(self): """Classes labels available when `estimator` is a classifier. Returns ------- ndarray of shape (n_classes,) """ return self.estimator_.classes_ @_fit_context( # RFE.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, **fit_params): """Fit the RFE model and then the underlying estimator on the selected features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values. **fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the ``fit`` method of the underlying estimator. 
- If `enable_metadata_routing=True`: Parameters safely routed to the ``fit`` method of the underlying estimator. .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ if _routing_enabled(): routed_params = process_routing(self, "fit", **fit_params) else: routed_params = Bunch(estimator=Bunch(fit=fit_params)) return self._fit(X, y, **routed_params.estimator.fit) def _fit(self, X, y, step_score=None, **fit_params): # Parameter step_score controls the calculation of self.step_scores_ # step_score is not exposed to users and is used when implementing RFECV # self.step_scores_ will not be calculated when calling _fit through fit X, y = validate_data( self, X, y, accept_sparse="csc", ensure_min_features=2, ensure_all_finite=False, multi_output=True, ) # Initialization n_features = X.shape[1] if self.n_features_to_select is None: n_features_to_select = n_features // 2 elif isinstance(self.n_features_to_select, Integral): # int n_features_to_select = self.n_features_to_select if n_features_to_select > n_features: warnings.warn( ( f"Found {n_features_to_select=} > {n_features=}. There will be" " no feature selection and all features will be kept." ), UserWarning, ) else: # float n_features_to_select = int(n_features * self.n_features_to_select) if 0.0 < self.step < 1.0: step = int(max(1, self.step * n_features)) else: step = int(self.step) support_ = np.ones(n_features, dtype=bool) ranking_ = np.ones(n_features, dtype=int) if step_score: self.step_n_features_ = [] self.step_scores_ = [] self.step_support_ = [] self.step_ranking_ = [] # Elimination while np.sum(support_) > n_features_to_select: # Remaining features features = np.arange(n_features)[support_] # Rank the remaining features estimator = clone(self.estimator) if self.verbose > 0: print("Fitting estimator with %d features." 
% np.sum(support_)) estimator.fit(X[:, features], y, **fit_params) # Compute step values on the previous selection iteration because # 'estimator' must use features that have not been eliminated yet if step_score: self.step_n_features_.append(len(features)) self.step_scores_.append(step_score(estimator, features)) self.step_support_.append(list(support_)) self.step_ranking_.append(list(ranking_)) # Get importance and rank them importances = _get_feature_importances( estimator, self.importance_getter, transform_func="square", ) ranks = np.argsort(importances) # for sparse case ranks is matrix ranks = np.ravel(ranks) # Eliminate the worse features threshold = min(step, np.sum(support_) - n_features_to_select) support_[features[ranks][:threshold]] = False ranking_[np.logical_not(support_)] += 1 # Set final attributes features = np.arange(n_features)[support_] self.estimator_ = clone(self.estimator) self.estimator_.fit(X[:, features], y, **fit_params) # Compute step values when only n_features_to_select features left if step_score: self.step_n_features_.append(len(features)) self.step_scores_.append(step_score(self.estimator_, features)) self.step_support_.append(support_) self.step_ranking_.append(ranking_) self.n_features_ = support_.sum() self.support_ = support_ self.ranking_ = ranking_ return self @available_if(_estimator_has("predict")) def predict(self, X, **predict_params): """Reduce X to the selected features and predict using the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. **predict_params : dict Parameters to route to the ``predict`` method of the underlying estimator. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : array of shape [n_samples] The predicted target values. 
""" _raise_for_params(predict_params, self, "predict") check_is_fitted(self) if _routing_enabled(): routed_params = process_routing(self, "predict", **predict_params) else: routed_params = Bunch(estimator=Bunch(predict={})) return self.estimator_.predict( self.transform(X), **routed_params.estimator.predict ) @available_if(_estimator_has("score")) def score(self, X, y, **score_params): """Reduce X to the selected features and return the score of the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. **score_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the ``score`` method of the underlying estimator. - If `enable_metadata_routing=True`: Parameters safely routed to the `score` method of the underlying estimator. .. versionadded:: 1.0 .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- score : float Score of the underlying base estimator computed with the selected features returned by `rfe.transform(X)` and `y`. """ check_is_fitted(self) if _routing_enabled(): routed_params = process_routing(self, "score", **score_params) else: routed_params = Bunch(estimator=Bunch(score=score_params)) return self.estimator_.score( self.transform(X), y, **routed_params.estimator.score ) def _get_support_mask(self): check_is_fitted(self) return self.support_ @available_if(_estimator_has("decision_function")) def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : {array-like or sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : array, shape = [n_samples, n_classes] or [n_samples] The decision function of the input samples. 
The order of the classes corresponds to that in the attribute :term:`classes_`. Regression and binary classification produce an array of shape [n_samples]. """ check_is_fitted(self) return self.estimator_.decision_function(self.transform(X)) @available_if(_estimator_has("predict_proba")) def predict_proba(self, X): """Predict class probabilities for X. Parameters ---------- X : {array-like or sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : array of shape (n_samples, n_classes) The class probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """ check_is_fitted(self) return self.estimator_.predict_proba(self.transform(X)) @available_if(_estimator_has("predict_log_proba")) def predict_log_proba(self, X): """Predict class log-probabilities for X. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. Returns ------- p : array of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. 
""" check_is_fitted(self) return self.estimator_.predict_log_proba(self.transform(X)) def __sklearn_tags__(self): tags = super().__sklearn_tags__() sub_estimator_tags = get_tags(self.estimator) tags.estimator_type = sub_estimator_tags.estimator_type tags.classifier_tags = deepcopy(sub_estimator_tags.classifier_tags) tags.regressor_tags = deepcopy(sub_estimator_tags.regressor_tags) if tags.classifier_tags is not None: tags.classifier_tags.poor_score = True if tags.regressor_tags is not None: tags.regressor_tags.poor_score = True tags.target_tags.required = True tags.input_tags.sparse = sub_estimator_tags.input_tags.sparse tags.input_tags.allow_nan = sub_estimator_tags.input_tags.allow_nan return tags def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="predict", callee="predict") .add(caller="score", callee="score"), ) return router class RFECV(RFE): """Recursive feature elimination with cross-validation to select features. The number of features selected is tuned automatically by fitting an :class:`RFE` selector on the different cross-validation splits (provided by the `cv` parameter). The performance of each :class:`RFE` selector is evaluated using `scoring` for different numbers of selected features and aggregated together. Finally, the scores are averaged across folds and the number of features selected is set to the number of features that maximize the cross-validation score. See glossary entry for :term:`cross-validation estimator`. Read more in the :ref:`User Guide <rfe>`. 
Parameters ---------- estimator : ``Estimator`` instance A supervised learning estimator with a ``fit`` method that provides information about feature importance either through a ``coef_`` attribute or through a ``feature_importances_`` attribute. step : int or float, default=1 If greater than or equal to 1, then ``step`` corresponds to the (integer) number of features to remove at each iteration. If within (0.0, 1.0), then ``step`` corresponds to the percentage (rounded down) of features to remove at each iteration. Note that the last iteration may remove fewer than ``step`` features in order to reach ``min_features_to_select``. min_features_to_select : int, default=1 The minimum number of features to be selected. This number of features will always be scored, even if the difference between the original feature count and ``min_features_to_select`` isn't divisible by ``step``. .. versionadded:: 0.20 cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross-validation, - integer, to specify the number of folds. - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if ``y`` is binary or multiclass, :class:`~sklearn.model_selection.StratifiedKFold` is used. If the estimator is not a classifier or if ``y`` is neither binary nor multiclass, :class:`~sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value of None changed from 3-fold to 5-fold. scoring : str or callable, default=None Scoring method to evaluate the :class:`RFE` selectors' performance. Options: - str: see :ref:`scoring_string_names` for options. - callable: a scorer callable object (e.g., function) with signature ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details. 
- `None`: the `estimator`'s :ref:`default evaluation criterion <scoring_api_overview>` is used. verbose : int, default=0 Controls verbosity of output. n_jobs : int or None, default=None Number of cores to run in parallel while fitting across folds. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 0.18 importance_getter : str or callable, default='auto' If 'auto', uses the feature importance either through a `coef_` or `feature_importances_` attributes of estimator. Also accepts a string that specifies an attribute name/path for extracting feature importance. For example, give `regressor_.coef_` in case of :class:`~sklearn.compose.TransformedTargetRegressor` or `named_steps.clf.feature_importances_` in case of :class:`~sklearn.pipeline.Pipeline` with its last step named `clf`. If `callable`, overrides the default feature importance getter. The callable is passed with the fitted estimator and it should return importance for each feature. .. versionadded:: 0.24 Attributes ---------- classes_ : ndarray of shape (n_classes,) The classes labels. Only available when `estimator` is a classifier. estimator_ : ``Estimator`` instance The fitted estimator used to select features. cv_results_ : dict of ndarrays All arrays (values of the dictionary) are sorted in ascending order by the number of features used (i.e., the first element of the array represents the models that used the least number of features, while the last element represents the models that used all available features). .. versionadded:: 1.0 This dictionary contains the following keys: split(k)_test_score : ndarray of shape (n_subsets_of_features,) The cross-validation scores across (k)th fold. mean_test_score : ndarray of shape (n_subsets_of_features,) Mean of scores over the folds. std_test_score : ndarray of shape (n_subsets_of_features,) Standard deviation of scores over the folds. 
n_features : ndarray of shape (n_subsets_of_features,) Number of features used at each step. .. versionadded:: 1.5 split(k)_ranking : ndarray of shape (n_subsets_of_features,) The cross-validation rankings across (k)th fold. Selected (i.e., estimated best) features are assigned rank 1. Illustration in :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py` .. versionadded:: 1.7 split(k)_support : ndarray of shape (n_subsets_of_features,) The cross-validation supports across (k)th fold. The support is the mask of selected features. .. versionadded:: 1.7 n_features_ : int The number of selected features with cross-validation. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 ranking_ : narray of shape (n_features,) The feature ranking, such that `ranking_[i]` corresponds to the ranking position of the i-th feature. Selected (i.e., estimated best) features are assigned rank 1. support_ : ndarray of shape (n_features,) The mask of selected features. See Also -------- RFE : Recursive feature elimination. Notes ----- The size of all values in ``cv_results_`` is equal to ``ceil((n_features - min_features_to_select) / step) + 1``, where step is the number of features removed at each iteration. Allows NaN/Inf in the input if the underlying estimator does as well. References ---------- .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection for cancer classification using support vector machines", Mach. Learn., 46(1-3), 389--422, 2002. Examples -------- The following example shows how to retrieve the a-priori not known 5 informative features in the Friedman #1 dataset. 
>>> from sklearn.datasets import make_friedman1 >>> from sklearn.feature_selection import RFECV >>> from sklearn.svm import SVR >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) >>> estimator = SVR(kernel="linear") >>> selector = RFECV(estimator, step=1, cv=5) >>> selector = selector.fit(X, y) >>> selector.support_ array([ True, True, True, True, True, False, False, False, False, False]) >>> selector.ranking_ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) For a detailed example of using RFECV to select features when training a :class:`~sklearn.linear_model.LogisticRegression`, see :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`. """ _parameter_constraints: dict = { **RFE._parameter_constraints, "min_features_to_select": [Interval(Integral, 0, None, closed="neither")], "cv": ["cv_object"], "scoring": [None, str, callable], "n_jobs": [None, Integral], } _parameter_constraints.pop("n_features_to_select") __metadata_request__fit = {"groups": metadata_routing.UNUSED} def __init__( self, estimator, *, step=1, min_features_to_select=1, cv=None, scoring=None, verbose=0, n_jobs=None, importance_getter="auto", ): self.estimator = estimator self.step = step self.importance_getter = importance_getter self.cv = cv self.scoring = scoring self.verbose = verbose self.n_jobs = n_jobs self.min_features_to_select = min_features_to_select @_fit_context( # RFECV.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, **params): """Fit the RFE model and automatically tune the number of selected features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the total number of features. y : array-like of shape (n_samples,) Target values (integers for classification, real numbers for regression). 
**params : dict of str -> object Parameters passed to the ``fit`` method of the estimator, the scorer, and the CV splitter. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ _raise_for_params(params, self, "fit", allow=["groups"]) X, y = validate_data( self, X, y, accept_sparse="csr", ensure_min_features=2, ensure_all_finite=False, multi_output=True, ) if _routing_enabled(): routed_params = process_routing(self, "fit", **params) else: routed_params = Bunch( estimator=Bunch(fit={}), splitter=Bunch(split={"groups": params.pop("groups", None)}), scorer=Bunch(score={}), ) # Initialization cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) scorer = self._get_scorer() # Build an RFE object, which will evaluate and score each possible # feature count, down to self.min_features_to_select n_features = X.shape[1] if self.min_features_to_select > n_features: warnings.warn( ( f"Found min_features_to_select={self.min_features_to_select} > " f"{n_features=}. There will be no feature selection and all " "features will be kept." ), UserWarning, ) rfe = RFE( estimator=self.estimator, n_features_to_select=min(self.min_features_to_select, n_features), importance_getter=self.importance_getter, step=self.step, verbose=self.verbose, ) # Determine the number of subsets of features by fitting across # the train folds and choosing the "features_to_select" parameter # that gives the least averaged error across all folds. # Note that joblib raises a non-picklable error for bound methods # even if n_jobs is set to 1 with the default multiprocessing # backend. 
# This branching is done so that to # make sure that user code that sets n_jobs to 1 # and provides bound methods as scorers is not broken with the # addition of n_jobs parameter in version 0.18. if effective_n_jobs(self.n_jobs) == 1: parallel, func = list, _rfe_single_fit else: parallel = Parallel(n_jobs=self.n_jobs) func = delayed(_rfe_single_fit) step_results = parallel( func(clone(rfe), self.estimator, X, y, train, test, scorer, routed_params) for train, test in cv.split(X, y, **routed_params.splitter.split)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/__init__.py
sklearn/feature_selection/__init__.py
"""Feature selection algorithms. These include univariate filter selection methods and the recursive feature elimination algorithm. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from sklearn.feature_selection._base import SelectorMixin from sklearn.feature_selection._from_model import SelectFromModel from sklearn.feature_selection._mutual_info import ( mutual_info_classif, mutual_info_regression, ) from sklearn.feature_selection._rfe import RFE, RFECV from sklearn.feature_selection._sequential import SequentialFeatureSelector from sklearn.feature_selection._univariate_selection import ( GenericUnivariateSelect, SelectFdr, SelectFpr, SelectFwe, SelectKBest, SelectPercentile, chi2, f_classif, f_oneway, f_regression, r_regression, ) from sklearn.feature_selection._variance_threshold import VarianceThreshold __all__ = [ "RFE", "RFECV", "GenericUnivariateSelect", "SelectFdr", "SelectFpr", "SelectFromModel", "SelectFwe", "SelectKBest", "SelectPercentile", "SelectorMixin", "SequentialFeatureSelector", "VarianceThreshold", "chi2", "f_classif", "f_oneway", "f_regression", "mutual_info_classif", "mutual_info_regression", "r_regression", ]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/_univariate_selection.py
sklearn/feature_selection/_univariate_selection.py
"""Univariate features selection.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from numbers import Integral, Real import numpy as np from scipy import special, stats from scipy.sparse import issparse from sklearn.base import BaseEstimator, _fit_context from sklearn.feature_selection._base import SelectorMixin from sklearn.preprocessing import LabelBinarizer from sklearn.utils import as_float_array, check_array, check_X_y, safe_mask, safe_sqr from sklearn.utils._param_validation import Interval, StrOptions, validate_params from sklearn.utils.extmath import row_norms, safe_sparse_dot from sklearn.utils.validation import check_is_fitted, validate_data def _clean_nans(scores): """ Fixes Issue #1240: NaNs can't be properly compared, so change them to the smallest value of scores's dtype. -inf seems to be unreliable. """ # XXX where should this function be called? fit? scoring functions # themselves? scores = as_float_array(scores, copy=True) scores[np.isnan(scores)] = np.finfo(scores.dtype).min return scores ###################################################################### # Scoring functions # The following function is a rewriting of scipy.stats.f_oneway # Contrary to the scipy.stats.f_oneway implementation it does not # copy the data while keeping the inputs unchanged. def f_oneway(*args): """Perform a 1-way ANOVA. The one-way ANOVA tests the null hypothesis that 2 or more groups have the same population mean. The test is applied to samples from two or more groups, possibly with differing sizes. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- *args : {array-like, sparse matrix} Sample1, sample2... The sample measurements should be given as arguments. Returns ------- f_statistic : float The computed F-value of the test. p_value : float The associated p-value from the F-distribution. 
Notes ----- The ANOVA test has important assumptions that must be satisfied in order for the associated p-value to be valid. 1. The samples are independent 2. Each sample is from a normally distributed population 3. The population standard deviations of the groups are all equal. This property is known as homoscedasticity. If these assumptions are not true for a given set of data, it may still be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although with some loss of power. The algorithm is from Heiman[2], pp.394-7. See ``scipy.stats.f_oneway`` that should give the same results while being less efficient. References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 14. http://vassarstats.net/textbook .. [2] Heiman, G.W. Research Methods in Statistics. 2002. """ n_classes = len(args) args = [as_float_array(a) for a in args] n_samples_per_class = np.array([a.shape[0] for a in args]) n_samples = np.sum(n_samples_per_class) ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args) sums_args = [np.asarray(a.sum(axis=0)) for a in args] square_of_sums_alldata = sum(sums_args) ** 2 square_of_sums_args = [s**2 for s in sums_args] sstot = ss_alldata - square_of_sums_alldata / float(n_samples) ssbn = 0.0 for k, _ in enumerate(args): ssbn += square_of_sums_args[k] / n_samples_per_class[k] ssbn -= square_of_sums_alldata / float(n_samples) sswn = sstot - ssbn dfbn = n_classes - 1 dfwn = n_samples - n_classes msb = ssbn / float(dfbn) msw = sswn / float(dfwn) constant_features_idx = np.where(msw == 0.0)[0] if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size: warnings.warn("Features %s are constant." 
% constant_features_idx, UserWarning) f = msb / msw # flatten matrix to vector in sparse case f = np.asarray(f).ravel() prob = special.fdtrc(dfbn, dfwn, f) return f, prob @validate_params( { "X": ["array-like", "sparse matrix"], "y": ["array-like"], }, prefer_skip_nested_validation=True, ) def f_classif(X, y): """Compute the ANOVA F-value for the provided sample. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The set of regressors that will be tested sequentially. y : array-like of shape (n_samples,) The target vector. Returns ------- f_statistic : ndarray of shape (n_features,) F-statistic for each feature. p_values : ndarray of shape (n_features,) P-values associated with the F-statistic. See Also -------- chi2 : Chi-squared stats of non-negative features for classification tasks. f_regression : F-value between label/feature for regression tasks. Examples -------- >>> from sklearn.datasets import make_classification >>> from sklearn.feature_selection import f_classif >>> X, y = make_classification( ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1, ... shuffle=False, random_state=42 ... ) >>> f_statistic, p_values = f_classif(X, y) >>> f_statistic array([2.21e+02, 7.02e-01, 1.70e+00, 9.31e-01, 5.41e+00, 3.25e-01, 4.71e-02, 5.72e-01, 7.54e-01, 8.90e-02]) >>> p_values array([7.14e-27, 4.04e-01, 1.96e-01, 3.37e-01, 2.21e-02, 5.70e-01, 8.29e-01, 4.51e-01, 3.87e-01, 7.66e-01]) """ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"]) args = [X[safe_mask(X, y == k)] for k in np.unique(y)] return f_oneway(*args) def _chisquare(f_obs, f_exp): """Fast replacement for scipy.stats.chisquare. Version from https://github.com/scipy/scipy/pull/2525 with additional optimizations. 
""" f_obs = np.asarray(f_obs, dtype=np.float64) k = len(f_obs) # Reuse f_obs for chi-squared statistics chisq = f_obs chisq -= f_exp chisq **= 2 with np.errstate(invalid="ignore"): chisq /= f_exp chisq = chisq.sum(axis=0) return chisq, special.chdtrc(k - 1, chisq) @validate_params( { "X": ["array-like", "sparse matrix"], "y": ["array-like"], }, prefer_skip_nested_validation=True, ) def chi2(X, y): """Compute chi-squared stats between each non-negative feature and class. This score can be used to select the `n_features` features with the highest values for the test chi-squared statistic from X, which must contain only **non-negative integer feature values** such as booleans or frequencies (e.g., term counts in document classification), relative to the classes. If some of your features are continuous, you need to bin them, for example by using :class:`~sklearn.preprocessing.KBinsDiscretizer`. Recall that the chi-square test measures dependence between stochastic variables, so using this function "weeds out" the features that are the most likely to be independent of class and therefore irrelevant for classification. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample vectors. y : array-like of shape (n_samples,) Target vector (class labels). Returns ------- chi2 : ndarray of shape (n_features,) Chi2 statistics for each feature. p_values : ndarray of shape (n_features,) P-values for each feature. See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. f_regression : F-value between label/feature for regression tasks. Notes ----- Complexity of this algorithm is O(n_classes * n_features). Examples -------- >>> import numpy as np >>> from sklearn.feature_selection import chi2 >>> X = np.array([[1, 1, 3], ... [0, 1, 5], ... [5, 4, 1], ... [6, 6, 2], ... [1, 4, 0], ... 
[0, 0, 0]]) >>> y = np.array([1, 1, 0, 0, 2, 2]) >>> chi2_stats, p_values = chi2(X, y) >>> chi2_stats array([15.3, 6.5 , 8.9]) >>> p_values array([0.000456, 0.0387, 0.0116 ]) """ # XXX: we might want to do some of the following in logspace instead for # numerical stability. # Converting X to float allows getting better performance for the # safe_sparse_dot call made below. X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32)) if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative.") # Use a sparse representation for Y by default to reduce memory usage when # y has many unique classes. Y = LabelBinarizer(sparse_output=True).fit_transform(y) if Y.shape[1] == 1: Y = Y.toarray() Y = np.append(1 - Y, Y, axis=1) observed = safe_sparse_dot(Y.T, X) # n_classes * n_features if issparse(observed): # convert back to a dense array before calling _chisquare # XXX: could _chisquare be reimplement to accept sparse matrices for # cases where both n_classes and n_features are large (and X is # sparse)? observed = observed.toarray() feature_count = X.sum(axis=0).reshape(1, -1) class_prob = Y.mean(axis=0).reshape(1, -1) expected = np.dot(class_prob.T, feature_count) return _chisquare(observed, expected) @validate_params( { "X": ["array-like", "sparse matrix"], "y": ["array-like"], "center": ["boolean"], "force_finite": ["boolean"], }, prefer_skip_nested_validation=True, ) def r_regression(X, y, *, center=True, force_finite=True): """Compute Pearson's r for each features and the target. Pearson's r is also known as the Pearson correlation coefficient. Linear model for testing the individual effect of each of many regressors. This is a scoring function to be used in a feature selection procedure, not a free standing feature selection procedure. 
The cross correlation between each regressor and the target is computed as:: E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y)) For more on usage see the :ref:`User Guide <univariate_feature_selection>`. .. versionadded:: 1.0 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix. y : array-like of shape (n_samples,) The target vector. center : bool, default=True Whether or not to center the data matrix `X` and the target vector `y`. By default, `X` and `y` will be centered. force_finite : bool, default=True Whether or not to force the Pearson's R correlation to be finite. In the particular case where some features in `X` or the target `y` are constant, the Pearson's R correlation is not defined. When `force_finite=False`, a correlation of `np.nan` is returned to acknowledge this case. When `force_finite=True`, this value will be forced to a minimal correlation of `0.0`. .. versionadded:: 1.1 Returns ------- correlation_coefficient : ndarray of shape (n_features,) Pearson's R correlation coefficients of features. See Also -------- f_regression: Univariate linear regression tests returning f-statistic and p-values. mutual_info_regression: Mutual information for a continuous target. f_classif: ANOVA F-value between label/feature for classification tasks. chi2: Chi-squared stats of non-negative features for classification tasks. Examples -------- >>> from sklearn.datasets import make_regression >>> from sklearn.feature_selection import r_regression >>> X, y = make_regression( ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 ... ) >>> r_regression(X, y) array([-0.157, 1. 
, -0.229]) """ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64) n_samples = X.shape[0] # Compute centered values # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we # need not center X if center: y = y - np.mean(y) # TODO: for Scipy <= 1.10, `isspmatrix(X)` returns `True` for sparse arrays. # Here, we check the output of the `.mean` operation that returns a `np.matrix` # for sparse matrices while a `np.array` for dense and sparse arrays. # We can reconsider using `isspmatrix` when the minimum version is # SciPy >= 1.11 X_means = X.mean(axis=0) X_means = X_means.getA1() if isinstance(X_means, np.matrix) else X_means # Compute the scaled standard deviations via moments X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means**2) else: X_norms = row_norms(X.T) correlation_coefficient = safe_sparse_dot(y, X) with np.errstate(divide="ignore", invalid="ignore"): correlation_coefficient /= X_norms correlation_coefficient /= np.linalg.norm(y) if force_finite and not np.isfinite(correlation_coefficient).all(): # case where the target or some features are constant # the correlation coefficient(s) is/are set to the minimum (i.e. 0.0) nan_mask = np.isnan(correlation_coefficient) correlation_coefficient[nan_mask] = 0.0 return correlation_coefficient @validate_params( { "X": ["array-like", "sparse matrix"], "y": ["array-like"], "center": ["boolean"], "force_finite": ["boolean"], }, prefer_skip_nested_validation=True, ) def f_regression(X, y, *, center=True, force_finite=True): """Univariate linear regression tests returning F-statistic and p-values. Quick linear model for testing the effect of a single regressor, sequentially for many regressors. This is done in 2 steps: 1. The cross correlation between each regressor and the target is computed using :func:`r_regression` as:: E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y)) 2. It is converted to an F score and then to a p-value. 
:func:`f_regression` is derived from :func:`r_regression` and will rank features in the same order if all the features are positively correlated with the target. Note however that contrary to :func:`f_regression`, :func:`r_regression` values lie in [-1, 1] and can thus be negative. :func:`f_regression` is therefore recommended as a feature selection criterion to identify potentially predictive feature for a downstream classifier, irrespective of the sign of the association with the target variable. Furthermore :func:`f_regression` returns p-values while :func:`r_regression` does not. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix. y : array-like of shape (n_samples,) The target vector. center : bool, default=True Whether or not to center the data matrix `X` and the target vector `y`. By default, `X` and `y` will be centered. force_finite : bool, default=True Whether or not to force the F-statistics and associated p-values to be finite. There are two cases where the F-statistic is expected to not be finite: - when the target `y` or some features in `X` are constant. In this case, the Pearson's R correlation is not defined leading to obtain `np.nan` values in the F-statistic and p-value. When `force_finite=True`, the F-statistic is set to `0.0` and the associated p-value is set to `1.0`. - when a feature in `X` is perfectly correlated (or anti-correlated) with the target `y`. In this case, the F-statistic is expected to be `np.inf`. When `force_finite=True`, the F-statistic is set to `np.finfo(dtype).max` and the associated p-value is set to `0.0`. .. versionadded:: 1.1 Returns ------- f_statistic : ndarray of shape (n_features,) F-statistic for each feature. p_values : ndarray of shape (n_features,) P-values associated with the F-statistic. See Also -------- r_regression: Pearson's R between label/feature for regression tasks. 
f_classif: ANOVA F-value between label/feature for classification tasks. chi2: Chi-squared stats of non-negative features for classification tasks. SelectKBest: Select features based on the k highest scores. SelectFpr: Select features based on a false positive rate test. SelectFdr: Select features based on an estimated false discovery rate. SelectFwe: Select features based on family-wise error rate. SelectPercentile: Select features based on percentile of the highest scores. Examples -------- >>> from sklearn.datasets import make_regression >>> from sklearn.feature_selection import f_regression >>> X, y = make_regression( ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42 ... ) >>> f_statistic, p_values = f_regression(X, y) >>> f_statistic array([1.21, 2.67e13, 2.66]) >>> p_values array([0.276, 1.54e-283, 0.11]) """ correlation_coefficient = r_regression( X, y, center=center, force_finite=force_finite ) deg_of_freedom = y.size - (2 if center else 1) corr_coef_squared = correlation_coefficient**2 with np.errstate(divide="ignore", invalid="ignore"): f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom p_values = stats.f.sf(f_statistic, 1, deg_of_freedom) if force_finite and not np.isfinite(f_statistic).all(): # case where there is a perfect (anti-)correlation # f-statistics can be set to the maximum and p-values to zero mask_inf = np.isinf(f_statistic) f_statistic[mask_inf] = np.finfo(f_statistic.dtype).max # case where the target or some features are constant # f-statistics would be minimum and thus p-values large mask_nan = np.isnan(f_statistic) f_statistic[mask_nan] = 0.0 p_values[mask_nan] = 1.0 return f_statistic, p_values ###################################################################### # Base classes class _BaseFilter(SelectorMixin, BaseEstimator): """Initialize the univariate feature selection. 
Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. """ _parameter_constraints: dict = {"score_func": [callable]} def __init__(self, score_func): self.score_func = score_func @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Run score function on (X, y) and get the appropriate features. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) or None The target values (class labels in classification, real numbers in regression). If the selector is unsupervised then `y` can be set to `None`. Returns ------- self : object Returns the instance itself. """ if y is None: X = validate_data(self, X, accept_sparse=["csr", "csc"]) else: X, y = validate_data( self, X, y, accept_sparse=["csr", "csc"], multi_output=True ) self._check_params(X, y) score_func_ret = self.score_func(X, y) if isinstance(score_func_ret, (list, tuple)): self.scores_, self.pvalues_ = score_func_ret self.pvalues_ = np.asarray(self.pvalues_) else: self.scores_ = score_func_ret self.pvalues_ = None self.scores_ = np.asarray(self.scores_) return self def _check_params(self, X, y): pass def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.target_tags.required = True tags.input_tags.sparse = True return tags ###################################################################### # Specific filters ###################################################################### class SelectPercentile(_BaseFilter): """Select features according to a percentile of the highest scores. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. Default is f_classif (see below "See Also"). 
The default function only works with classification tasks. .. versionadded:: 0.18 percentile : int, default=10 Percent of features to keep. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores, None if `score_func` returned only scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. mutual_info_classif : Mutual information for a discrete target. chi2 : Chi-squared stats of non-negative features for classification tasks. f_regression : F-value between label/feature for regression tasks. mutual_info_regression : Mutual information for a continuous target. SelectKBest : Select features based on the k highest scores. SelectFpr : Select features based on a false positive rate test. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. GenericUnivariateSelect : Univariate feature selector with configurable mode. Notes ----- Ties between features with equal scores will be broken in an unspecified way. This filter supports unsupervised feature selection that only requests `X` for computing the scores. 
Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.feature_selection import SelectPercentile, chi2 >>> X, y = load_digits(return_X_y=True) >>> X.shape (1797, 64) >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y) >>> X_new.shape (1797, 7) """ _parameter_constraints: dict = { **_BaseFilter._parameter_constraints, "percentile": [Interval(Real, 0, 100, closed="both")], } def __init__(self, score_func=f_classif, *, percentile=10): super().__init__(score_func=score_func) self.percentile = percentile def _get_support_mask(self): check_is_fitted(self) # Cater for NaNs if self.percentile == 100: return np.ones(len(self.scores_), dtype=bool) elif self.percentile == 0: return np.zeros(len(self.scores_), dtype=bool) scores = _clean_nans(self.scores_) threshold = np.percentile(scores, 100 - self.percentile) mask = scores > threshold ties = np.where(scores == threshold)[0] if len(ties): max_feats = int(len(scores) * self.percentile / 100) kept_ties = ties[: max_feats - mask.sum()] mask[kept_ties] = True return mask def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.target_tags.required = False return tags class SelectKBest(_BaseFilter): """Select features according to the k highest scores. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. Default is f_classif (see below "See Also"). The default function only works with classification tasks. .. versionadded:: 0.18 k : int or "all", default=10 Number of top features to select. The "all" option bypasses selection, for use in a parameter search. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores, None if `score_func` returned only scores. 
n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- f_classif: ANOVA F-value between label/feature for classification tasks. mutual_info_classif: Mutual information for a discrete target. chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. mutual_info_regression: Mutual information for a continuous target. SelectPercentile: Select features based on percentile of the highest scores. SelectFpr : Select features based on a false positive rate test. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. GenericUnivariateSelect : Univariate feature selector with configurable mode. Notes ----- Ties between features with equal scores will be broken in an unspecified way. This filter supports unsupervised feature selection that only requests `X` for computing the scores. Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.feature_selection import SelectKBest, chi2 >>> X, y = load_digits(return_X_y=True) >>> X.shape (1797, 64) >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y) >>> X_new.shape (1797, 20) """ _parameter_constraints: dict = { **_BaseFilter._parameter_constraints, "k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")], } def __init__(self, score_func=f_classif, *, k=10): super().__init__(score_func=score_func) self.k = k def _check_params(self, X, y): if not isinstance(self.k, str) and self.k > X.shape[1]: warnings.warn( f"k={self.k} is greater than n_features={X.shape[1]}. " "All the features will be returned." 
) def _get_support_mask(self): check_is_fitted(self) if self.k == "all": return np.ones(self.scores_.shape, dtype=bool) elif self.k == 0: return np.zeros(self.scores_.shape, dtype=bool) else: scores = _clean_nans(self.scores_) mask = np.zeros(scores.shape, dtype=bool) # Request a stable sort. Mergesort takes more memory (~40MB per # megafeature on x86-64). mask[np.argsort(scores, kind="mergesort")[-self.k :]] = 1 return mask def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.target_tags.required = False return tags class SelectFpr(_BaseFilter): """Filter: Select the pvalues below alpha based on a FPR test. FPR test stands for False Positive Rate test. It controls the total amount of false detections. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). Default is f_classif (see below "See Also"). The default function only works with classification tasks. alpha : float, default=5e-2 Features with p-values less than `alpha` are selected. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. chi2 : Chi-squared stats of non-negative features for classification tasks. mutual_info_classif: Mutual information for a discrete target. f_regression : F-value between label/feature for regression tasks. mutual_info_regression : Mutual information for a continuous target. 
SelectPercentile : Select features based on percentile of the highest scores. SelectKBest : Select features based on the k highest scores. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. GenericUnivariateSelect : Univariate feature selector with configurable mode. Examples -------- >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.feature_selection import SelectFpr, chi2 >>> X, y = load_breast_cancer(return_X_y=True) >>> X.shape (569, 30) >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y) >>> X_new.shape (569, 16) """ _parameter_constraints: dict = { **_BaseFilter._parameter_constraints, "alpha": [Interval(Real, 0, 1, closed="both")], } def __init__(self, score_func=f_classif, *, alpha=5e-2): super().__init__(score_func=score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self) return self.pvalues_ < self.alpha class SelectFdr(_BaseFilter): """Filter: Select the p-values for an estimated false discovery rate. This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound on the expected false discovery rate. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). Default is f_classif (see below "See Also"). The default function only works with classification tasks. alpha : float, default=5e-2 The highest uncorrected p-value for features to keep. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. 
versionadded:: 1.0 See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. mutual_info_classif : Mutual information for a discrete target. chi2 : Chi-squared stats of non-negative features for classification tasks. f_regression : F-value between label/feature for regression tasks. mutual_info_regression : Mutual information for a continuous target. SelectPercentile : Select features based on percentile of the highest scores.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_mutual_info.py
sklearn/feature_selection/tests/test_mutual_info.py
import numpy as np import pytest from sklearn.datasets import make_classification, make_regression from sklearn.feature_selection import mutual_info_classif, mutual_info_regression from sklearn.feature_selection._mutual_info import _compute_mi from sklearn.utils import check_random_state from sklearn.utils._testing import ( assert_allclose, assert_array_equal, ) from sklearn.utils.fixes import CSR_CONTAINERS def test_compute_mi_dd(): # In discrete case computations are straightforward and can be done # by hand on given vectors. x = np.array([0, 1, 1, 0, 0]) y = np.array([1, 0, 0, 0, 1]) H_x = H_y = -(3 / 5) * np.log(3 / 5) - (2 / 5) * np.log(2 / 5) H_xy = -1 / 5 * np.log(1 / 5) - 2 / 5 * np.log(2 / 5) - 2 / 5 * np.log(2 / 5) I_xy = H_x + H_y - H_xy assert_allclose(_compute_mi(x, y, x_discrete=True, y_discrete=True), I_xy) def test_compute_mi_cc(global_dtype): # For two continuous variables a good approach is to test on bivariate # normal distribution, where mutual information is known. # Mean of the distribution, irrelevant for mutual information. mean = np.zeros(2) # Setup covariance matrix with correlation coeff. equal 0.5. sigma_1 = 1 sigma_2 = 10 corr = 0.5 cov = np.array( [ [sigma_1**2, corr * sigma_1 * sigma_2], [corr * sigma_1 * sigma_2, sigma_2**2], ] ) # True theoretical mutual information. 
I_theory = np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov)) rng = check_random_state(0) Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False) x, y = Z[:, 0], Z[:, 1] # Theory and computed values won't be very close # We here check with a large relative tolerance for n_neighbors in [3, 5, 7]: I_computed = _compute_mi( x, y, x_discrete=False, y_discrete=False, n_neighbors=n_neighbors ) assert_allclose(I_computed, I_theory, rtol=1e-1) def test_compute_mi_cd(global_dtype): # To test define a joint distribution as follows: # p(x, y) = p(x) p(y | x) # X ~ Bernoulli(p) # (Y | x = 0) ~ Uniform(-1, 1) # (Y | x = 1) ~ Uniform(0, 2) # Use the following formula for mutual information: # I(X; Y) = H(Y) - H(Y | X) # Two entropies can be computed by hand: # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2) # H(Y | X) = ln(2) # Now we need to implement sampling from out distribution, which is # done easily using conditional distribution logic. n_samples = 1000 rng = check_random_state(0) for p in [0.3, 0.5, 0.7]: x = rng.uniform(size=n_samples) > p y = np.empty(n_samples, global_dtype) mask = x == 0 y[mask] = rng.uniform(-1, 1, size=np.sum(mask)) y[~mask] = rng.uniform(0, 2, size=np.sum(~mask)) I_theory = -0.5 * ( (1 - p) * np.log(0.5 * (1 - p)) + p * np.log(0.5 * p) + np.log(0.5) ) - np.log(2) # Assert the same tolerance. for n_neighbors in [3, 5, 7]: I_computed = _compute_mi( x, y, x_discrete=True, y_discrete=False, n_neighbors=n_neighbors ) assert_allclose(I_computed, I_theory, rtol=1e-1) def test_compute_mi_cd_unique_label(global_dtype): # Test that adding unique label doesn't change MI. 
n_samples = 100 x = np.random.uniform(size=n_samples) > 0.5 y = np.empty(n_samples, global_dtype) mask = x == 0 y[mask] = np.random.uniform(-1, 1, size=np.sum(mask)) y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask)) mi_1 = _compute_mi(x, y, x_discrete=True, y_discrete=False) x = np.hstack((x, 2)) y = np.hstack((y, 10)) mi_2 = _compute_mi(x, y, x_discrete=True, y_discrete=False) assert_allclose(mi_1, mi_2) # We are going test that feature ordering by MI matches our expectations. def test_mutual_info_classif_discrete(global_dtype): X = np.array( [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype ) y = np.array([0, 1, 2, 2, 1]) # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly # informative. mi = mutual_info_classif(X, y, discrete_features=True) assert_array_equal(np.argsort(-mi), np.array([0, 2, 1])) def test_mutual_info_regression(global_dtype): # We generate sample from multivariate normal distribution, using # transformation from initially uncorrelated variables. The zero # variables after transformation is selected as the target vector, # it has the strongest correlation with the variable 2, and # the weakest correlation with the variable 1. T = np.array([[1, 0.5, 2, 1], [0, 1, 0.1, 0.0], [0, 0.1, 1, 0.1], [0, 0.1, 0.1, 1]]) cov = T.dot(T.T) mean = np.zeros(4) rng = check_random_state(0) Z = rng.multivariate_normal(mean, cov, size=1000).astype(global_dtype, copy=False) X = Z[:, 1:] y = Z[:, 0] mi = mutual_info_regression(X, y, random_state=0) assert_array_equal(np.argsort(-mi), np.array([1, 2, 0])) # XXX: should mutual_info_regression be fixed to avoid # up-casting float32 inputs to float64? assert mi.dtype == np.float64 def test_mutual_info_classif_mixed(global_dtype): # Here the target is discrete and there are two continuous and one # discrete feature. The idea of this test is clear from the code. 
rng = check_random_state(0) X = rng.rand(1000, 3).astype(global_dtype, copy=False) X[:, 1] += X[:, 0] y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int) X[:, 2] = X[:, 2] > 0.5 mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3, random_state=0) assert_array_equal(np.argsort(-mi), [2, 0, 1]) for n_neighbors in [5, 7, 9]: mi_nn = mutual_info_classif( X, y, discrete_features=[2], n_neighbors=n_neighbors, random_state=0 ) # Check that the continuous values have a higher MI with greater # n_neighbors assert mi_nn[0] > mi[0] assert mi_nn[1] > mi[1] # The n_neighbors should not have any effect on the discrete value # The MI should be the same assert mi_nn[2] == mi[2] @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_mutual_info_options(global_dtype, csr_container): X = np.array( [[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=global_dtype ) y = np.array([0, 1, 2, 2, 1], dtype=global_dtype) X_csr = csr_container(X) for mutual_info in (mutual_info_regression, mutual_info_classif): with pytest.raises(ValueError): mutual_info(X_csr, y, discrete_features=False) with pytest.raises(ValueError): mutual_info(X, y, discrete_features="manual") with pytest.raises(ValueError): mutual_info(X_csr, y, discrete_features=[True, False, True]) with pytest.raises(IndexError): mutual_info(X, y, discrete_features=[True, False, True, False]) with pytest.raises(IndexError): mutual_info(X, y, discrete_features=[1, 4]) mi_1 = mutual_info(X, y, discrete_features="auto", random_state=0) mi_2 = mutual_info(X, y, discrete_features=False, random_state=0) mi_3 = mutual_info(X_csr, y, discrete_features="auto", random_state=0) mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0) mi_5 = mutual_info(X, y, discrete_features=[True, False, True], random_state=0) mi_6 = mutual_info(X, y, discrete_features=[0, 2], random_state=0) assert_allclose(mi_1, mi_2) assert_allclose(mi_3, mi_4) assert_allclose(mi_5, mi_6) assert not np.allclose(mi_1, mi_3) 
@pytest.mark.parametrize("correlated", [True, False]) def test_mutual_information_symmetry_classif_regression(correlated, global_random_seed): """Check that `mutual_info_classif` and `mutual_info_regression` are symmetric by switching the target `y` as `feature` in `X` and vice versa. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/23720 """ rng = np.random.RandomState(global_random_seed) n = 100 d = rng.randint(10, size=n) if correlated: c = d.astype(np.float64) else: c = rng.normal(0, 1, size=n) mi_classif = mutual_info_classif( c[:, None], d, discrete_features=[False], random_state=global_random_seed ) mi_regression = mutual_info_regression( d[:, None], c, discrete_features=[True], random_state=global_random_seed ) assert mi_classif == pytest.approx(mi_regression) def test_mutual_info_regression_X_int_dtype(global_random_seed): """Check that results agree when X is integer dtype and float dtype. Non-regression test for Issue #26696. """ rng = np.random.RandomState(global_random_seed) X = rng.randint(100, size=(100, 10)) X_float = X.astype(np.float64, copy=True) y = rng.randint(100, size=100) expected = mutual_info_regression(X_float, y, random_state=global_random_seed) result = mutual_info_regression(X, y, random_state=global_random_seed) assert_allclose(result, expected) @pytest.mark.parametrize( "mutual_info_func, data_generator", [ (mutual_info_regression, make_regression), (mutual_info_classif, make_classification), ], ) def test_mutual_info_n_jobs(global_random_seed, mutual_info_func, data_generator): """Check that results are consistent with different `n_jobs`.""" X, y = data_generator(random_state=global_random_seed) single_job = mutual_info_func(X, y, random_state=global_random_seed, n_jobs=1) multi_job = mutual_info_func(X, y, random_state=global_random_seed, n_jobs=2) assert_allclose(single_job, multi_job)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_rfe.py
sklearn/feature_selection/tests/test_rfe.py
""" Testing Recursive feature elimination """ import re from operator import attrgetter import numpy as np import pytest from joblib import parallel_backend from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal from sklearn.base import BaseEstimator, ClassifierMixin, is_classifier from sklearn.compose import TransformedTargetRegressor from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression from sklearn.datasets import load_iris, make_classification, make_friedman1 from sklearn.ensemble import RandomForestClassifier from sklearn.feature_selection import RFE, RFECV from sklearn.impute import SimpleImputer from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.metrics import get_scorer, make_scorer, zero_one_loss from sklearn.model_selection import GroupKFold, cross_val_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC, SVR, LinearSVR from sklearn.utils import check_random_state from sklearn.utils._testing import ignore_warnings from sklearn.utils.fixes import CSR_CONTAINERS class MockClassifier(ClassifierMixin, BaseEstimator): """ Dummy classifier to test recursive feature elimination """ def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, y): assert len(X) == len(y) self.coef_ = np.ones(X.shape[1], dtype=np.float64) self.classes_ = sorted(set(y)) return self def predict(self, T): return np.ones(T.shape[0]) predict_proba = predict decision_function = predict transform = predict def score(self, X=None, y=None): return 0.0 def get_params(self, deep=True): return {"foo_param": self.foo_param} def set_params(self, **params): return self def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = True return tags def test_rfe_features_importance(): generator = check_random_state(0) iris = load_iris() # Add some irrelevant features. 
Random seed is set to make sure that # irrelevant features are always irrelevant. X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2) rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) assert len(rfe.ranking_) == X.shape[1] clf_svc = SVC(kernel="linear") rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1) rfe_svc.fit(X, y) # Check if the supports are equal assert_array_equal(rfe.get_support(), rfe_svc.get_support()) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_rfe(csr_container): generator = check_random_state(0) iris = load_iris() # Add some irrelevant features. Random seed is set to make sure that # irrelevant features are always irrelevant. X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] X_sparse = csr_container(X) y = iris.target # dense model clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert len(rfe.ranking_) == X.shape[1] # sparse model clf_sparse = SVC(kernel="linear") rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) rfe_sparse.fit(X_sparse, y) X_r_sparse = rfe_sparse.transform(X_sparse) assert X_r.shape == iris.data.shape assert_array_almost_equal(X_r[:10], iris.data[:10]) assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) assert rfe.score(X, y) == clf.score(iris.data, iris.target) assert_array_almost_equal(X_r, X_r_sparse.toarray()) def test_RFE_fit_score_params(): # Make sure RFE passes the metadata down to fit and score methods of the # underlying estimator class TestEstimator(BaseEstimator, ClassifierMixin): def fit(self, X, y, prop=None): if prop is None: raise ValueError("fit: prop cannot be None") self.svc_ = SVC(kernel="linear").fit(X, y) self.coef_ = self.svc_.coef_ return self def score(self, X, y, prop=None): if prop is None: 
raise ValueError("score: prop cannot be None") return self.svc_.score(X, y) X, y = load_iris(return_X_y=True) with pytest.raises(ValueError, match="fit: prop cannot be None"): RFE(estimator=TestEstimator()).fit(X, y) with pytest.raises(ValueError, match="score: prop cannot be None"): RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y) RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y, prop="foo") def test_rfe_percent_n_features(): # test that the results are the same generator = check_random_state(0) iris = load_iris() # Add some irrelevant features. Random seed is set to make sure that # irrelevant features are always irrelevant. X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target # there are 10 features in the data. We select 40%. clf = SVC(kernel="linear") rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe_num.fit(X, y) rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1) rfe_perc.fit(X, y) assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_) assert_array_equal(rfe_perc.support_, rfe_num.support_) def test_rfe_mockclassifier(): generator = check_random_state(0) iris = load_iris() # Add some irrelevant features. Random seed is set to make sure that # irrelevant features are always irrelevant. X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target # dense model clf = MockClassifier() rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert len(rfe.ranking_) == X.shape[1] assert X_r.shape == iris.data.shape @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_rfecv(csr_container): generator = check_random_state(0) iris = load_iris() # Add some irrelevant features. Random seed is set to make sure that # irrelevant features are always irrelevant. 
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=SVC(kernel="linear"), step=1) rfecv.fit(X, y) # non-regression test for missing worst feature: for key in rfecv.cv_results_.keys(): assert len(rfecv.cv_results_[key]) == X.shape[1] assert len(rfecv.ranking_) == X.shape[1] X_r = rfecv.transform(X) # All the noisy variable were filtered out assert_array_equal(X_r, iris.data) # same in sparse rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1) X_sparse = csr_container(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) # Test using a customized loss function scoring = make_scorer(zero_one_loss, greater_is_better=False) rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring) ignore_warnings(rfecv.fit)(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test using a scorer scorer = get_scorer("accuracy") rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer) rfecv.fit(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test fix on cv_results_ def test_scorer(estimator, X, y): return 1.0 rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer) rfecv.fit(X, y) # In the event of cross validation score ties, the expected behavior of # RFECV is to return the FEWEST features that maximize the CV score. # Because test_scorer always returns 1.0 in this example, RFECV should # reduce the dimensionality to a single feature (i.e. 
n_features_ = 1) assert rfecv.n_features_ == 1 # Same as the first two tests, but with step=2 rfecv = RFECV(estimator=SVC(kernel="linear"), step=2) rfecv.fit(X, y) for key in rfecv.cv_results_.keys(): assert len(rfecv.cv_results_[key]) == 6 assert len(rfecv.ranking_) == X.shape[1] X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2) X_sparse = csr_container(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) # Verifying that steps < 1 don't blow up. rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=0.2) X_sparse = csr_container(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) def test_rfecv_mockclassifier(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=MockClassifier(), step=1) rfecv.fit(X, y) # non-regression test for missing worst feature: for key in rfecv.cv_results_.keys(): assert len(rfecv.cv_results_[key]) == X.shape[1] assert len(rfecv.ranking_) == X.shape[1] def test_rfecv_verbose_output(): # Check verbose=1 is producing an output. 
import sys from io import StringIO sys.stdout = StringIO() generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1) rfecv.fit(X, y) verbose_output = sys.stdout verbose_output.seek(0) assert len(verbose_output.readline()) > 0 def test_rfecv_cv_results_size(global_random_seed): generator = check_random_state(global_random_seed) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Non-regression test for varying combinations of step and # min_features_to_select. for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]: rfecv = RFECV( estimator=MockClassifier(), step=step, min_features_to_select=min_features_to_select, ) rfecv.fit(X, y) score_len = np.ceil((X.shape[1] - min_features_to_select) / step) + 1 for key in rfecv.cv_results_.keys(): assert len(rfecv.cv_results_[key]) == score_len assert len(rfecv.ranking_) == X.shape[1] assert rfecv.n_features_ >= min_features_to_select def test_rfe_estimator_tags(): rfe = RFE(SVC(kernel="linear")) assert is_classifier(rfe) # make sure that cross-validation is stratified iris = load_iris() score = cross_val_score(rfe, iris.data, iris.target) assert score.min() > 0.7 def test_rfe_min_step(global_random_seed): n_features = 10 X, y = make_friedman1( n_samples=50, n_features=n_features, random_state=global_random_seed ) n_samples, n_features = X.shape estimator = SVR(kernel="linear") # Test when floor(step * n_features) <= 0 selector = RFE(estimator, step=0.01) sel = selector.fit(X, y) assert sel.support_.sum() == n_features // 2 # Test when step is between (0,1) and floor(step * n_features) > 0 selector = RFE(estimator, step=0.20) sel = selector.fit(X, y) assert sel.support_.sum() == n_features // 2 # Test when step is an integer selector = RFE(estimator, step=5) sel = 
selector.fit(X, y) assert sel.support_.sum() == n_features // 2 def test_number_of_subsets_of_features(global_random_seed): # In RFE, 'number_of_subsets_of_features' # = the number of iterations in '_fit' # = max(ranking_) # = 1 + (n_features + step - n_features_to_select - 1) // step # After optimization #4534, this number # = 1 + np.ceil((n_features - n_features_to_select) / float(step)) # This test case is to test their equivalence, refer to #4534 and #3824 def formula1(n_features, n_features_to_select, step): return 1 + ((n_features + step - n_features_to_select - 1) // step) def formula2(n_features, n_features_to_select, step): return 1 + np.ceil((n_features - n_features_to_select) / float(step)) # RFE # Case 1, n_features - n_features_to_select is divisible by step # Case 2, n_features - n_features_to_select is not divisible by step n_features_list = [11, 11] n_features_to_select_list = [3, 3] step_list = [2, 3] for n_features, n_features_to_select, step in zip( n_features_list, n_features_to_select_list, step_list ): generator = check_random_state(global_random_seed) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfe = RFE( estimator=SVC(kernel="linear"), n_features_to_select=n_features_to_select, step=step, ) rfe.fit(X, y) # this number also equals to the maximum of ranking_ assert np.max(rfe.ranking_) == formula1(n_features, n_features_to_select, step) assert np.max(rfe.ranking_) == formula2(n_features, n_features_to_select, step) # In RFECV, 'fit' calls 'RFE._fit' # 'number_of_subsets_of_features' of RFE # = the size of each score in 'cv_results_' of RFECV # = the number of iterations of the for loop before optimization #4534 # RFECV, n_features_to_select = 1 # Case 1, n_features - 1 is divisible by step # Case 2, n_features - 1 is not divisible by step n_features_to_select = 1 n_features_list = [11, 10] step_list = [2, 2] for n_features, step in zip(n_features_list, step_list): generator = 
check_random_state(global_random_seed) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfecv = RFECV(estimator=SVC(kernel="linear"), step=step) rfecv.fit(X, y) for key in rfecv.cv_results_.keys(): assert len(rfecv.cv_results_[key]) == formula1( n_features, n_features_to_select, step ) assert len(rfecv.cv_results_[key]) == formula2( n_features, n_features_to_select, step ) def test_rfe_cv_n_jobs(global_random_seed): generator = check_random_state(global_random_seed) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target rfecv = RFECV(estimator=SVC(kernel="linear")) rfecv.fit(X, y) rfecv_ranking = rfecv.ranking_ rfecv_cv_results_ = rfecv.cv_results_ rfecv.set_params(n_jobs=2) rfecv.fit(X, y) assert_array_almost_equal(rfecv.ranking_, rfecv_ranking) assert rfecv_cv_results_.keys() == rfecv.cv_results_.keys() for key in rfecv_cv_results_.keys(): assert rfecv_cv_results_[key] == pytest.approx(rfecv.cv_results_[key]) def test_rfe_cv_groups(): generator = check_random_state(0) iris = load_iris() number_groups = 4 groups = np.floor(np.linspace(0, number_groups, len(iris.target))) X = iris.data y = (iris.target > 0).astype(int) est_groups = RFECV( estimator=RandomForestClassifier(random_state=generator), step=1, scoring="accuracy", cv=GroupKFold(n_splits=2), ) est_groups.fit(X, y, groups=groups) assert est_groups.n_features_ > 0 @pytest.mark.parametrize( "importance_getter", [attrgetter("regressor_.coef_"), "regressor_.coef_"] ) @pytest.mark.parametrize("selector, expected_n_features", [(RFE, 5), (RFECV, 4)]) def test_rfe_wrapped_estimator(importance_getter, selector, expected_n_features): # Non-regression test for # https://github.com/scikit-learn/scikit-learn/issues/15312 X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) estimator = LinearSVR(random_state=0) log_estimator = TransformedTargetRegressor( regressor=estimator, func=np.log, inverse_func=np.exp ) selector = 
selector(log_estimator, importance_getter=importance_getter) sel = selector.fit(X, y) assert sel.support_.sum() == expected_n_features @pytest.mark.parametrize( "importance_getter, err_type", [ ("auto", ValueError), ("random", AttributeError), (lambda x: x.importance, AttributeError), ], ) @pytest.mark.parametrize("Selector", [RFE, RFECV]) def test_rfe_importance_getter_validation(importance_getter, err_type, Selector): X, y = make_friedman1(n_samples=50, n_features=10, random_state=42) estimator = LinearSVR() log_estimator = TransformedTargetRegressor( regressor=estimator, func=np.log, inverse_func=np.exp ) with pytest.raises(err_type): model = Selector(log_estimator, importance_getter=importance_getter) model.fit(X, y) @pytest.mark.parametrize("cv", [None, 5]) def test_rfe_allow_nan_inf_in_x(cv): iris = load_iris() X = iris.data y = iris.target # add nan and inf value to X X[0][0] = np.nan X[0][1] = np.inf clf = MockClassifier() if cv is not None: rfe = RFECV(estimator=clf, cv=cv) else: rfe = RFE(estimator=clf) rfe.fit(X, y) rfe.transform(X) def test_w_pipeline_2d_coef_(): pipeline = make_pipeline(StandardScaler(), LogisticRegression()) data, y = load_iris(return_X_y=True) sfm = RFE( pipeline, n_features_to_select=2, importance_getter="named_steps.logisticregression.coef_", ) sfm.fit(data, y) assert sfm.transform(data).shape[1] == 2 def test_rfecv_std_and_mean(global_random_seed): generator = check_random_state(global_random_seed) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target rfecv = RFECV(estimator=SVC(kernel="linear")) rfecv.fit(X, y) split_keys = [ key for key in rfecv.cv_results_.keys() if re.search(r"split\d+_test_score", key) ] cv_scores = np.asarray([rfecv.cv_results_[key] for key in split_keys]) expected_mean = np.mean(cv_scores, axis=0) expected_std = np.std(cv_scores, axis=0) assert_allclose(rfecv.cv_results_["mean_test_score"], expected_mean) assert_allclose(rfecv.cv_results_["std_test_score"], 
expected_std) @pytest.mark.parametrize( ["min_features_to_select", "n_features", "step", "cv_results_n_features"], [ [1, 4, 1, np.array([1, 2, 3, 4])], [1, 5, 1, np.array([1, 2, 3, 4, 5])], [1, 4, 2, np.array([1, 2, 4])], [1, 5, 2, np.array([1, 3, 5])], [1, 4, 3, np.array([1, 4])], [1, 5, 3, np.array([1, 2, 5])], [1, 4, 4, np.array([1, 4])], [1, 5, 4, np.array([1, 5])], [4, 4, 2, np.array([4])], [4, 5, 1, np.array([4, 5])], [4, 5, 2, np.array([4, 5])], ], ) def test_rfecv_cv_results_n_features( min_features_to_select, n_features, step, cv_results_n_features, ): X, y = make_classification( n_samples=20, n_features=n_features, n_informative=n_features, n_redundant=0 ) rfecv = RFECV( estimator=SVC(kernel="linear"), step=step, min_features_to_select=min_features_to_select, ) rfecv.fit(X, y) assert_array_equal(rfecv.cv_results_["n_features"], cv_results_n_features) assert all( len(value) == len(rfecv.cv_results_["n_features"]) for value in rfecv.cv_results_.values() ) @pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) def test_multioutput(ClsRFE): X = np.random.normal(size=(10, 3)) y = np.random.randint(2, size=(10, 2)) clf = RandomForestClassifier(n_estimators=5) rfe_test = ClsRFE(clf) rfe_test.fit(X, y) @pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) def test_pipeline_with_nans(ClsRFE): """Check that RFE works with pipeline that accept nans. Non-regression test for gh-21743. """ X, y = load_iris(return_X_y=True) X[0, 0] = np.nan pipe = make_pipeline( SimpleImputer(), StandardScaler(), LogisticRegression(), ) fs = ClsRFE( estimator=pipe, importance_getter="named_steps.logisticregression.coef_", ) fs.fit(X, y) @pytest.mark.parametrize("ClsRFE", [RFE, RFECV]) @pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression]) def test_rfe_pls(ClsRFE, PLSEstimator): """Check the behaviour of RFE with PLS estimators. 
Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/12410 """ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) estimator = PLSEstimator(n_components=1) selector = ClsRFE(estimator, step=1).fit(X, y) assert selector.score(X, y) > 0.5 def test_rfe_estimator_attribute_error(): """Check that we raise the proper AttributeError when the estimator does not implement the `decision_function` method, which is decorated with `available_if`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/28108 """ iris = load_iris() # `LinearRegression` does not implement 'decision_function' and should raise an # AttributeError rfe = RFE(estimator=LinearRegression()) outer_msg = "This 'RFE' has no attribute 'decision_function'" inner_msg = "'LinearRegression' object has no attribute 'decision_function'" with pytest.raises(AttributeError, match=outer_msg) as exec_info: rfe.fit(iris.data, iris.target).decision_function(iris.data) assert isinstance(exec_info.value.__cause__, AttributeError) assert inner_msg in str(exec_info.value.__cause__) @pytest.mark.parametrize( "ClsRFE, param", [(RFE, "n_features_to_select"), (RFECV, "min_features_to_select")] ) def test_rfe_n_features_to_select_warning(ClsRFE, param): """Check if the correct warning is raised when trying to initialize a RFE object with a n_features_to_select attribute larger than the number of features present in the X variable that is passed to the fit method """ X, y = make_classification(n_features=20, random_state=0) with pytest.warns(UserWarning, match=f"{param}=21 > n_features=20"): # Create RFE/RFECV with n_features_to_select/min_features_to_select # larger than the number of features present in the X variable clsrfe = ClsRFE(estimator=LogisticRegression(), **{param: 21}) clsrfe.fit(X, y) def test_rfe_with_sample_weight(): """Test that `RFE` works correctly with sample weights.""" X, y = make_classification(random_state=0) n_samples = X.shape[0] # Assign the 
first half of the samples with twice the weight sample_weight = np.ones_like(y) sample_weight[: n_samples // 2] = 2 # Duplicate the first half of the data samples to replicate the effect # of sample weights for comparison X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) estimator = SVC(kernel="linear") rfe_sw = RFE(estimator=estimator, step=0.1) rfe_sw.fit(X, y, sample_weight=sample_weight) rfe = RFE(estimator=estimator, step=0.1) rfe.fit(X2, y2) assert_array_equal(rfe_sw.ranking_, rfe.ranking_) # Also verify that when sample weights are not doubled the results # are different from the duplicated data rfe_sw_2 = RFE(estimator=estimator, step=0.1) sample_weight_2 = np.ones_like(y) rfe_sw_2.fit(X, y, sample_weight=sample_weight_2) assert not np.array_equal(rfe_sw_2.ranking_, rfe.ranking_) def test_rfe_with_joblib_threading_backend(global_random_seed): X, y = make_classification(random_state=global_random_seed) clf = LogisticRegression() rfe = RFECV( estimator=clf, n_jobs=2, ) rfe.fit(X, y) ranking_ref = rfe.ranking_ with parallel_backend("threading"): rfe.fit(X, y) assert_array_equal(ranking_ref, rfe.ranking_) def test_results_per_cv_in_rfecv(global_random_seed): """ Test that the results of RFECV are consistent across the different folds in terms of length of the arrays. """ X, y = make_classification(random_state=global_random_seed) clf = LogisticRegression() rfecv = RFECV( estimator=clf, n_jobs=2, cv=5, ) rfecv.fit(X, y) assert len(rfecv.cv_results_["split1_test_score"]) == len( rfecv.cv_results_["split2_test_score"] ) assert len(rfecv.cv_results_["split1_support"]) == len( rfecv.cv_results_["split2_support"] ) assert len(rfecv.cv_results_["split1_ranking"]) == len( rfecv.cv_results_["split2_ranking"] )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_feature_select.py
sklearn/feature_selection/tests/test_feature_select.py
""" Todo: cross-check the F-value with stats model """ import itertools import warnings import numpy as np import pytest from numpy.testing import assert_allclose from scipy import sparse, stats from sklearn.datasets import load_iris, make_classification, make_regression from sklearn.feature_selection import ( GenericUnivariateSelect, SelectFdr, SelectFpr, SelectFwe, SelectKBest, SelectPercentile, chi2, f_classif, f_oneway, f_regression, mutual_info_classif, mutual_info_regression, r_regression, ) from sklearn.utils import safe_mask from sklearn.utils._testing import ( _convert_container, assert_almost_equal, assert_array_almost_equal, assert_array_equal, ignore_warnings, ) from sklearn.utils.fixes import CSR_CONTAINERS ############################################################################## # Test the score functions def test_f_oneway_vs_scipy_stats(): # Test that our f_oneway gives the same result as scipy.stats rng = np.random.RandomState(0) X1 = rng.randn(10, 3) X2 = 1 + rng.randn(10, 3) f, pv = stats.f_oneway(X1, X2) f2, pv2 = f_oneway(X1, X2) assert np.allclose(f, f2) assert np.allclose(pv, pv2) def test_f_oneway_ints(): # Smoke test f_oneway on integers: that it does raise casting errors # with recent numpys rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 10)) y = np.arange(10) fint, pint = f_oneway(X, y) # test that is gives the same result as with float f, p = f_oneway(X.astype(float), y) assert_array_almost_equal(f, fint, decimal=4) assert_array_almost_equal(p, pint, decimal=4) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_f_classif(csr_container): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification( n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0, ) F, pv = f_classif(X, y) F_sparse, pv_sparse = f_classif(csr_container(X), 
y) assert (F > 0).all() assert (pv > 0).all() assert (pv < 1).all() assert (pv[:5] < 0.05).all() assert (pv[5:] > 1.0e-4).all() assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) @pytest.mark.parametrize("center", [True, False]) def test_r_regression(center): X, y = make_regression( n_samples=2000, n_features=20, n_informative=5, shuffle=False, random_state=0 ) corr_coeffs = r_regression(X, y, center=center) assert (-1 < corr_coeffs).all() assert (corr_coeffs < 1).all() sparse_X = _convert_container(X, "sparse") sparse_corr_coeffs = r_regression(sparse_X, y, center=center) assert_allclose(sparse_corr_coeffs, corr_coeffs) # Testing against numpy for reference Z = np.hstack((X, y[:, np.newaxis])) correlation_matrix = np.corrcoef(Z, rowvar=False) np_corr_coeffs = correlation_matrix[:-1, -1] assert_array_almost_equal(np_corr_coeffs, corr_coeffs, decimal=3) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_f_regression(csr_container): # Test whether the F test yields meaningful results # on a simple simulated regression problem X, y = make_regression( n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 ) F, pv = f_regression(X, y) assert (F > 0).all() assert (pv > 0).all() assert (pv < 1).all() assert (pv[:5] < 0.05).all() assert (pv[5:] > 1.0e-4).all() # with centering, compare with sparse F, pv = f_regression(X, y, center=True) F_sparse, pv_sparse = f_regression(csr_container(X), y, center=True) assert_allclose(F_sparse, F) assert_allclose(pv_sparse, pv) # again without centering, compare with sparse F, pv = f_regression(X, y, center=False) F_sparse, pv_sparse = f_regression(csr_container(X), y, center=False) assert_allclose(F_sparse, F) assert_allclose(pv_sparse, pv) def test_f_regression_input_dtype(): # Test whether f_regression returns the same value # for any numeric data_type rng = np.random.RandomState(0) X = rng.rand(10, 20) y = np.arange(10).astype(int) F1, pv1 = f_regression(X, y) F2, pv2 
= f_regression(X, y.astype(float)) assert_allclose(F1, F2, 5) assert_allclose(pv1, pv2, 5) def test_f_regression_center(): # Test whether f_regression preserves dof according to 'center' argument # We use two centered variates so we have a simple relationship between # F-score with variates centering and F-score without variates centering. # Create toy example X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean n_samples = X.size Y = np.ones(n_samples) Y[::2] *= -1.0 Y[0] = 0.0 # have Y mean being null F1, _ = f_regression(X, Y, center=True) F2, _ = f_regression(X, Y, center=False) assert_allclose(F1 * (n_samples - 1.0) / (n_samples - 2.0), F2) assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS @pytest.mark.parametrize( "X, y, expected_corr_coef, force_finite", [ ( # A feature in X is constant - forcing finite np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), np.array([0, 1, 1, 0]), np.array([0.0, 0.32075]), True, ), ( # The target y is constant - forcing finite np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), np.array([0, 0, 0, 0]), np.array([0.0, 0.0]), True, ), ( # A feature in X is constant - not forcing finite np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), np.array([0, 1, 1, 0]), np.array([np.nan, 0.32075]), False, ), ( # The target y is constant - not forcing finite np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), np.array([0, 0, 0, 0]), np.array([np.nan, np.nan]), False, ), ], ) def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite): """Check the behaviour of `force_finite` for some corner cases with `r_regression`. 
Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/15672 """ with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) corr_coef = r_regression(X, y, force_finite=force_finite) np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef) @pytest.mark.parametrize( "X, y, expected_f_statistic, expected_p_values, force_finite", [ ( # A feature in X is constant - forcing finite np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), np.array([0, 1, 1, 0]), np.array([0.0, 0.2293578]), np.array([1.0, 0.67924985]), True, ), ( # The target y is constant - forcing finite np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), np.array([0, 0, 0, 0]), np.array([0.0, 0.0]), np.array([1.0, 1.0]), True, ), ( # Feature in X correlated with y - forcing finite np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), np.array([0, 1, 2, 3]), np.array([np.finfo(np.float64).max, 0.845433]), np.array([0.0, 0.454913]), True, ), ( # Feature in X anti-correlated with y - forcing finite np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), np.array([0, 1, 2, 3]), np.array([np.finfo(np.float64).max, 0.845433]), np.array([0.0, 0.454913]), True, ), ( # A feature in X is constant - not forcing finite np.array([[2, 1], [2, 0], [2, 10], [2, 4]]), np.array([0, 1, 1, 0]), np.array([np.nan, 0.2293578]), np.array([np.nan, 0.67924985]), False, ), ( # The target y is constant - not forcing finite np.array([[5, 1], [3, 0], [2, 10], [8, 4]]), np.array([0, 0, 0, 0]), np.array([np.nan, np.nan]), np.array([np.nan, np.nan]), False, ), ( # Feature in X correlated with y - not forcing finite np.array([[0, 1], [1, 0], [2, 10], [3, 4]]), np.array([0, 1, 2, 3]), np.array([np.inf, 0.845433]), np.array([0.0, 0.454913]), False, ), ( # Feature in X anti-correlated with y - not forcing finite np.array([[3, 1], [2, 0], [1, 10], [0, 4]]), np.array([0, 1, 2, 3]), np.array([np.inf, 0.845433]), np.array([0.0, 0.454913]), False, ), ], ) def test_f_regression_corner_case( X, y, expected_f_statistic, 
expected_p_values, force_finite ): """Check the behaviour of `force_finite` for some corner cases with `f_regression`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/15672 """ with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) f_statistic, p_values = f_regression(X, y, force_finite=force_finite) np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic) np.testing.assert_array_almost_equal(p_values, expected_p_values) def test_f_classif_multi_class(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification( n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0, ) F, pv = f_classif(X, y) assert (F > 0).all() assert (pv > 0).all() assert (pv < 1).all() assert (pv[:5] < 0.05).all() assert (pv[5:] > 1.0e-4).all() def test_select_percentile_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification( n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0, ) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(f_classif, mode="percentile", param=25) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_select_percentile_classif_sparse(csr_container): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = 
make_classification( n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0, ) X = csr_container(X) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(f_classif, mode="percentile", param=25) .fit(X, y) .transform(X) ) assert_array_equal(X_r.toarray(), X_r2.toarray()) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_r2inv = univariate_filter.inverse_transform(X_r2) assert sparse.issparse(X_r2inv) support_mask = safe_mask(X_r2inv, support) assert X_r2inv.shape == X.shape assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) # Check other columns are empty assert X_r2inv.nnz == X_r.nnz ############################################################################## # Test univariate selection in classification settings def test_select_kbest_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the k best heuristic X, y = make_classification( n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0, ) univariate_filter = SelectKBest(f_classif, k=5) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(f_classif, mode="k_best", param=5) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_kbest_all(): # Test whether k="all" correctly returns all features. 
X, y = make_classification( n_samples=20, n_features=10, shuffle=False, random_state=0 ) univariate_filter = SelectKBest(f_classif, k="all") X_r = univariate_filter.fit(X, y).transform(X) assert_array_equal(X, X_r) # Non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/24949 X_r2 = ( GenericUnivariateSelect(f_classif, mode="k_best", param="all") .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) @pytest.mark.parametrize("dtype_in", [np.float32, np.float64]) def test_select_kbest_zero(dtype_in): # Test whether k=0 correctly returns no features. X, y = make_classification( n_samples=20, n_features=10, shuffle=False, random_state=0 ) X = X.astype(dtype_in) univariate_filter = SelectKBest(f_classif, k=0) univariate_filter.fit(X, y) support = univariate_filter.get_support() gtruth = np.zeros(10, dtype=bool) assert_array_equal(support, gtruth) with pytest.warns(UserWarning, match="No features were selected"): X_selected = univariate_filter.transform(X) assert X_selected.shape == (20, 0) assert X_selected.dtype == dtype_in def test_select_heuristics_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the fdr, fwe and fpr heuristics X, y = make_classification( n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0, ) univariate_filter = SelectFwe(f_classif, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ["fdr", "fpr", "fwe"]: X_r2 = ( GenericUnivariateSelect(f_classif, mode=mode, param=0.01) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_allclose(support, gtruth) ############################################################################## # Test univariate selection in regression settings def 
assert_best_scores_kept(score_filter): scores = score_filter.scores_ support = score_filter.get_support() assert_allclose(np.sort(scores[support]), np.sort(scores)[-support.sum() :]) def test_select_percentile_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the percentile heuristic X, y = make_regression( n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 ) univariate_filter = SelectPercentile(f_regression, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = ( GenericUnivariateSelect(f_regression, mode="percentile", param=25) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_2 = X.copy() X_2[:, np.logical_not(support)] = 0 assert_array_equal(X_2, univariate_filter.inverse_transform(X_r)) # Check inverse_transform respects dtype assert_array_equal( X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool)) ) def test_select_percentile_regression_full(): # Test whether the relative univariate feature selection # selects all features when '100%' is asked. 
X, y = make_regression( n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 ) univariate_filter = SelectPercentile(f_regression, percentile=100) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = ( GenericUnivariateSelect(f_regression, mode="percentile", param=100) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.ones(20) assert_array_equal(support, gtruth) def test_select_kbest_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the k best heuristic X, y = make_regression( n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10, ) univariate_filter = SelectKBest(f_regression, k=5) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = ( GenericUnivariateSelect(f_regression, mode="k_best", param=5) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_heuristics_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fpr, fdr or fwe heuristics X, y = make_regression( n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10, ) univariate_filter = SelectFpr(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ["fdr", "fpr", "fwe"]: X_r2 = ( GenericUnivariateSelect(f_regression, mode=mode, param=0.01) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_equal(support[:5], np.ones((5,), dtype=bool)) assert np.sum(support[5:] == 1) < 3 def test_boundary_case_ch2(): # Test boundary case, and always aim to select 1 
feature. X = np.array([[10, 20], [20, 20], [20, 30]]) y = np.array([[1], [0], [0]]) scores, pvalues = chi2(X, y) assert_array_almost_equal(scores, np.array([4.0, 0.71428571])) assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472])) filter_fdr = SelectFdr(chi2, alpha=0.1) filter_fdr.fit(X, y) support_fdr = filter_fdr.get_support() assert_array_equal(support_fdr, np.array([True, False])) filter_kbest = SelectKBest(chi2, k=1) filter_kbest.fit(X, y) support_kbest = filter_kbest.get_support() assert_array_equal(support_kbest, np.array([True, False])) filter_percentile = SelectPercentile(chi2, percentile=50) filter_percentile.fit(X, y) support_percentile = filter_percentile.get_support() assert_array_equal(support_percentile, np.array([True, False])) filter_fpr = SelectFpr(chi2, alpha=0.1) filter_fpr.fit(X, y) support_fpr = filter_fpr.get_support() assert_array_equal(support_fpr, np.array([True, False])) filter_fwe = SelectFwe(chi2, alpha=0.1) filter_fwe.fit(X, y) support_fwe = filter_fwe.get_support() assert_array_equal(support_fwe, np.array([True, False])) @pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1]) @pytest.mark.parametrize("n_informative", [1, 5, 10]) def test_select_fdr_regression(alpha, n_informative): # Test that fdr heuristic actually has low FDR. 
def single_fdr(alpha, n_informative, random_state): X, y = make_regression( n_samples=150, n_features=20, n_informative=n_informative, shuffle=False, random_state=random_state, noise=10, ) with warnings.catch_warnings(record=True): # Warnings can be raised when no features are selected # (low alpha or very noisy data) univariate_filter = SelectFdr(f_regression, alpha=alpha) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(f_regression, mode="fdr", param=alpha) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() num_false_positives = np.sum(support[n_informative:] == 1) num_true_positives = np.sum(support[:n_informative] == 1) if num_false_positives == 0: return 0.0 false_discovery_rate = num_false_positives / ( num_true_positives + num_false_positives ) return false_discovery_rate # As per Benjamini-Hochberg, the expected false discovery rate # should be lower than alpha: # FDR = E(FP / (TP + FP)) <= alpha false_discovery_rate = np.mean( [single_fdr(alpha, n_informative, random_state) for random_state in range(100)] ) assert alpha >= false_discovery_rate # Make sure that the empirical false discovery rate increases # with alpha: if false_discovery_rate != 0: assert false_discovery_rate > alpha / 10 def test_select_fwe_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fwe heuristic X, y = make_regression( n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0 ) univariate_filter = SelectFwe(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(f_regression, mode="fwe", param=0.01) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support[:5], np.ones((5,), dtype=bool)) assert np.sum(support[5:] == 1) < 2 def 
test_selectkbest_tiebreaking(): # Test whether SelectKBest actually selects k features in case of ties. # Prior to 0.11, SelectKBest would return more features than requested. Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectKBest(dummy_score, k=1) X1 = ignore_warnings(sel.fit_transform)([X], y) assert X1.shape[1] == 1 assert_best_scores_kept(sel) sel = SelectKBest(dummy_score, k=2) X2 = ignore_warnings(sel.fit_transform)([X], y) assert X2.shape[1] == 2 assert_best_scores_kept(sel) def test_selectpercentile_tiebreaking(): # Test if SelectPercentile selects the right n_features in case of ties. Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectPercentile(dummy_score, percentile=34) X1 = ignore_warnings(sel.fit_transform)([X], y) assert X1.shape[1] == 1 assert_best_scores_kept(sel) sel = SelectPercentile(dummy_score, percentile=67) X2 = ignore_warnings(sel.fit_transform)([X], y) assert X2.shape[1] == 2 assert_best_scores_kept(sel) def test_tied_pvalues(): # Test whether k-best and percentiles work with tied pvalues from chi2. # chi2 will return the same p-values for the following features, but it # will return different scores. X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) y = [0, 1] for perm in itertools.permutations((0, 1, 2)): X = X0[:, perm] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert Xt.shape == (2, 2) assert 9998 not in Xt Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert Xt.shape == (2, 2) assert 9998 not in Xt def test_scorefunc_multilabel(): # Test whether k-best and percentiles works with multilabels with chi2. 
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]]) y = [[1, 1], [0, 1], [1, 0]] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert Xt.shape == (3, 2) assert 0 not in Xt Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert Xt.shape == (3, 2) assert 0 not in Xt def test_tied_scores(): # Test for stable sorting in k-best with tied scores. X_train = np.array([[0, 0, 0], [1, 1, 1]]) y_train = [0, 1] for n_features in [1, 2, 3]: sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) X_test = sel.transform([[0, 1, 2]]) assert_array_equal(X_test[0], np.arange(3)[-n_features:]) def test_nans(): # Assert that SelectKBest and SelectPercentile can handle NaNs. # First feature has zero variance to confuse f_classif (ANOVA) and # make it return a NaN. X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] y = [1, 0, 1] for select in ( SelectKBest(f_classif, k=2), SelectPercentile(f_classif, percentile=67), ): ignore_warnings(select.fit)(X, y) assert_array_equal(select.get_support(indices=True), np.array([1, 2])) def test_invalid_k(): X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]] y = [1, 0, 1] msg = "k=4 is greater than n_features=3. All the features will be returned." with pytest.warns(UserWarning, match=msg): SelectKBest(k=4).fit(X, y) with pytest.warns(UserWarning, match=msg): GenericUnivariateSelect(mode="k_best", param=4).fit(X, y) def test_f_classif_constant_feature(): # Test that f_classif warns if a feature is constant throughout. 
X, y = make_classification(n_samples=10, n_features=5) X[:, 0] = 2.0 with pytest.warns(UserWarning): f_classif(X, y) def test_no_feature_selected(): rng = np.random.RandomState(0) # Generate random uncorrelated data: a strict univariate test should # rejects all the features X = rng.rand(40, 10) y = rng.randint(0, 4, size=40) strict_selectors = [ SelectFwe(alpha=0.01).fit(X, y), SelectFdr(alpha=0.01).fit(X, y), SelectFpr(alpha=0.01).fit(X, y), SelectPercentile(percentile=0).fit(X, y), SelectKBest(k=0).fit(X, y), ] for selector in strict_selectors: assert_array_equal(selector.get_support(), np.zeros(10)) with pytest.warns(UserWarning, match="No features were selected"): X_selected = selector.transform(X) assert X_selected.shape == (40, 0) def test_mutual_info_classif(): X, y = make_classification( n_samples=100, n_features=5, n_informative=1, n_redundant=1, n_repeated=0, n_classes=2, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0, ) # Test in KBest mode. univariate_filter = SelectKBest(mutual_info_classif, k=2) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(mutual_info_classif, mode="k_best", param=2) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_classif, percentile=40) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(mutual_info_classif, mode="percentile", param=40) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) def test_mutual_info_regression(): X, y = make_regression( n_samples=100, n_features=10, n_informative=2, shuffle=False, random_state=0, noise=10, ) # Test in KBest mode. 
univariate_filter = SelectKBest(mutual_info_regression, k=2) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = ( GenericUnivariateSelect(mutual_info_regression, mode="k_best", param=2) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_regression, percentile=20) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = ( GenericUnivariateSelect(mutual_info_regression, mode="percentile", param=20) .fit(X, y) .transform(X) ) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) def test_dataframe_output_dtypes(): """Check that the output datafarme dtypes are the same as the input. Non-regression test for gh-24860. """ pd = pytest.importorskip("pandas") X, y = load_iris(return_X_y=True, as_frame=True) X = X.astype( { "petal length (cm)": np.float32, "petal width (cm)": np.float64, } ) X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10) column_order = X.columns def selector(X, y): ranking = { "sepal length (cm)": 1, "sepal width (cm)": 2, "petal length (cm)": 3, "petal width (cm)": 4, "petal_width_binned": 5, } return np.asarray([ranking[name] for name in column_order]) univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas") output = univariate_filter.fit_transform(X, y) assert_array_equal( output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"] ) for name, dtype in output.dtypes.items(): assert dtype == X.dtypes[name] @pytest.mark.parametrize( "selector", [ SelectKBest(k=4), SelectPercentile(percentile=80), GenericUnivariateSelect(mode="k_best", param=4), GenericUnivariateSelect(mode="percentile", param=80), ], ) def test_unsupervised_filter(selector): """Check support for unsupervised 
feature selection for the filter that could require only `X`. """ rng = np.random.RandomState(0) X = rng.randn(10, 5) def score_func(X, y=None): return np.array([1, 1, 1, 1, 0]) selector.set_params(score_func=score_func) selector.fit(X) X_trans = selector.transform(X) assert_allclose(X_trans, X[:, :4]) X_trans = selector.fit_transform(X) assert_allclose(X_trans, X[:, :4])
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_from_model.py
sklearn/feature_selection/tests/test_from_model.py
import re import warnings from unittest.mock import Mock import numpy as np import pytest from sklearn import datasets from sklearn.base import BaseEstimator from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression from sklearn.datasets import make_friedman1, make_regression from sklearn.decomposition import PCA from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier from sklearn.exceptions import NotFittedError from sklearn.feature_selection import SelectFromModel from sklearn.linear_model import ( ElasticNet, ElasticNetCV, Lasso, LassoCV, LinearRegression, LogisticRegression, SGDClassifier, ) from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC from sklearn.utils._testing import ( MinimalClassifier, assert_allclose, assert_array_almost_equal, assert_array_equal, skip_if_32bit, ) class NaNTag(BaseEstimator): def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = True return tags class NoNaNTag(BaseEstimator): def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = False return tags class NaNTagRandomForest(RandomForestClassifier): def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = True return tags iris = datasets.load_iris() data, y = iris.data, iris.target def test_invalid_input(): clf = SGDClassifier( alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None ) for threshold in ["gobbledigook", ".5 * gobbledigook"]: model = SelectFromModel(clf, threshold=threshold) model.fit(data, y) with pytest.raises(ValueError): model.transform(data) def test_input_estimator_unchanged(): # Test that SelectFromModel fits on a clone of the estimator. 
est = RandomForestClassifier() transformer = SelectFromModel(estimator=est) transformer.fit(data, y) assert transformer.estimator is est @pytest.mark.parametrize( "max_features, err_type, err_msg", [ ( lambda X: 1.5, TypeError, "max_features must be an instance of int, not float.", ), ( lambda X: -1, ValueError, "max_features ==", ), ], ) def test_max_features_error(max_features, err_type, err_msg): err_msg = re.escape(err_msg) clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) with pytest.raises(err_type, match=err_msg): transformer.fit(data, y) @pytest.mark.parametrize("max_features", [0, 2, data.shape[1], None]) def test_inferred_max_features_integer(max_features): """Check max_features_ and output shape for integer max_features.""" clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) if max_features is not None: assert transformer.max_features_ == max_features assert X_trans.shape[1] == transformer.max_features_ else: assert not hasattr(transformer, "max_features_") assert X_trans.shape[1] == data.shape[1] @pytest.mark.parametrize( "max_features", [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], ) def test_inferred_max_features_callable(max_features): """Check max_features_ and output shape for callable max_features.""" clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) assert transformer.max_features_ == max_features(data) assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2]) def test_max_features_array_like(max_features): X = [ [0.87, -1.34, 0.31], [-2.79, -0.02, -0.85], 
[-1.34, -0.48, -2.55], [1.92, 1.48, 0.65], ] y = [0, 1, 0, 1] clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(X, y) assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.parametrize( "max_features", [lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1], ) def test_max_features_callable_data(max_features): """Tests that the callable passed to `fit` is called on X.""" clf = RandomForestClassifier(n_estimators=50, random_state=0) m = Mock(side_effect=max_features) transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf) transformer.fit_transform(data, y) m.assert_called_with(data) class FixedImportanceEstimator(BaseEstimator): def __init__(self, importances): self.importances = importances def fit(self, X, y=None): self.feature_importances_ = np.array(self.importances) def test_max_features(): # Test max_features parameter using various values X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, ) max_features = X.shape[1] est = RandomForestClassifier(n_estimators=50, random_state=0) transformer1 = SelectFromModel(estimator=est, threshold=-np.inf) transformer2 = SelectFromModel( estimator=est, max_features=max_features, threshold=-np.inf ) X_new1 = transformer1.fit_transform(X, y) X_new2 = transformer2.fit_transform(X, y) assert_allclose(X_new1, X_new2) # Test max_features against actual model. 
transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42)) X_new1 = transformer1.fit_transform(X, y) scores1 = np.abs(transformer1.estimator_.coef_) candidate_indices1 = np.argsort(-scores1, kind="mergesort") for n_features in range(1, X_new1.shape[1] + 1): transformer2 = SelectFromModel( estimator=Lasso(alpha=0.025, random_state=42), max_features=n_features, threshold=-np.inf, ) X_new2 = transformer2.fit_transform(X, y) scores2 = np.abs(transformer2.estimator_.coef_) candidate_indices2 = np.argsort(-scores2, kind="mergesort") assert_allclose( X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]] ) assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_) def test_max_features_tiebreak(): # Test if max_features can break tie among feature importance X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, ) max_features = X.shape[1] feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1]) for n_features in range(1, max_features + 1): transformer = SelectFromModel( FixedImportanceEstimator(feature_importances), max_features=n_features, threshold=-np.inf, ) X_new = transformer.fit_transform(X, y) selected_feature_indices = np.where(transformer._get_support_mask())[0] assert_array_equal(selected_feature_indices, np.arange(n_features)) assert X_new.shape[1] == n_features def test_threshold_and_max_features(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, ) est = RandomForestClassifier(n_estimators=50, random_state=0) transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf) X_new1 = transformer1.fit_transform(X, y) transformer2 = SelectFromModel(estimator=est, threshold=0.04) X_new2 = transformer2.fit_transform(X, y) transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04) 
X_new3 = transformer3.fit_transform(X, y) assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1]) selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :]) assert_allclose(X_new3, X[:, selected_indices[0]]) @skip_if_32bit def test_feature_importances(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, ) est = RandomForestClassifier(n_estimators=50, random_state=0) for threshold, func in zip(["mean", "median"], [np.mean, np.median]): transformer = SelectFromModel(estimator=est, threshold=threshold) transformer.fit(X, y) assert hasattr(transformer.estimator_, "feature_importances_") X_new = transformer.transform(X) assert X_new.shape[1] < X.shape[1] importances = transformer.estimator_.feature_importances_ feature_mask = np.abs(importances) > func(importances) assert_array_almost_equal(X_new, X[:, feature_mask]) def test_sample_weight(): # Ensure sample weights are passed to underlying estimator X, y = datasets.make_classification( n_samples=100, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, ) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 est = LogisticRegression(random_state=0, fit_intercept=False) transformer = SelectFromModel(estimator=est) transformer.fit(X, y, sample_weight=None) mask = transformer._get_support_mask() transformer.fit(X, y, sample_weight=sample_weight) weighted_mask = transformer._get_support_mask() assert not np.all(weighted_mask == mask) transformer.fit(X, y, sample_weight=3 * sample_weight) reweighted_mask = transformer._get_support_mask() assert np.all(weighted_mask == reweighted_mask) @pytest.mark.parametrize( "estimator", [ Lasso(alpha=0.1, random_state=42), LassoCV(random_state=42), ElasticNet(l1_ratio=1, random_state=42), ElasticNetCV(l1_ratio=[1], random_state=42), ], ) def test_coef_default_threshold(estimator): X, y = 
datasets.make_classification( n_samples=100, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, ) # For the Lasso and related models, the threshold defaults to 1e-5 transformer = SelectFromModel(estimator=estimator) transformer.fit(X, y) X_new = transformer.transform(X) mask = np.abs(transformer.estimator_.coef_) > 1e-5 assert_array_almost_equal(X_new, X[:, mask]) @skip_if_32bit def test_2d_coef(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, n_classes=4, ) est = LogisticRegression() for threshold, func in zip(["mean", "median"], [np.mean, np.median]): for order in [1, 2, np.inf]: # Fit SelectFromModel a multi-class problem transformer = SelectFromModel( estimator=LogisticRegression(), threshold=threshold, norm_order=order ) transformer.fit(X, y) assert hasattr(transformer.estimator_, "coef_") X_new = transformer.transform(X) assert X_new.shape[1] < X.shape[1] # Manually check that the norm is correctly performed est.fit(X, y) importances = np.linalg.norm(est.coef_, axis=0, ord=order) feature_mask = importances > func(importances) assert_array_almost_equal(X_new, X[:, feature_mask]) def test_partial_fit(): est = SGDClassifier( random_state=0, shuffle=False, max_iter=5, tol=None, learning_rate="pa1" ) transformer = SelectFromModel(estimator=est) transformer.partial_fit(data, y, classes=np.unique(y)) old_model = transformer.estimator_ transformer.partial_fit(data, y, classes=np.unique(y)) new_model = transformer.estimator_ assert old_model is new_model X_transform = transformer.transform(data) transformer.fit(np.vstack((data, data)), np.concatenate((y, y))) assert_array_almost_equal(X_transform, transformer.transform(data)) # check that if est doesn't have partial_fit, neither does SelectFromModel transformer = SelectFromModel(estimator=RandomForestClassifier()) assert not hasattr(transformer, "partial_fit") def 
test_calling_fit_reinitializes(): est = LinearSVC(random_state=0) transformer = SelectFromModel(estimator=est) transformer.fit(data, y) transformer.set_params(estimator__C=100) transformer.fit(data, y) assert transformer.estimator_.C == 100 def test_prefit(): # Test all possible combinations of the prefit parameter. # Passing a prefit parameter with the selected model # and fitting a unfit model with prefit=False should give same results. clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) model = SelectFromModel(clf) model.fit(data, y) X_transform = model.transform(data) clf.fit(data, y) model = SelectFromModel(clf, prefit=True) assert_array_almost_equal(model.transform(data), X_transform) model.fit(data, y) assert model.estimator_ is not clf # Check that the model is rewritten if prefit=False and a fitted model is # passed model = SelectFromModel(clf, prefit=False) model.fit(data, y) assert_array_almost_equal(model.transform(data), X_transform) # Check that passing an unfitted estimator with `prefit=True` raises a # `ValueError` clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) model = SelectFromModel(clf, prefit=True) err_msg = "When `prefit=True`, `estimator` is expected to be a fitted estimator." 
with pytest.raises(NotFittedError, match=err_msg): model.fit(data, y) with pytest.raises(NotFittedError, match=err_msg): model.partial_fit(data, y) with pytest.raises(NotFittedError, match=err_msg): model.transform(data) # Check that the internal parameters of prefitted model are not changed # when calling `fit` or `partial_fit` with `prefit=True` clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, tol=None).fit(data, y) model = SelectFromModel(clf, prefit=True) model.fit(data, y) assert_allclose(model.estimator_.coef_, clf.coef_) model.partial_fit(data, y) assert_allclose(model.estimator_.coef_, clf.coef_) def test_prefit_max_features(): """Check the interaction between `prefit` and `max_features`.""" # case 1: an error should be raised at `transform` if `fit` was not called to # validate the attributes estimator = RandomForestClassifier(n_estimators=5, random_state=0) estimator.fit(data, y) model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1]) err_msg = ( "When `prefit=True` and `max_features` is a callable, call `fit` " "before calling `transform`." ) with pytest.raises(NotFittedError, match=err_msg): model.transform(data) # case 2: `max_features` is not validated and different from an integer # FIXME: we cannot validate the upper bound of the attribute at transform # and we should force calling `fit` if we intend to force the attribute # to have such an upper bound. max_features = 2.5 model.set_params(max_features=max_features) with pytest.raises(ValueError, match="`max_features` must be an integer"): model.transform(data) def test_get_feature_names_out_elasticnetcv(): """Check if ElasticNetCV works with a list of floats. 
Non-regression test for #30936.""" X, y = make_regression(n_features=5, n_informative=3, random_state=0) estimator = ElasticNetCV(l1_ratio=[0.25, 0.5, 0.75], random_state=0) selector = SelectFromModel(estimator=estimator) selector.fit(X, y) names_out = selector.get_feature_names_out() mask = selector.get_support() expected = np.array([f"x{i}" for i in range(X.shape[1])])[mask] assert_array_equal(names_out, expected) def test_prefit_get_feature_names_out(): """Check the interaction between prefit and the feature names.""" clf = RandomForestClassifier(n_estimators=2, random_state=0) clf.fit(data, y) model = SelectFromModel(clf, prefit=True, max_features=1) name = type(model).__name__ err_msg = ( f"This {name} instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this estimator." ) with pytest.raises(NotFittedError, match=err_msg): model.get_feature_names_out() model.fit(data, y) feature_names = model.get_feature_names_out() assert feature_names == ["x3"] def test_threshold_string(): est = RandomForestClassifier(n_estimators=50, random_state=0) model = SelectFromModel(est, threshold="0.5*mean") model.fit(data, y) X_transform = model.transform(data) # Calculate the threshold from the estimator directly. est.fit(data, y) threshold = 0.5 * np.mean(est.feature_importances_) mask = est.feature_importances_ > threshold assert_array_almost_equal(X_transform, data[:, mask]) def test_threshold_without_refitting(): # Test that the threshold can be set without refitting the model. clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) model = SelectFromModel(clf, threshold="0.1 * mean") model.fit(data, y) X_transform = model.transform(data) # Set a higher threshold to filter out more features. model.threshold = "1.0 * mean" assert X_transform.shape[1] > model.transform(data).shape[1] def test_fit_accepts_nan_inf(): # Test that fit doesn't check for np.inf and np.nan values. 
clf = HistGradientBoostingClassifier(random_state=0) model = SelectFromModel(estimator=clf) nan_data = data.copy() nan_data[0] = np.nan nan_data[1] = np.inf model.fit(data, y) def test_transform_accepts_nan_inf(): # Test that transform doesn't check for np.inf and np.nan values. clf = NaNTagRandomForest(n_estimators=100, random_state=0) nan_data = data.copy() model = SelectFromModel(estimator=clf) model.fit(nan_data, y) nan_data[0] = np.nan nan_data[1] = np.inf model.transform(nan_data) def test_allow_nan_tag_comes_from_estimator(): allow_nan_est = NaNTag() model = SelectFromModel(estimator=allow_nan_est) assert model.__sklearn_tags__().input_tags.allow_nan is True no_nan_est = NoNaNTag() model = SelectFromModel(estimator=no_nan_est) assert model.__sklearn_tags__().input_tags.allow_nan is False def _pca_importances(pca_estimator): return np.abs(pca_estimator.explained_variance_) @pytest.mark.parametrize( "estimator, importance_getter", [ ( make_pipeline(PCA(random_state=0), LogisticRegression()), "named_steps.logisticregression.coef_", ), (PCA(random_state=0), _pca_importances), ], ) def test_importance_getter(estimator, importance_getter): selector = SelectFromModel( estimator, threshold="mean", importance_getter=importance_getter ) selector.fit(data, y) assert selector.transform(data).shape[1] == 1 @pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression]) def test_select_from_model_pls(PLSEstimator): """Check the behaviour of SelectFromModel with PLS estimators. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/12410 """ X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) estimator = PLSEstimator(n_components=1) model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y) assert model.score(X, y) > 0.5 def test_estimator_does_not_support_feature_names(): """SelectFromModel works with estimators that do not support feature_names_in_. Non-regression test for #21949. 
""" pytest.importorskip("pandas") X, y = datasets.load_iris(as_frame=True, return_X_y=True) all_feature_names = set(X.columns) def importance_getter(estimator): return np.arange(X.shape[1]) selector = SelectFromModel( MinimalClassifier(), importance_getter=importance_getter ).fit(X, y) # selector learns the feature names itself assert_array_equal(selector.feature_names_in_, X.columns) feature_names_out = set(selector.get_feature_names_out()) assert feature_names_out < all_feature_names with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) selector.transform(X.iloc[1:3]) @pytest.mark.parametrize("as_frame", [True, False]) def test_partial_fit_validate_feature_names(as_frame): """Test that partial_fit from SelectFromModel validates `feature_names_in_`.""" pytest.importorskip("pandas") X, y = datasets.load_iris(as_frame=as_frame, return_X_y=True) selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit( X, y, classes=[0, 1, 2] ) if as_frame: assert_array_equal(selector.feature_names_in_, X.columns) else: assert not hasattr(selector, "feature_names_in_") def test_from_model_estimator_attribute_error(): """Check that we raise the proper AttributeError when the estimator does not implement the `partial_fit` method, which is decorated with `available_if`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/28108 """ # `LinearRegression` does not implement 'partial_fit' and should raise an # AttributeError from_model = SelectFromModel(estimator=LinearRegression()) outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'" inner_msg = "'LinearRegression' object has no attribute 'partial_fit'" with pytest.raises(AttributeError, match=outer_msg) as exec_info: from_model.fit(data, y).partial_fit(data) assert isinstance(exec_info.value.__cause__, AttributeError) assert inner_msg in str(exec_info.value.__cause__)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_base.py
sklearn/feature_selection/tests/test_base.py
import numpy as np import pytest from numpy.testing import assert_array_equal from sklearn.base import BaseEstimator from sklearn.feature_selection._base import SelectorMixin from sklearn.utils.fixes import CSC_CONTAINERS from sklearn.utils.validation import validate_data class StepSelector(SelectorMixin, BaseEstimator): """Retain every `step` features (beginning with 0). If `step < 1`, then no features are selected. """ def __init__(self, step=2): self.step = step def fit(self, X, y=None): X = validate_data(self, X, accept_sparse="csc") return self def _get_support_mask(self): mask = np.zeros(self.n_features_in_, dtype=bool) if self.step >= 1: mask[:: self.step] = True return mask support = [True, False] * 5 support_inds = [0, 2, 4, 6, 8] X = np.arange(20).reshape(2, 10) Xt = np.arange(0, 20, 2).reshape(2, 5) Xinv = X.copy() Xinv[:, 1::2] = 0 y = [0, 1] feature_names = list("ABCDEFGHIJ") feature_names_t = feature_names[::2] feature_names_inv = np.array(feature_names) feature_names_inv[1::2] = "" def test_transform_dense(): sel = StepSelector() Xt_actual = sel.fit(X, y).transform(X) Xt_actual2 = StepSelector().fit_transform(X, y) assert_array_equal(Xt, Xt_actual) assert_array_equal(Xt, Xt_actual2) # Check dtype matches assert np.int32 == sel.transform(X.astype(np.int32)).dtype assert np.float32 == sel.transform(X.astype(np.float32)).dtype # Check 1d list and other dtype: names_t_actual = sel.transform([feature_names]) assert_array_equal(feature_names_t, names_t_actual.ravel()) # Check wrong shape raises error with pytest.raises(ValueError): sel.transform(np.array([[1], [2]])) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_transform_sparse(csc_container): X_sp = csc_container(X) sel = StepSelector() Xt_actual = sel.fit(X_sp).transform(X_sp) Xt_actual2 = sel.fit_transform(X_sp) assert_array_equal(Xt, Xt_actual.toarray()) assert_array_equal(Xt, Xt_actual2.toarray()) # Check dtype matches assert np.int32 == sel.transform(X_sp.astype(np.int32)).dtype 
assert np.float32 == sel.transform(X_sp.astype(np.float32)).dtype # Check wrong shape raises error with pytest.raises(ValueError): sel.transform(np.array([[1], [2]])) def test_inverse_transform_dense(): sel = StepSelector() Xinv_actual = sel.fit(X, y).inverse_transform(Xt) assert_array_equal(Xinv, Xinv_actual) # Check dtype matches assert np.int32 == sel.inverse_transform(Xt.astype(np.int32)).dtype assert np.float32 == sel.inverse_transform(Xt.astype(np.float32)).dtype # Check 1d list and other dtype: names_inv_actual = sel.inverse_transform([feature_names_t]) assert_array_equal(feature_names_inv, names_inv_actual.ravel()) # Check wrong shape raises error with pytest.raises(ValueError): sel.inverse_transform(np.array([[1], [2]])) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_inverse_transform_sparse(csc_container): X_sp = csc_container(X) Xt_sp = csc_container(Xt) sel = StepSelector() Xinv_actual = sel.fit(X_sp).inverse_transform(Xt_sp) assert_array_equal(Xinv, Xinv_actual.toarray()) # Check dtype matches assert np.int32 == sel.inverse_transform(Xt_sp.astype(np.int32)).dtype assert np.float32 == sel.inverse_transform(Xt_sp.astype(np.float32)).dtype # Check wrong shape raises error with pytest.raises(ValueError): sel.inverse_transform(np.array([[1], [2]])) def test_get_support(): sel = StepSelector() sel.fit(X, y) assert_array_equal(support, sel.get_support()) assert_array_equal(support_inds, sel.get_support(indices=True)) def test_output_dataframe(): """Check output dtypes for dataframes is consistent with the input dtypes.""" pd = pytest.importorskip("pandas") X = pd.DataFrame( { "a": pd.Series([1.0, 2.4, 4.5], dtype=np.float32), "b": pd.Series(["a", "b", "a"], dtype="category"), "c": pd.Series(["j", "b", "b"], dtype="category"), "d": pd.Series([3.0, 2.4, 1.2], dtype=np.float64), } ) for step in [2, 3]: sel = StepSelector(step=step).set_output(transform="pandas") sel.fit(X) output = sel.transform(X) for name, dtype in output.dtypes.items(): 
assert dtype == X.dtypes[name] # step=0 will select nothing sel0 = StepSelector(step=0).set_output(transform="pandas") sel0.fit(X, y) msg = "No features were selected" with pytest.warns(UserWarning, match=msg): output0 = sel0.transform(X) assert_array_equal(output0.index, X.index) assert output0.shape == (X.shape[0], 0)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_sequential.py
sklearn/feature_selection/tests/test_sequential.py
import numpy as np import pytest from numpy.testing import assert_array_equal from sklearn.cluster import KMeans from sklearn.datasets import make_blobs, make_classification, make_regression from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.feature_selection import SequentialFeatureSelector from sklearn.linear_model import LinearRegression from sklearn.model_selection import LeaveOneGroupOut, cross_val_score from sklearn.neighbors import KNeighborsClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.utils.fixes import CSR_CONTAINERS def test_bad_n_features_to_select(): n_features = 5 X, y = make_regression(n_features=n_features) sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=n_features) with pytest.raises(ValueError, match="n_features_to_select must be < n_features"): sfs.fit(X, y) @pytest.mark.parametrize("direction", ("forward", "backward")) @pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto")) def test_n_features_to_select(direction, n_features_to_select): # Make sure n_features_to_select is respected n_features = 10 X, y = make_regression(n_features=n_features, random_state=0) sfs = SequentialFeatureSelector( LinearRegression(), n_features_to_select=n_features_to_select, direction=direction, cv=2, ) sfs.fit(X, y) if n_features_to_select == "auto": n_features_to_select = n_features // 2 assert sfs.get_support(indices=True).shape[0] == n_features_to_select assert sfs.n_features_to_select_ == n_features_to_select assert sfs.transform(X).shape[1] == n_features_to_select @pytest.mark.parametrize("direction", ("forward", "backward")) def test_n_features_to_select_auto(direction): """Check the behaviour of `n_features_to_select="auto"` with different values for the parameter `tol`. 
""" n_features = 10 tol = 1e-3 X, y = make_regression(n_features=n_features, random_state=0) sfs = SequentialFeatureSelector( LinearRegression(), n_features_to_select="auto", tol=tol, direction=direction, cv=2, ) sfs.fit(X, y) max_features_to_select = n_features - 1 assert sfs.get_support(indices=True).shape[0] <= max_features_to_select assert sfs.n_features_to_select_ <= max_features_to_select assert sfs.transform(X).shape[1] <= max_features_to_select assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_ @pytest.mark.parametrize("direction", ("forward", "backward")) def test_n_features_to_select_stopping_criterion(direction): """Check the behaviour stopping criterion for feature selection depending on the values of `n_features_to_select` and `tol`. When `direction` is `'forward'`, select a new features at random among those not currently selected in selector.support_, build a new version of the data that includes all the features in selector.support_ + this newly selected feature. And check that the cross-validation score of the model trained on this new dataset variant is lower than the model with the selected forward selected features or at least does not improve by more than the tol margin. When `direction` is `'backward'`, instead of adding a new feature to selector.support_, try to remove one of those selected features at random And check that the cross-validation score is either decreasing or not improving by more than the tol margin. 
""" X, y = make_regression(n_features=50, n_informative=10, random_state=0) tol = 1e-3 sfs = SequentialFeatureSelector( LinearRegression(), n_features_to_select="auto", tol=tol, direction=direction, cv=2, ) sfs.fit(X, y) selected_X = sfs.transform(X) rng = np.random.RandomState(0) added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True))) added_X = np.hstack( [ selected_X, (X[:, rng.choice(added_candidates)])[:, np.newaxis], ] ) removed_candidate = rng.choice(list(range(sfs.n_features_to_select_))) removed_X = np.delete(selected_X, removed_candidate, axis=1) plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean() sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean() added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean() removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean() assert sfs_cv_score >= plain_cv_score if direction == "forward": assert (sfs_cv_score - added_cv_score) <= tol assert (sfs_cv_score - removed_cv_score) >= tol else: assert (added_cv_score - sfs_cv_score) <= tol assert (removed_cv_score - sfs_cv_score) <= tol @pytest.mark.parametrize("direction", ("forward", "backward")) @pytest.mark.parametrize( "n_features_to_select, expected", ( (0.1, 1), (1.0, 10), (0.5, 5), ), ) def test_n_features_to_select_float(direction, n_features_to_select, expected): # Test passing a float as n_features_to_select X, y = make_regression(n_features=10) sfs = SequentialFeatureSelector( LinearRegression(), n_features_to_select=n_features_to_select, direction=direction, cv=2, ) sfs.fit(X, y) assert sfs.n_features_to_select_ == expected @pytest.mark.parametrize("seed", range(10)) @pytest.mark.parametrize("direction", ("forward", "backward")) @pytest.mark.parametrize( "n_features_to_select, expected_selected_features", [ (2, [0, 2]), # f1 is dropped since it has no predictive power (1, [2]), # f2 is more predictive than f0 so it's kept ], ) def 
test_sanity(seed, direction, n_features_to_select, expected_selected_features): # Basic sanity check: 3 features, only f0 and f2 are correlated with the # target, f2 having a stronger correlation than f0. We expect f1 to be # dropped, and f2 to always be selected. rng = np.random.RandomState(seed) n_samples = 100 X = rng.randn(n_samples, 3) y = 3 * X[:, 0] - 10 * X[:, 2] sfs = SequentialFeatureSelector( LinearRegression(), n_features_to_select=n_features_to_select, direction=direction, cv=2, ) sfs.fit(X, y) assert_array_equal(sfs.get_support(indices=True), expected_selected_features) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_sparse_support(csr_container): # Make sure sparse data is supported X, y = make_regression(n_features=10) X = csr_container(X) sfs = SequentialFeatureSelector( LinearRegression(), n_features_to_select="auto", cv=2 ) sfs.fit(X, y) sfs.transform(X) def test_nan_support(): # Make sure nans are OK if the underlying estimator supports nans rng = np.random.RandomState(0) n_samples, n_features = 40, 4 X, y = make_regression(n_samples, n_features, random_state=0) nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool) X[nan_mask] = np.nan sfs = SequentialFeatureSelector( HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2 ) sfs.fit(X, y) sfs.transform(X) with pytest.raises(ValueError, match="Input X contains NaN"): # LinearRegression does not support nans SequentialFeatureSelector( LinearRegression(), n_features_to_select="auto", cv=2 ).fit(X, y) def test_pipeline_support(): # Make sure that pipelines can be passed into SFS and that SFS can be # passed into a pipeline n_samples, n_features = 50, 3 X, y = make_regression(n_samples, n_features, random_state=0) # pipeline in SFS pipe = make_pipeline(StandardScaler(), LinearRegression()) sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2) sfs.fit(X, y) sfs.transform(X) # SFS in pipeline sfs = SequentialFeatureSelector( 
LinearRegression(), n_features_to_select="auto", cv=2 ) pipe = make_pipeline(StandardScaler(), sfs) pipe.fit(X, y) pipe.transform(X) @pytest.mark.parametrize("n_features_to_select", (2, 3)) def test_unsupervised_model_fit(n_features_to_select): # Make sure that models without classification labels are not being # validated X, y = make_blobs(n_features=4) sfs = SequentialFeatureSelector( KMeans(n_init=1), n_features_to_select=n_features_to_select, ) sfs.fit(X) assert sfs.transform(X).shape[1] == n_features_to_select @pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3)) def test_no_y_validation_model_fit(y): # Make sure that other non-conventional y labels are not accepted X, clusters = make_blobs(n_features=6) sfs = SequentialFeatureSelector( KMeans(), n_features_to_select=3, ) with pytest.raises((TypeError, ValueError)): sfs.fit(X, y) def test_forward_neg_tol_error(): """Check that we raise an error when tol<0 and direction='forward'""" X, y = make_regression(n_features=10, random_state=0) sfs = SequentialFeatureSelector( LinearRegression(), n_features_to_select="auto", direction="forward", tol=-1e-3, ) with pytest.raises(ValueError, match="tol must be strictly positive"): sfs.fit(X, y) def test_backward_neg_tol(): """Check that SequentialFeatureSelector works negative tol non-regression test for #25525 """ X, y = make_regression(n_features=10, random_state=0) lr = LinearRegression() initial_score = lr.fit(X, y).score(X, y) sfs = SequentialFeatureSelector( lr, n_features_to_select="auto", direction="backward", tol=-1e-3, ) Xr = sfs.fit_transform(X, y) new_score = lr.fit(Xr, y).score(Xr, y) assert 0 < sfs.get_support().sum() < X.shape[1] assert new_score < initial_score def test_cv_generator_support(): """Check that no exception raised when cv is generator non-regression test for #25957 """ X, y = make_classification(random_state=0) groups = np.zeros_like(y, dtype=int) groups[y.size // 2 :] = 1 cv = LeaveOneGroupOut() splits = cv.split(X, y, 
groups=groups) knc = KNeighborsClassifier(n_neighbors=5) sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits) sfs.fit(X, y) def test_fit_rejects_params_with_no_routing_enabled(): X, y = make_classification(random_state=42) est = LinearRegression() sfs = SequentialFeatureSelector(estimator=est) with pytest.raises(ValueError, match="is only supported if"): sfs.fit(X, y, sample_weight=np.ones_like(y))
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_variance_threshold.py
sklearn/feature_selection/tests/test_variance_threshold.py
import numpy as np import pytest from sklearn.feature_selection import VarianceThreshold from sklearn.utils._testing import assert_array_equal from sklearn.utils.fixes import BSR_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]] data2 = [[-0.13725701]] * 10 @pytest.mark.parametrize( "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS ) def test_zero_variance(sparse_container): # Test VarianceThreshold with default setting, zero variance. X = data if sparse_container is None else sparse_container(data) sel = VarianceThreshold().fit(X) assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True)) def test_zero_variance_value_error(): # Test VarianceThreshold with default setting, zero variance, error cases. with pytest.raises(ValueError): VarianceThreshold().fit([[0, 1, 2, 3]]) with pytest.raises(ValueError): VarianceThreshold().fit([[0, 1], [0, 1]]) @pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS) def test_variance_threshold(sparse_container): # Test VarianceThreshold with custom variance. X = data if sparse_container is None else sparse_container(data) X = VarianceThreshold(threshold=0.4).fit_transform(X) assert (len(data), 1) == X.shape @pytest.mark.skipif( np.var(data2) == 0, reason=( "This test is not valid for this platform, " "as it relies on numerical instabilities." ), ) @pytest.mark.parametrize( "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS ) def test_zero_variance_floating_point_error(sparse_container): # Test that VarianceThreshold(0.0).fit eliminates features that have # the same value in every sample, even when floating point errors # cause np.var not to be 0 for the feature. 
# See #13691 X = data2 if sparse_container is None else sparse_container(data2) msg = "No feature in X meets the variance threshold 0.00000" with pytest.raises(ValueError, match=msg): VarianceThreshold().fit(X) @pytest.mark.parametrize( "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS ) def test_variance_nan(sparse_container): arr = np.array(data, dtype=np.float64) # add single NaN and feature should still be included arr[0, 0] = np.nan # make all values in feature NaN and feature should be rejected arr[:, 1] = np.nan X = arr if sparse_container is None else sparse_container(arr) sel = VarianceThreshold().fit(X) assert_array_equal([0, 3, 4], sel.get_support(indices=True))
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/__init__.py
sklearn/feature_selection/tests/__init__.py
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_selection/tests/test_chi2.py
sklearn/feature_selection/tests/test_chi2.py
""" Tests for chi2, currently the only feature selection function designed specifically to work with sparse matrices. """ import warnings import numpy as np import pytest import scipy.stats from sklearn.feature_selection import SelectKBest, chi2 from sklearn.feature_selection._univariate_selection import _chisquare from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS # Feature 0 is highly informative for class 1; # feature 1 is the same everywhere; # feature 2 is a bit informative for class 2. X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]] y = [0, 1, 2, 2] def mkchi2(k): """Make k-best chi2 selector""" return SelectKBest(chi2, k=k) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_chi2(csr_container): # Test Chi2 feature extraction chi2 = mkchi2(k=1).fit(X, y) chi2 = mkchi2(k=1).fit(X, y) assert_array_equal(chi2.get_support(indices=True), [0]) assert_array_equal(chi2.transform(X), np.array(X)[:, [0]]) chi2 = mkchi2(k=2).fit(X, y) assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) Xsp = csr_container(X, dtype=np.float64) chi2 = mkchi2(k=2).fit(Xsp, y) assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) Xtrans = chi2.transform(Xsp) assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2]) # == doesn't work on scipy.sparse matrices Xtrans = Xtrans.toarray() Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray() assert_array_almost_equal(Xtrans, Xtrans2) @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_chi2_coo(coo_container): # Check that chi2 works with a COO matrix # (as returned by CountVectorizer, DictVectorizer) Xcoo = coo_container(X) mkchi2(k=2).fit_transform(Xcoo, y) # if we got here without an exception, we're safe @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_chi2_negative(csr_container): # Check for proper error on negative numbers in the input X. 
X, y = [[0, 1], [-1e-20, 1]], [0, 1] for X in (X, np.array(X), csr_container(X)): with pytest.raises(ValueError): chi2(X, y) def test_chi2_unused_feature(): # Unused feature should evaluate to NaN # and should issue no runtime warning with warnings.catch_warnings(record=True) as warned: warnings.simplefilter("always") chi, p = chi2([[1, 0], [0, 0]], [1, 0]) for w in warned: if "divide by zero" in repr(w): raise AssertionError("Found unexpected warning %s" % w) assert_array_equal(chi, [1, np.nan]) assert_array_equal(p[1], np.nan) def test_chisquare(): # Test replacement for scipy.stats.chisquare against the original. obs = np.array([[2.0, 2.0], [1.0, 1.0]]) exp = np.array([[1.5, 1.5], [1.5, 1.5]]) # call SciPy first because our version overwrites obs chi_scp, p_scp = scipy.stats.chisquare(obs, exp) chi_our, p_our = _chisquare(obs, exp) assert_array_almost_equal(chi_scp, chi_our) assert_array_almost_equal(p_scp, p_our)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/_base.py
sklearn/impute/_base.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numbers import warnings from collections import Counter from functools import partial from typing import Callable import numpy as np import numpy.ma as ma from scipy import sparse as sp from sklearn.base import BaseEstimator, TransformerMixin, _fit_context from sklearn.utils._mask import _get_mask from sklearn.utils._missing import is_pandas_na, is_scalar_nan from sklearn.utils._param_validation import MissingValues, StrOptions from sklearn.utils.fixes import _mode from sklearn.utils.sparsefuncs import _get_median from sklearn.utils.validation import ( FLOAT_DTYPES, _check_feature_names_in, _check_n_features, check_is_fitted, validate_data, ) def _check_inputs_dtype(X, missing_values): if is_pandas_na(missing_values): # Allow using `pd.NA` as missing values to impute numerical arrays. return if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real): raise ValueError( "'X' and 'missing_values' types are expected to be" " both numerical. Got X.dtype={} and " " type(missing_values)={}.".format(X.dtype, type(missing_values)) ) def _safe_min(items): """Compute the minimum of a list of potentially non-comparable values. If values cannot be directly compared due to type incompatibility, the object with the lowest string representation is returned. """ try: return min(items) except TypeError as e: if "'<' not supported between" in str(e): return min(items, key=lambda x: (str(type(x)), str(x))) raise # pragma: no cover def _most_frequent(array, extra_value, n_repeat): """Compute the most frequent value in a 1d array extended with [extra_value] * n_repeat, where extra_value is assumed to be not part of the array.""" # Compute the most frequent value in array only if array.size > 0: if array.dtype == object: # scipy.stats.mode is slow with object dtype array. 
# Python Counter is more efficient counter = Counter(array) most_frequent_count = counter.most_common(1)[0][1] # tie breaking similarly to scipy.stats.mode most_frequent_value = _safe_min( [ value for value, count in counter.items() if count == most_frequent_count ] ) else: mode = _mode(array) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 # Compare to array + [extra_value] * n_repeat if most_frequent_count == 0 and n_repeat == 0: return np.nan elif most_frequent_count < n_repeat: return extra_value elif most_frequent_count > n_repeat: return most_frequent_value elif most_frequent_count == n_repeat: # tie breaking similarly to scipy.stats.mode return _safe_min([most_frequent_value, extra_value]) class _BaseImputer(TransformerMixin, BaseEstimator): """Base class for all imputers. It adds automatically support for `add_indicator`. """ _parameter_constraints: dict = { "missing_values": [MissingValues()], "add_indicator": ["boolean"], "keep_empty_features": ["boolean"], } def __init__( self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False ): self.missing_values = missing_values self.add_indicator = add_indicator self.keep_empty_features = keep_empty_features def _fit_indicator(self, X): """Fit a MissingIndicator.""" if self.add_indicator: self.indicator_ = MissingIndicator( missing_values=self.missing_values, error_on_new=False ) self.indicator_._fit(X, precomputed=True) else: self.indicator_ = None def _transform_indicator(self, X): """Compute the indicator mask.' Note that X must be the original data as passed to the imputer before any imputation, since imputation may be done inplace in some cases. 
""" if self.add_indicator: if not hasattr(self, "indicator_"): raise ValueError( "Make sure to call _fit_indicator before _transform_indicator" ) return self.indicator_.transform(X) def _concatenate_indicator(self, X_imputed, X_indicator): """Concatenate indicator mask with the imputed data.""" if not self.add_indicator: return X_imputed if sp.issparse(X_imputed): # sp.hstack may result in different formats between sparse arrays and # matrices; specify the format to keep consistent behavior hstack = partial(sp.hstack, format=X_imputed.format) else: hstack = np.hstack if X_indicator is None: raise ValueError( "Data from the missing indicator are not provided. Call " "_fit_indicator and _transform_indicator in the imputer " "implementation." ) return hstack((X_imputed, X_indicator)) def _concatenate_indicator_feature_names_out(self, names, input_features): if not self.add_indicator: return names indicator_names = self.indicator_.get_feature_names_out(input_features) return np.concatenate([names, indicator_names]) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = is_scalar_nan(self.missing_values) return tags class SimpleImputer(_BaseImputer): """Univariate imputer for completing missing values with simple strategies. Replace missing values using a descriptive statistic (e.g. mean, median, or most frequent) along each column, or using a constant value. Read more in the :ref:`User Guide <impute>`. .. versionadded:: 0.20 `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer` estimator which is now removed. Parameters ---------- missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` can be set to either `np.nan` or `pd.NA`. strategy : str or Callable, default='mean' The imputation strategy. 
- If "mean", then replace missing values using the mean along each column. Can only be used with numeric data. - If "median", then replace missing values using the median along each column. Can only be used with numeric data. - If "most_frequent", then replace missing using the most frequent value along each column. Can be used with strings or numeric data. If there is more than one such value, only the smallest is returned. - If "constant", then replace missing values with fill_value. Can be used with strings or numeric data. - If an instance of Callable, then replace missing values using the scalar statistic returned by running the callable over a dense 1d array containing non-missing values of each column. .. versionadded:: 0.20 strategy="constant" for fixed value imputation. .. versionadded:: 1.5 strategy=callable for custom value imputation. fill_value : str or numerical value, default=None When strategy == "constant", `fill_value` is used to replace all occurrences of missing_values. For string or object data types, `fill_value` must be a string. If `None`, `fill_value` will be 0 when imputing numerical data and "missing_value" for strings or object data types. copy : bool, default=True If True, a copy of X will be created. If False, imputation will be done in-place whenever possible. Note that, in the following cases, a new copy will always be made, even if `copy=False`: - If `X` is not an array of floating values; - If `X` is encoded as a CSR matrix; - If `add_indicator=True`. add_indicator : bool, default=False If True, a :class:`MissingIndicator` transform will stack onto output of the imputer's transform. This allows a predictive estimator to account for missingness despite imputation. If a feature has no missing values at fit/train time, the feature won't appear on the missing indicator even if there are missing values at transform/test time. 
keep_empty_features : bool, default=False If True, features that consist exclusively of missing values when `fit` is called are returned in results when `transform` is called. The imputed value is always `0` except when `strategy="constant"` in which case `fill_value` will be used instead. .. versionadded:: 1.2 Attributes ---------- statistics_ : array of shape (n_features,) The imputation fill value for each feature. Computing statistics can result in `np.nan` values. During :meth:`transform`, features corresponding to `np.nan` statistics will be discarded. indicator_ : :class:`~sklearn.impute.MissingIndicator` Indicator used to add binary indicators for missing values. `None` if `add_indicator=False`. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- IterativeImputer : Multivariate imputer that estimates values to impute for each feature with missing values from all the others. KNNImputer : Multivariate imputer that estimates missing features using nearest samples. Notes ----- Columns which only contained missing values at :meth:`fit` are discarded upon :meth:`transform` if strategy is not `"constant"`. In a prediction context, simple imputation usually performs poorly when associated with a weak learner. However, with a powerful learner, it can lead to as good or better performance than complex imputation such as :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`. Examples -------- >>> import numpy as np >>> from sklearn.impute import SimpleImputer >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean') >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) SimpleImputer() >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] >>> print(imp_mean.transform(X)) [[ 7. 2. 3. ] [ 4. 
3.5 6. ] [10. 3.5 9. ]] For a more detailed example see :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`. """ _parameter_constraints: dict = { **_BaseImputer._parameter_constraints, "strategy": [ StrOptions({"mean", "median", "most_frequent", "constant"}), callable, ], "fill_value": "no_validation", # any object is valid "copy": ["boolean"], } def __init__( self, *, missing_values=np.nan, strategy="mean", fill_value=None, copy=True, add_indicator=False, keep_empty_features=False, ): super().__init__( missing_values=missing_values, add_indicator=add_indicator, keep_empty_features=keep_empty_features, ) self.strategy = strategy self.fill_value = fill_value self.copy = copy def _validate_input(self, X, in_fit): if self.strategy in ("most_frequent", "constant"): # If input is a list of strings, dtype = object. # Otherwise ValueError is raised in SimpleImputer # with strategy='most_frequent' or 'constant' # because the list is converted to Unicode numpy array if isinstance(X, list) and any( isinstance(elem, str) for row in X for elem in row ): dtype = object else: dtype = None else: dtype = FLOAT_DTYPES if not in_fit and self._fit_dtype.kind == "O": # Use object dtype if fitted on object dtypes dtype = self._fit_dtype if is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values): ensure_all_finite = "allow-nan" else: ensure_all_finite = True try: X = validate_data( self, X, reset=in_fit, accept_sparse="csc", dtype=dtype, force_writeable=True if not in_fit else None, ensure_all_finite=ensure_all_finite, copy=self.copy, ) except ValueError as ve: if "could not convert" in str(ve): new_ve = ValueError( "Cannot use {} strategy with non-numeric data:\n{}".format( self.strategy, ve ) ) raise new_ve from None else: raise ve if in_fit: # Use the dtype seen in `fit` for non-`fit` conversion self._fit_dtype = X.dtype _check_inputs_dtype(X, self.missing_values) if X.dtype.kind not in ("i", "u", "f", "O"): raise ValueError( "SimpleImputer does not support 
data with dtype " "{0}. Please provide either a numeric array (with" " a floating point or integer dtype) or " "categorical data represented either as an array " "with integer dtype or an array of string values " "with an object dtype.".format(X.dtype) ) if sp.issparse(X) and self.missing_values == 0: # missing_values = 0 not allowed with sparse data as it would # force densification raise ValueError( "Imputation not possible when missing_values " "== 0 and input is sparse. Provide a dense " "array instead." ) if self.strategy == "constant": if in_fit and self.fill_value is not None: fill_value_dtype = type(self.fill_value) err_msg = ( f"fill_value={self.fill_value!r} (of type {fill_value_dtype!r}) " f"cannot be cast to the input data that is {X.dtype!r}. " "If fill_value is a Python scalar, instead pass a numpy scalar " "(e.g. fill_value=np.uint8(0) if your data is of type np.uint8). " "Make sure that both dtypes are of the same kind." ) elif not in_fit: fill_value_dtype = self._fill_dtype err_msg = ( f"The dtype of the filling value (i.e. {fill_value_dtype!r}) " f"cannot be cast to the input data that is {X.dtype!r}. " "Make sure that the dtypes of the input data are of the same kind " "between fit and transform." ) else: # By default, fill_value=None, and the replacement is always # compatible with the input data fill_value_dtype = X.dtype # Make sure we can safely cast fill_value dtype to the input data dtype if not np.can_cast(fill_value_dtype, X.dtype, casting="same_kind"): raise ValueError(err_msg) return X @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit the imputer on `X`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted estimator. 
""" X = self._validate_input(X, in_fit=True) # default fill_value is 0 for numerical input and "missing_value" # otherwise if self.fill_value is None: if X.dtype.kind in ("i", "u", "f"): fill_value = 0 else: fill_value = "missing_value" else: fill_value = self.fill_value self._fill_dtype = X.dtype if sp.issparse(X): self.statistics_ = self._sparse_fit( X, self.strategy, self.missing_values, fill_value ) else: self.statistics_ = self._dense_fit( X, self.strategy, self.missing_values, fill_value ) return self def _sparse_fit(self, X, strategy, missing_values, fill_value): """Fit the transformer on sparse data.""" missing_mask = _get_mask(X, missing_values) mask_data = missing_mask.data n_implicit_zeros = X.shape[0] - np.diff(X.indptr) statistics = np.empty(X.shape[1]) if strategy == "constant": # for constant strategy, self.statistics_ is used to store # fill_value in each column, or np.nan for columns to drop statistics.fill(fill_value) if not self.keep_empty_features: for i in range(missing_mask.shape[1]): if all(missing_mask[:, i].data): statistics[i] = np.nan else: for i in range(X.shape[1]): column = X.data[X.indptr[i] : X.indptr[i + 1]] mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]] column = column[~mask_column] # combine explicit and implicit zeros mask_zeros = _get_mask(column, 0) column = column[~mask_zeros] n_explicit_zeros = mask_zeros.sum() n_zeros = n_implicit_zeros[i] + n_explicit_zeros if len(column) == 0 and self.keep_empty_features: # in case we want to keep columns with only missing values. 
statistics[i] = 0 else: if strategy == "mean": s = column.size + n_zeros statistics[i] = np.nan if s == 0 else column.sum() / s elif strategy == "median": statistics[i] = _get_median(column, n_zeros) elif strategy == "most_frequent": statistics[i] = _most_frequent(column, 0, n_zeros) elif isinstance(strategy, Callable): statistics[i] = self.strategy(column) super()._fit_indicator(missing_mask) return statistics def _dense_fit(self, X, strategy, missing_values, fill_value): """Fit the transformer on dense data.""" missing_mask = _get_mask(X, missing_values) masked_X = ma.masked_array(X, mask=missing_mask) super()._fit_indicator(missing_mask) # Mean if strategy == "mean": mean_masked = np.ma.mean(masked_X, axis=0) # Avoid the warning "Warning: converting a masked element to nan." mean = np.ma.getdata(mean_masked) mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan return mean # Median elif strategy == "median": median_masked = np.ma.median(masked_X, axis=0) # Avoid the warning "Warning: converting a masked element to nan." median = np.ma.getdata(median_masked) median[np.ma.getmaskarray(median_masked)] = ( 0 if self.keep_empty_features else np.nan ) return median # Most frequent elif strategy == "most_frequent": # Avoid use of scipy.stats.mstats.mode due to the required # additional overhead and slow benchmarking performance. # See Issue 14325 and PR 14399 for full discussion. 
# To be able access the elements by columns X = X.transpose() mask = missing_mask.transpose() if X.dtype.kind == "O": most_frequent = np.empty(X.shape[0], dtype=object) else: most_frequent = np.empty(X.shape[0]) for i, (row, row_mask) in enumerate(zip(X[:], mask[:])): row_mask = np.logical_not(row_mask).astype(bool) row = row[row_mask] if len(row) == 0 and self.keep_empty_features: most_frequent[i] = 0 else: most_frequent[i] = _most_frequent(row, np.nan, 0) return most_frequent # Constant elif strategy == "constant": # for constant strategy, self.statistcs_ is used to store # fill_value in each column, or np.nan for columns to drop statistics = np.full(X.shape[1], fill_value, dtype=np.object_) if not self.keep_empty_features: for i in range(missing_mask.shape[1]): if missing_mask[:, i].all(): statistics[i] = np.nan return statistics # Custom elif isinstance(strategy, Callable): statistics = np.empty(masked_X.shape[1]) for i in range(masked_X.shape[1]): statistics[i] = self.strategy(masked_X[:, i].compressed()) return statistics def transform(self, X): """Impute all missing values in `X`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data to complete. Returns ------- X_imputed : {ndarray, sparse matrix} of shape \ (n_samples, n_features_out) `X` with imputed values. 
""" check_is_fitted(self) X = self._validate_input(X, in_fit=False) statistics = self.statistics_ if X.shape[1] != statistics.shape[0]: raise ValueError( "X has %d features per sample, expected %d" % (X.shape[1], self.statistics_.shape[0]) ) # compute mask before eliminating invalid features missing_mask = _get_mask(X, self.missing_values) # Decide whether to keep missing features if self.keep_empty_features: valid_statistics = statistics.astype(self._fill_dtype, copy=False) valid_statistics_indexes = None else: # same as np.isnan but also works for object dtypes invalid_mask = _get_mask(statistics, np.nan) valid_mask = np.logical_not(invalid_mask) valid_statistics = statistics[valid_mask].astype( self._fill_dtype, copy=False ) valid_statistics_indexes = np.flatnonzero(valid_mask) if invalid_mask.any(): invalid_features = np.arange(X.shape[1])[invalid_mask] # use feature names warning if features are provided if hasattr(self, "feature_names_in_"): invalid_features = self.feature_names_in_[invalid_features] warnings.warn( "Skipping features without any observed values:" f" {invalid_features}. At least one non-missing value is needed" f" for imputation with strategy='{self.strategy}'." ) X = X[:, valid_statistics_indexes] # Do actual imputation if sp.issparse(X): if self.missing_values == 0: raise ValueError( "Imputation not possible when missing_values " "== 0 and input is sparse. Provide a dense " "array instead." 
) else: # if no invalid statistics are found, use the mask computed # before, else recompute mask if valid_statistics_indexes is None: mask = missing_mask.data else: mask = _get_mask(X.data, self.missing_values) indexes = np.repeat( np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr) )[mask] X.data[mask] = valid_statistics[indexes] else: # use mask computed before eliminating invalid mask if valid_statistics_indexes is None: mask_valid_features = missing_mask else: mask_valid_features = missing_mask[:, valid_statistics_indexes] n_missing = np.sum(mask_valid_features, axis=0) values = np.repeat(valid_statistics, n_missing) coordinates = np.where(mask_valid_features.transpose())[::-1] X[coordinates] = values X_indicator = super()._transform_indicator(missing_mask) return super()._concatenate_indicator(X, X_indicator) def inverse_transform(self, X): """Convert the data back to the original representation. Inverts the `transform` operation performed on an array. This operation can only be performed after :class:`SimpleImputer` is instantiated with `add_indicator=True`. Note that `inverse_transform` can only invert the transform in features that have binary indicators for missing values. If a feature has no missing values at `fit` time, the feature won't have a binary indicator, and the imputation done at `transform` time won't be inverted. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape \ (n_samples, n_features + n_features_missing_indicator) The imputed data to be reverted to original data. It has to be an augmented array of imputed data and the missing indicator mask. Returns ------- X_original : ndarray of shape (n_samples, n_features) The original `X` with missing values as it was prior to imputation. """ check_is_fitted(self) if not self.add_indicator: raise ValueError( "'inverse_transform' works only when " "'SimpleImputer' is instantiated with " "'add_indicator=True'. " f"Got 'add_indicator={self.add_indicator}' " "instead." 
) n_features_missing = len(self.indicator_.features_) non_empty_feature_count = X.shape[1] - n_features_missing array_imputed = X[:, :non_empty_feature_count].copy() missing_mask = X[:, non_empty_feature_count:].astype(bool) n_features_original = len(self.statistics_) shape_original = (X.shape[0], n_features_original) X_original = np.zeros(shape_original) X_original[:, self.indicator_.features_] = missing_mask full_mask = X_original.astype(bool) imputed_idx, original_idx = 0, 0 while imputed_idx < len(array_imputed.T): if not np.all(X_original[:, original_idx]): X_original[:, original_idx] = array_imputed.T[imputed_idx] imputed_idx += 1 original_idx += 1 else: original_idx += 1 X_original[full_mask] = self.missing_values return X_original def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.input_tags.allow_nan = is_pandas_na(self.missing_values) or is_scalar_nan( self.missing_values ) return tags def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self, "n_features_in_") input_features = _check_feature_names_in(self, input_features) non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan)) names = input_features[non_missing_mask] return self._concatenate_indicator_feature_names_out(names, input_features) class MissingIndicator(TransformerMixin, BaseEstimator): """Binary indicators for missing values. 
Note that this component typically should not be used in a vanilla :class:`~sklearn.pipeline.Pipeline` consisting of transformers and a classifier, but rather could be added using a :class:`~sklearn.pipeline.FeatureUnion` or :class:`~sklearn.compose.ColumnTransformer`. Read more in the :ref:`User Guide <impute>`. .. versionadded:: 0.20 Parameters ---------- missing_values : int, float, str, np.nan or None, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. features : {'missing-only', 'all'}, default='missing-only' Whether the imputer mask should represent all or a subset of features. - If `'missing-only'` (default), the imputer mask will only represent features containing missing values during fit time. - If `'all'`, the imputer mask will represent all features. sparse : bool or 'auto', default='auto' Whether the imputer mask format should be sparse or dense. - If `'auto'` (default), the imputer mask will be of same type as input. - If `True`, the imputer mask will be a sparse matrix. - If `False`, the imputer mask will be a numpy array. error_on_new : bool, default=True If `True`, :meth:`transform` will raise an error when there are features with missing values that have no missing values in :meth:`fit`. This is applicable only when `features='missing-only'`. Attributes ---------- features_ : ndarray of shape (n_missing_features,) or (n_features,) The features indices which will be returned when calling :meth:`transform`. They are computed during :meth:`fit`. If `features='all'`, `features_` is equal to `range(n_features)`. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. 
Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- SimpleImputer : Univariate imputation of missing values. IterativeImputer : Multivariate imputation of missing values. Examples -------- >>> import numpy as np >>> from sklearn.impute import MissingIndicator >>> X1 = np.array([[np.nan, 1, 3], ... [4, 0, np.nan], ... [8, 1, 0]]) >>> X2 = np.array([[5, 1, np.nan], ... [np.nan, 2, 3], ... [2, 4, 0]]) >>> indicator = MissingIndicator() >>> indicator.fit(X1) MissingIndicator()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/_iterative.py
sklearn/impute/_iterative.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from collections import namedtuple from numbers import Integral, Real from time import time import numpy as np from scipy import stats from sklearn.base import _fit_context, clone from sklearn.exceptions import ConvergenceWarning from sklearn.impute._base import SimpleImputer, _BaseImputer, _check_inputs_dtype from sklearn.preprocessing import normalize from sklearn.utils import _safe_indexing, check_array, check_random_state from sklearn.utils._indexing import _safe_assign from sklearn.utils._mask import _get_mask from sklearn.utils._missing import is_scalar_nan from sklearn.utils._param_validation import HasMethods, Interval, StrOptions from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, _raise_for_params, process_routing, ) from sklearn.utils.validation import ( FLOAT_DTYPES, _check_feature_names_in, _num_samples, check_is_fitted, validate_data, ) _ImputerTriplet = namedtuple( "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"] ) def _assign_where(X1, X2, cond): """Assign X2 to X1 where cond is True. Parameters ---------- X1 : ndarray or dataframe of shape (n_samples, n_features) Data. X2 : ndarray of shape (n_samples, n_features) Data to be assigned. cond : ndarray of shape (n_samples, n_features) Boolean mask to assign data. """ if hasattr(X1, "mask"): # pandas dataframes X1.mask(cond=cond, other=X2, inplace=True) else: # ndarrays X1[cond] = X2[cond] class IterativeImputer(_BaseImputer): """Multivariate imputer that estimates each feature from all the others. A strategy for imputing missing values by modeling each feature with missing values as a function of other features in a round-robin fashion. Read more in the :ref:`User Guide <iterative_imputer>`. .. versionadded:: 0.21 .. note:: This estimator is still **experimental** for now: the predictions and the API might change without any deprecation cycle. 
To use it, you need to explicitly import `enable_iterative_imputer`:: >>> # explicitly require this experimental feature >>> from sklearn.experimental import enable_iterative_imputer # noqa >>> # now you can import normally from sklearn.impute >>> from sklearn.impute import IterativeImputer Parameters ---------- estimator : estimator object, default=BayesianRidge() The estimator to use at each step of the round-robin imputation. If `sample_posterior=True`, the estimator must support `return_std` in its `predict` method. missing_values : int or np.nan, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. sample_posterior : bool, default=False Whether to sample from the (Gaussian) predictive posterior of the fitted estimator for each imputation. Estimator must support `return_std` in its `predict` method if set to `True`. Set to `True` if using `IterativeImputer` for multiple imputations. max_iter : int, default=10 Maximum number of imputation rounds to perform before returning the imputations computed during the final round. A round is a single imputation of each feature with missing values. The stopping criterion is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`, where `X_t` is `X` at iteration `t`. Note that early stopping is only applied if `sample_posterior=False`. tol : float, default=1e-3 Tolerance of the stopping condition. n_nearest_features : int, default=None Number of other features to use to estimate the missing values of each feature column. Nearness between features is measured using the absolute correlation coefficient between each feature pair (after initial imputation). 
To ensure coverage of features throughout the imputation process, the neighbor features are not necessarily nearest, but are drawn with probability proportional to correlation for each imputed target feature. Can provide significant speed-up when the number of features is huge. If `None`, all features will be used. initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \ default='mean' Which strategy to use to initialize the missing values. Same as the `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`. fill_value : str or numerical value, default=None When `strategy="constant"`, `fill_value` is used to replace all occurrences of missing_values. For string or object data types, `fill_value` must be a string. If `None`, `fill_value` will be 0 when imputing numerical data and "missing_value" for strings or object data types. .. versionadded:: 1.3 imputation_order : {'ascending', 'descending', 'roman', 'arabic', \ 'random'}, default='ascending' The order in which the features will be imputed. Possible values: - `'ascending'`: From features with fewest missing values to most. - `'descending'`: From features with most missing values to fewest. - `'roman'`: Left to right. - `'arabic'`: Right to left. - `'random'`: A random order for each round. skip_complete : bool, default=False If `True` then features with missing values during :meth:`transform` which did not have any missing values during :meth:`fit` will be imputed with the initial imputation method only. Set to `True` if you have many features with no missing values at both :meth:`fit` and :meth:`transform` time to save compute. min_value : float or array-like of shape (n_features,), default=-np.inf Minimum possible imputed value. Broadcast to shape `(n_features,)` if scalar. If array-like, expects shape `(n_features,)`, one min value for each feature. The default is `-np.inf`. .. versionchanged:: 0.23 Added support for array-like. 
max_value : float or array-like of shape (n_features,), default=np.inf Maximum possible imputed value. Broadcast to shape `(n_features,)` if scalar. If array-like, expects shape `(n_features,)`, one max value for each feature. The default is `np.inf`. .. versionchanged:: 0.23 Added support for array-like. verbose : int, default=0 Verbosity flag, controls the debug messages that are issued as functions are evaluated. The higher, the more verbose. Can be 0, 1, or 2. random_state : int, RandomState instance or None, default=None The seed of the pseudo random number generator to use. Randomizes selection of estimator features if `n_nearest_features` is not `None`, the `imputation_order` if `random`, and the sampling from posterior if `sample_posterior=True`. Use an integer for determinism. See :term:`the Glossary <random_state>`. add_indicator : bool, default=False If `True`, a :class:`MissingIndicator` transform will stack onto output of the imputer's transform. This allows a predictive estimator to account for missingness despite imputation. If a feature has no missing values at fit/train time, the feature won't appear on the missing indicator even if there are missing values at transform/test time. keep_empty_features : bool, default=False If True, features that consist exclusively of missing values when `fit` is called are returned in results when `transform` is called. The imputed value is always `0` except when `initial_strategy="constant"` in which case `fill_value` will be used instead. .. versionadded:: 1.2 Attributes ---------- initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer` Imputer used to initialize the missing values. imputation_sequence_ : list of tuples Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where `feat_idx` is the current feature to be imputed, `neighbor_feat_idx` is the array of other features used to impute the current feature, and `estimator` is the trained estimator used for the imputation. 
Length is `self.n_features_with_missing_ * self.n_iter_`. n_iter_ : int Number of iteration rounds that occurred. Will be less than `self.max_iter` if early stopping criterion was reached. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_features_with_missing_ : int Number of features with missing values. indicator_ : :class:`~sklearn.impute.MissingIndicator` Indicator used to add binary indicators for missing values. `None` if `add_indicator=False`. random_state_ : RandomState instance RandomState instance that is generated either from a seed, the random number generator or by `np.random`. See Also -------- SimpleImputer : Univariate imputer for completing missing values with simple strategies. KNNImputer : Multivariate imputer that estimates missing features using nearest samples. Notes ----- To support imputation in inductive mode we store each feature's estimator during the :meth:`fit` phase, and predict without refitting (in order) during the :meth:`transform` phase. Features which contain all missing values at :meth:`fit` are discarded upon :meth:`transform`. Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))` where :math:`k` = `max_iter`, :math:`n` the number of samples and :math:`p` the number of features. It thus becomes prohibitively costly when the number of features increases. Setting `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol` can help to reduce its computational cost. Depending on the nature of missing values, simple imputers can be preferable in a prediction context. References ---------- .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice: Multivariate Imputation by Chained Equations in R". Journal of Statistical Software 45: 1-67. 
<https://www.jstatsoft.org/article/view/v045i03>`_ .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in Multivariate Data Suitable for use with an Electronic Computer". Journal of the Royal Statistical Society 22(2): 302-306. <https://www.jstor.org/stable/2984099>`_ Examples -------- >>> import numpy as np >>> from sklearn.experimental import enable_iterative_imputer >>> from sklearn.impute import IterativeImputer >>> imp_mean = IterativeImputer(random_state=0) >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) IterativeImputer(random_state=0) >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] >>> imp_mean.transform(X) array([[ 6.9584, 2. , 3. ], [ 4. , 2.6000, 6. ], [10. , 4.9999, 9. ]]) For a more detailed example see :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py` or :ref:`sphx_glr_auto_examples_impute_plot_iterative_imputer_variants_comparison.py`. """ _parameter_constraints: dict = { **_BaseImputer._parameter_constraints, "estimator": [None, HasMethods(["fit", "predict"])], "sample_posterior": ["boolean"], "max_iter": [Interval(Integral, 0, None, closed="left")], "tol": [Interval(Real, 0, None, closed="left")], "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")], "initial_strategy": [ StrOptions({"mean", "median", "most_frequent", "constant"}) ], "fill_value": "no_validation", # any object is valid "imputation_order": [ StrOptions({"ascending", "descending", "roman", "arabic", "random"}) ], "skip_complete": ["boolean"], "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"], "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"], "verbose": ["verbose"], "random_state": ["random_state"], } def __init__( self, estimator=None, *, missing_values=np.nan, sample_posterior=False, max_iter=10, tol=1e-3, n_nearest_features=None, initial_strategy="mean", fill_value=None, imputation_order="ascending", skip_complete=False, min_value=-np.inf, max_value=np.inf, 
verbose=0, random_state=None, add_indicator=False, keep_empty_features=False, ): super().__init__( missing_values=missing_values, add_indicator=add_indicator, keep_empty_features=keep_empty_features, ) self.estimator = estimator self.sample_posterior = sample_posterior self.max_iter = max_iter self.tol = tol self.n_nearest_features = n_nearest_features self.initial_strategy = initial_strategy self.fill_value = fill_value self.imputation_order = imputation_order self.skip_complete = skip_complete self.min_value = min_value self.max_value = max_value self.verbose = verbose self.random_state = random_state def _impute_one_feature( self, X_filled, mask_missing_values, feat_idx, neighbor_feat_idx, estimator=None, fit_mode=True, params=None, ): """Impute a single feature from the others provided. This function predicts the missing values of one of the features using the current estimates of all the other features. The `estimator` must support `return_std=True` in its `predict` method for this function to work. Parameters ---------- X_filled : ndarray Input data with the most recent imputations. mask_missing_values : ndarray Input data's missing indicator matrix. feat_idx : int Index of the feature currently being imputed. neighbor_feat_idx : ndarray Indices of the features to be used in imputing `feat_idx`. estimator : object The estimator to use at this step of the round-robin imputation. If `sample_posterior=True`, the estimator must support `return_std` in its `predict` method. If None, it will be cloned from self._estimator. fit_mode : boolean, default=True Whether to fit and predict with the estimator or just predict. params : dict Additional params routed to the individual estimator. Returns ------- X_filled : ndarray Input data with `X_filled[missing_row_mask, feat_idx]` updated. estimator : estimator with sklearn API The fitted estimator used to impute `X_filled[missing_row_mask, feat_idx]`. 
""" if estimator is None and fit_mode is False: raise ValueError( "If fit_mode is False, then an already-fitted " "estimator should be passed in." ) if estimator is None: estimator = clone(self._estimator) missing_row_mask = mask_missing_values[:, feat_idx] if fit_mode: X_train = _safe_indexing( _safe_indexing(X_filled, neighbor_feat_idx, axis=1), ~missing_row_mask, axis=0, ) y_train = _safe_indexing( _safe_indexing(X_filled, feat_idx, axis=1), ~missing_row_mask, axis=0, ) estimator.fit(X_train, y_train, **params) # if no missing values, don't predict if np.sum(missing_row_mask) == 0: return X_filled, estimator # get posterior samples if there is at least one missing value X_test = _safe_indexing( _safe_indexing(X_filled, neighbor_feat_idx, axis=1), missing_row_mask, axis=0, ) if self.sample_posterior: mus, sigmas = estimator.predict(X_test, return_std=True) imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype) # two types of problems: (1) non-positive sigmas # (2) mus outside legal range of min_value and max_value # (results in inf sample) positive_sigmas = sigmas > 0 imputed_values[~positive_sigmas] = mus[~positive_sigmas] mus_too_low = mus < self._min_value[feat_idx] imputed_values[mus_too_low] = self._min_value[feat_idx] mus_too_high = mus > self._max_value[feat_idx] imputed_values[mus_too_high] = self._max_value[feat_idx] # the rest can be sampled without statistical issues inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high mus = mus[inrange_mask] sigmas = sigmas[inrange_mask] a = (self._min_value[feat_idx] - mus) / sigmas b = (self._max_value[feat_idx] - mus) / sigmas truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas) imputed_values[inrange_mask] = truncated_normal.rvs( random_state=self.random_state_ ) else: imputed_values = estimator.predict(X_test) imputed_values = np.clip( imputed_values, self._min_value[feat_idx], self._max_value[feat_idx] ) # update the feature _safe_assign( X_filled, imputed_values, 
row_indexer=missing_row_mask, column_indexer=feat_idx, ) return X_filled, estimator def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat): """Get a list of other features to predict `feat_idx`. If `self.n_nearest_features` is less than or equal to the total number of features, then use a probability proportional to the absolute correlation between `feat_idx` and each other feature to randomly choose a subsample of the other features (without replacement). Parameters ---------- n_features : int Number of features in `X`. feat_idx : int Index of the feature currently being imputed. abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of `X`. The diagonal has been zeroed out and each feature has been normalized to sum to 1. Can be None. Returns ------- neighbor_feat_idx : array-like The features to use to impute `feat_idx`. """ if self.n_nearest_features is not None and self.n_nearest_features < n_features: p = abs_corr_mat[:, feat_idx] neighbor_feat_idx = self.random_state_.choice( np.arange(n_features), self.n_nearest_features, replace=False, p=p ) else: inds_left = np.arange(feat_idx) inds_right = np.arange(feat_idx + 1, n_features) neighbor_feat_idx = np.concatenate((inds_left, inds_right)) return neighbor_feat_idx def _get_ordered_idx(self, mask_missing_values): """Decide in what order we will update the features. As a homage to the MICE R package, we will have 4 main options of how to order the updates, and use a random order if anything else is specified. Also, this function skips features which have no missing values. Parameters ---------- mask_missing_values : array-like, shape (n_samples, n_features) Input data's missing indicator matrix, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- ordered_idx : ndarray, shape (n_features,) The order in which to impute the features. 
""" frac_of_missing_values = mask_missing_values.mean(axis=0) if self.skip_complete: missing_values_idx = np.flatnonzero(frac_of_missing_values) else: missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0]) if self.imputation_order == "roman": ordered_idx = missing_values_idx elif self.imputation_order == "arabic": ordered_idx = missing_values_idx[::-1] elif self.imputation_order == "ascending": n = len(frac_of_missing_values) - len(missing_values_idx) ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:] elif self.imputation_order == "descending": n = len(frac_of_missing_values) - len(missing_values_idx) ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1] elif self.imputation_order == "random": ordered_idx = missing_values_idx self.random_state_.shuffle(ordered_idx) return ordered_idx def _get_abs_corr_mat(self, X_filled, tolerance=1e-6): """Get absolute correlation matrix between features. Parameters ---------- X_filled : ndarray, shape (n_samples, n_features) Input data with the most recent imputations. tolerance : float, default=1e-6 `abs_corr_mat` can have nans, which will be replaced with `tolerance`. Returns ------- abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of `X` at the beginning of the current round. The diagonal has been zeroed out and each feature's absolute correlations with all others have been normalized to sum to 1. """ n_features = X_filled.shape[1] if self.n_nearest_features is None or self.n_nearest_features >= n_features: return None with np.errstate(invalid="ignore"): # if a feature in the neighborhood has only a single value # (e.g., categorical feature), the std. dev. will be null and # np.corrcoef will raise a warning due to a division by zero abs_corr_mat = np.abs(np.corrcoef(X_filled.T)) # np.corrcoef is not defined for features with zero std abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance # ensures exploration, i.e. 
at least some probability of sampling np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat) # features are not their own neighbors np.fill_diagonal(abs_corr_mat, 0) # needs to sum to 1 for np.random.choice sampling abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False) return abs_corr_mat def _initial_imputation(self, X, in_fit=False): """Perform initial imputation for input `X`. Parameters ---------- X : ndarray of shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. in_fit : bool, default=False Whether function is called in :meth:`fit`. Returns ------- Xt : ndarray of shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. X_filled : ndarray of shape (n_samples, n_features) Input data with the most recent imputations. mask_missing_values : ndarray of shape (n_samples, n_features) Input data's missing indicator matrix, where `n_samples` is the number of samples and `n_features` is the number of features, masked by non-missing features. X_missing_mask : ndarray, shape (n_samples, n_features) Input data's mask matrix indicating missing datapoints, where `n_samples` is the number of samples and `n_features` is the number of features. 
""" if is_scalar_nan(self.missing_values): ensure_all_finite = "allow-nan" else: ensure_all_finite = True X = validate_data( self, X, dtype=FLOAT_DTYPES, order="F", reset=in_fit, ensure_all_finite=ensure_all_finite, ) _check_inputs_dtype(X, self.missing_values) X_missing_mask = _get_mask(X, self.missing_values) mask_missing_values = X_missing_mask.copy() if self.initial_imputer_ is None: self.initial_imputer_ = SimpleImputer( missing_values=self.missing_values, strategy=self.initial_strategy, fill_value=self.fill_value, keep_empty_features=self.keep_empty_features, ).set_output(transform="default") X_filled = self.initial_imputer_.fit_transform(X) else: X_filled = self.initial_imputer_.transform(X) if in_fit: self._is_empty_feature = np.all(mask_missing_values, axis=0) if not self.keep_empty_features: # drop empty features Xt = X[:, ~self._is_empty_feature] mask_missing_values = mask_missing_values[:, ~self._is_empty_feature] else: # mark empty features as not missing and keep the original # imputation mask_missing_values[:, self._is_empty_feature] = False Xt = X Xt[:, self._is_empty_feature] = X_filled[:, self._is_empty_feature] return Xt, X_filled, mask_missing_values, X_missing_mask @staticmethod def _validate_limit( limit, limit_type, n_features, is_empty_feature, keep_empty_feature ): """Validate the limits (min/max) of the feature values. Converts scalar min/max limits to vectors of shape `(n_features,)`. Parameters ---------- limit: scalar or array-like The user-specified limit (i.e, min_value or max_value). limit_type: {'max', 'min'} Type of limit to validate. n_features: int Number of features in the dataset. is_empty_feature: ndarray, shape (n_features, ) Mask array indicating empty feature imputer has seen during fit. keep_empty_feature: bool If False, remove empty-feature indices from the limit. Returns ------- limit: ndarray, shape(n_features,) Array of limits, one for each feature. 
""" n_features_in = _num_samples(is_empty_feature) if ( limit is not None and not np.isscalar(limit) and _num_samples(limit) != n_features_in ): raise ValueError( f"'{limit_type}_value' should be of shape ({n_features_in},) when an" f" array-like is provided. Got {len(limit)}, instead." ) limit_bound = np.inf if limit_type == "max" else -np.inf limit = limit_bound if limit is None else limit if np.isscalar(limit): limit = np.full(n_features, limit) limit = check_array(limit, ensure_all_finite=False, copy=False, ensure_2d=False) # Make sure to remove the empty feature elements from the bounds if not keep_empty_feature and len(limit) == len(is_empty_feature): limit = limit[~is_empty_feature] return limit @_fit_context( # IterativeImputer.estimator is not validated yet prefer_skip_nested_validation=False ) def fit_transform(self, X, y=None, **params): """Fit the imputer on `X` and return the transformed `X`. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. **params : dict Parameters routed to the `fit` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data. 
""" _raise_for_params(params, self, "fit") routed_params = process_routing( self, "fit", **params, ) self.random_state_ = getattr( self, "random_state_", check_random_state(self.random_state) ) if self.estimator is None: from sklearn.linear_model import BayesianRidge self._estimator = BayesianRidge() else: self._estimator = clone(self.estimator) self.imputation_sequence_ = [] self.initial_imputer_ = None X, Xt, mask_missing_values, complete_mask = self._initial_imputation( X, in_fit=True ) super()._fit_indicator(complete_mask) X_indicator = super()._transform_indicator(complete_mask) if self.max_iter == 0 or np.all(mask_missing_values): self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) # Edge case: a single feature, we return the initial imputation. if Xt.shape[1] == 1: self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) self._min_value = self._validate_limit( self.min_value, "min", X.shape[1], self._is_empty_feature, self.keep_empty_features, ) self._max_value = self._validate_limit( self.max_value, "max", X.shape[1], self._is_empty_feature, self.keep_empty_features, ) if not np.all(np.greater(self._max_value, self._min_value)): raise ValueError("One (or more) features have min_value >= max_value.") # order in which to impute # note this is probably too slow for large feature data (d > 100000) # and a better way would be good. 
# see: https://goo.gl/KyCNwj and subsequent comments ordered_idx = self._get_ordered_idx(mask_missing_values) self.n_features_with_missing_ = len(ordered_idx) abs_corr_mat = self._get_abs_corr_mat(Xt) n_samples, n_features = Xt.shape if self.verbose > 0: print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) start_t = time() if not self.sample_posterior: Xt_previous = Xt.copy() normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values])) for self.n_iter_ in range(1, self.max_iter + 1): if self.imputation_order == "random": ordered_idx = self._get_ordered_idx(mask_missing_values) for feat_idx in ordered_idx: neighbor_feat_idx = self._get_neighbor_feat_idx( n_features, feat_idx, abs_corr_mat ) Xt, estimator = self._impute_one_feature( Xt, mask_missing_values, feat_idx, neighbor_feat_idx, estimator=None, fit_mode=True, params=routed_params.estimator.fit, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/_knn.py
sklearn/impute/_knn.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Integral import numpy as np from sklearn.base import _fit_context from sklearn.impute._base import _BaseImputer from sklearn.metrics import pairwise_distances_chunked from sklearn.metrics.pairwise import _NAN_METRICS from sklearn.neighbors._base import _get_weights from sklearn.utils._mask import _get_mask from sklearn.utils._missing import is_scalar_nan from sklearn.utils._param_validation import Hidden, Interval, StrOptions from sklearn.utils.validation import ( FLOAT_DTYPES, _check_feature_names_in, check_is_fitted, validate_data, ) class KNNImputer(_BaseImputer): """Imputation for completing missing values using k-Nearest Neighbors. Each sample's missing values are imputed using the mean value from `n_neighbors` nearest neighbors found in the training set. Two samples are close if the features that neither is missing are close. Read more in the :ref:`User Guide <knnimpute>`. .. versionadded:: 0.22 Parameters ---------- missing_values : int, float, str, np.nan or None, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` should be set to np.nan, since `pd.NA` will be converted to np.nan. n_neighbors : int, default=5 Number of neighboring samples to use for imputation. weights : {'uniform', 'distance'} or callable, default='uniform' Weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - callable : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. 
metric : {'nan_euclidean'} or callable, default='nan_euclidean' Distance metric for searching neighbors. Possible values: - 'nan_euclidean' - callable : a user-defined function which conforms to the definition of ``func_metric(x, y, *, missing_values=np.nan)``. `x` and `y` corresponds to a row (i.e. 1-D arrays) of `X` and `Y`, respectively. The callable should returns a scalar distance value. copy : bool, default=True If True, a copy of X will be created. If False, imputation will be done in-place whenever possible. add_indicator : bool, default=False If True, a :class:`MissingIndicator` transform will stack onto the output of the imputer's transform. This allows a predictive estimator to account for missingness despite imputation. If a feature has no missing values at fit/train time, the feature won't appear on the missing indicator even if there are missing values at transform/test time. keep_empty_features : bool, default=False If True, features that consist exclusively of missing values when `fit` is called are returned in results when `transform` is called. The imputed value is always `0`. .. versionadded:: 1.2 Attributes ---------- indicator_ : :class:`~sklearn.impute.MissingIndicator` Indicator used to add binary indicators for missing values. ``None`` if add_indicator is False. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- SimpleImputer : Univariate imputer for completing missing values with simple strategies. IterativeImputer : Multivariate imputer that estimates values to impute for each feature with missing values from all the others. References ---------- * `Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor Hastie, Robert Tibshirani, David Botstein and Russ B. 
Altman, Missing value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17 no. 6, 2001 Pages 520-525. <https://academic.oup.com/bioinformatics/article/17/6/520/272365>`_ Examples -------- >>> import numpy as np >>> from sklearn.impute import KNNImputer >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]] >>> imputer = KNNImputer(n_neighbors=2) >>> imputer.fit_transform(X) array([[1. , 2. , 4. ], [3. , 4. , 3. ], [5.5, 6. , 5. ], [8. , 8. , 7. ]]) For a more detailed example see :ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`. """ _parameter_constraints: dict = { **_BaseImputer._parameter_constraints, "n_neighbors": [Interval(Integral, 1, None, closed="left")], "weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)], "metric": [StrOptions(set(_NAN_METRICS)), callable], "copy": ["boolean"], } def __init__( self, *, missing_values=np.nan, n_neighbors=5, weights="uniform", metric="nan_euclidean", copy=True, add_indicator=False, keep_empty_features=False, ): super().__init__( missing_values=missing_values, add_indicator=add_indicator, keep_empty_features=keep_empty_features, ) self.n_neighbors = n_neighbors self.weights = weights self.metric = metric self.copy = copy def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col): """Helper function to impute a single column. Parameters ---------- dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors) Distance matrix between the receivers and potential donors from training set. There must be at least one non-nan distance between a receiver and a potential donor. n_neighbors : int Number of neighbors to consider. fit_X_col : ndarray of shape (n_potential_donors,) Column of potential donors from training set. mask_fit_X_col : ndarray of shape (n_potential_donors,) Missing mask for fit_X_col. Returns ------- imputed_values: ndarray of shape (n_receivers,) Imputed values for receiver. 
""" # Get donors donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[ :, :n_neighbors ] # Get weight matrix from distance matrix donors_dist = dist_pot_donors[ np.arange(donors_idx.shape[0])[:, None], donors_idx ] weight_matrix = _get_weights(donors_dist, self.weights) # fill nans with zeros if weight_matrix is not None: weight_matrix[np.isnan(weight_matrix)] = 0.0 else: weight_matrix = np.ones_like(donors_dist) weight_matrix[np.isnan(donors_dist)] = 0.0 # Retrieve donor values and calculate kNN average donors = fit_X_col.take(donors_idx) donors_mask = mask_fit_X_col.take(donors_idx) donors = np.ma.array(donors, mask=donors_mask) return np.ma.average(donors, axis=1, weights=weight_matrix).data @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit the imputer on X. Parameters ---------- X : array-like shape of (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object The fitted `KNNImputer` class instance. """ # Check data integrity and calling arguments if not is_scalar_nan(self.missing_values): ensure_all_finite = True else: ensure_all_finite = "allow-nan" X = validate_data( self, X, accept_sparse=False, dtype=FLOAT_DTYPES, ensure_all_finite=ensure_all_finite, copy=self.copy, ) self._fit_X = X self._mask_fit_X = _get_mask(self._fit_X, self.missing_values) self._valid_mask = ~np.all(self._mask_fit_X, axis=0) super()._fit_indicator(self._mask_fit_X) return self def transform(self, X): """Impute all missing values in X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data to complete. Returns ------- X : array-like of shape (n_samples, n_output_features) The imputed dataset. `n_output_features` is the number of features that is not always missing during `fit`. 
""" check_is_fitted(self) if not is_scalar_nan(self.missing_values): ensure_all_finite = True else: ensure_all_finite = "allow-nan" X = validate_data( self, X, accept_sparse=False, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite=ensure_all_finite, copy=self.copy, reset=False, ) mask = _get_mask(X, self.missing_values) mask_fit_X = self._mask_fit_X valid_mask = self._valid_mask X_indicator = super()._transform_indicator(mask) # Removes columns where the training data is all nan if not np.any(mask[:, valid_mask]): # No missing values in X if self.keep_empty_features: Xc = X Xc[:, ~valid_mask] = 0 else: Xc = X[:, valid_mask] # Even if there are no missing values in X, we still concatenate Xc # with the missing value indicator matrix, X_indicator. # This is to ensure that the output maintains consistency in terms # of columns, regardless of whether missing values exist in X or not. return super()._concatenate_indicator(Xc, X_indicator) row_missing_idx = np.flatnonzero(mask[:, valid_mask].any(axis=1)) non_missing_fix_X = np.logical_not(mask_fit_X) # Maps from indices from X to indices in dist matrix dist_idx_map = np.zeros(X.shape[0], dtype=int) dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0]) def process_chunk(dist_chunk, start): row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)] # Find and impute missing by column for col in range(X.shape[1]): if not valid_mask[col]: # column was all missing during training continue col_mask = mask[row_missing_chunk, col] if not np.any(col_mask): # column has no missing values continue (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col]) # receivers_idx are indices in X receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)] # distances for samples that needed imputation for column dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ :, potential_donors_idx ] # receivers with all nan distances impute with mean all_nan_dist_mask = 
np.isnan(dist_subset).all(axis=1) all_nan_receivers_idx = receivers_idx[all_nan_dist_mask] if all_nan_receivers_idx.size: col_mean = np.ma.array( self._fit_X[:, col], mask=mask_fit_X[:, col] ).mean() X[all_nan_receivers_idx, col] = col_mean if len(all_nan_receivers_idx) == len(receivers_idx): # all receivers imputed with mean continue # receivers with at least one defined distance receivers_idx = receivers_idx[~all_nan_dist_mask] dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ :, potential_donors_idx ] n_neighbors = min(self.n_neighbors, len(potential_donors_idx)) value = self._calc_impute( dist_subset, n_neighbors, self._fit_X[potential_donors_idx, col], mask_fit_X[potential_donors_idx, col], ) X[receivers_idx, col] = value # process in fixed-memory chunks gen = pairwise_distances_chunked( X[row_missing_idx, :], self._fit_X, metric=self.metric, missing_values=self.missing_values, ensure_all_finite=ensure_all_finite, reduce_func=process_chunk, ) for chunk in gen: # process_chunk modifies X in place. No return value. pass if self.keep_empty_features: Xc = X Xc[:, ~valid_mask] = 0 else: Xc = X[:, valid_mask] return super()._concatenate_indicator(Xc, X_indicator) def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. 
""" check_is_fitted(self, "n_features_in_") input_features = _check_feature_names_in(self, input_features) names = input_features[self._valid_mask] return self._concatenate_indicator_feature_names_out(names, input_features)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/__init__.py
sklearn/impute/__init__.py
"""Transformers for missing value imputation.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import typing from sklearn.impute._base import MissingIndicator, SimpleImputer from sklearn.impute._knn import KNNImputer if typing.TYPE_CHECKING: # Avoid errors in type checkers (e.g. mypy) for experimental estimators. # TODO: remove this check once the estimator is no longer experimental. from sklearn.impute._iterative import IterativeImputer # noqa: F401 __all__ = ["KNNImputer", "MissingIndicator", "SimpleImputer"] # TODO: remove this check once the estimator is no longer experimental. def __getattr__(name): if name == "IterativeImputer": raise ImportError( f"{name} is experimental and the API might change without any " "deprecation cycle. To use it, you need to explicitly import " "enable_iterative_imputer:\n" "from sklearn.experimental import enable_iterative_imputer" ) raise AttributeError(f"module {__name__} has no attribute {name}")
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/tests/test_common.py
sklearn/impute/tests/test_common.py
import numpy as np import pytest from sklearn.base import clone from sklearn.experimental import enable_iterative_imputer # noqa: F401 from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer from sklearn.utils._testing import ( assert_allclose, assert_allclose_dense_sparse, assert_array_equal, ) from sklearn.utils.fixes import CSR_CONTAINERS def imputers(): return [IterativeImputer(tol=0.1), KNNImputer(), SimpleImputer()] def sparse_imputers(): return [SimpleImputer()] # ConvergenceWarning will be raised by the IterativeImputer @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) def test_imputation_missing_value_in_test_array(imputer): # [Non Regression Test for issue #13968] Missing value in test set should # not throw an error and return a finite dataset train = [[1], [2]] test = [[3], [np.nan]] imputer = clone(imputer) imputer.set_params(add_indicator=True) imputer.fit(train).transform(test) # ConvergenceWarning will be raised by the IterativeImputer @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") @pytest.mark.parametrize("marker", [np.nan, -1, 0]) @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) def test_imputers_add_indicator(marker, imputer): X = np.array( [ [marker, 1, 5, marker, 1], [2, marker, 1, marker, 2], [6, 3, marker, marker, 3], [1, 2, 9, marker, 4], ] ) X_true_indicator = np.array( [ [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], ] ) imputer = clone(imputer) imputer.set_params(missing_values=marker, add_indicator=True) X_trans = imputer.fit_transform(X) assert_allclose(X_trans[:, -4:], X_true_indicator) assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) imputer.set_params(add_indicator=False) X_trans_no_indicator = imputer.fit_transform(X) assert_allclose(X_trans[:, :-4], X_trans_no_indicator) # 
ConvergenceWarning will be raised by the IterativeImputer @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") @pytest.mark.parametrize("marker", [np.nan, -1]) @pytest.mark.parametrize( "imputer", sparse_imputers(), ids=lambda x: x.__class__.__name__ ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_imputers_add_indicator_sparse(imputer, marker, csr_container): imputer = clone(imputer) # Avoid side effects from shared instances. X = csr_container( [ [marker, 1, 5, marker, 1], [2, marker, 1, marker, 2], [6, 3, marker, marker, 3], [1, 2, 9, marker, 4], ] ) X_true_indicator = csr_container( [ [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], ] ) imputer.set_params(missing_values=marker, add_indicator=True) X_trans = imputer.fit_transform(X) assert_allclose_dense_sparse(X_trans[:, -4:], X_true_indicator) assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) imputer.set_params(add_indicator=False) X_trans_no_indicator = imputer.fit_transform(X) assert_allclose_dense_sparse(X_trans[:, :-4], X_trans_no_indicator) # ConvergenceWarning will be raised by the IterativeImputer @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) @pytest.mark.parametrize("add_indicator", [True, False]) def test_imputers_pandas_na_integer_array_support(imputer, add_indicator): # Test pandas IntegerArray with pd.NA pd = pytest.importorskip("pandas") marker = np.nan imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker) X = np.array( [ [marker, 1, 5, marker, 1], [2, marker, 1, marker, 2], [6, 3, marker, marker, 3], [1, 2, 9, marker, 4], ] ) # fit on numpy array X_trans_expected = imputer.fit_transform(X) # Creates dataframe with IntegerArrays with pd.NA X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c", "d", "e"]) # fit on pandas dataframe with 
IntegerArrays X_trans = imputer.fit_transform(X_df) assert_allclose(X_trans_expected, X_trans) @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) @pytest.mark.parametrize("add_indicator", [True, False]) def test_imputers_feature_names_out_pandas(imputer, add_indicator): """Check feature names out for imputers.""" pd = pytest.importorskip("pandas") marker = np.nan imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker) X = np.array( [ [marker, 1, 5, 3, marker, 1], [2, marker, 1, 4, marker, 2], [6, 3, 7, marker, marker, 3], [1, 2, 9, 8, marker, 4], ] ) X_df = pd.DataFrame(X, columns=["a", "b", "c", "d", "e", "f"]) imputer.fit(X_df) names = imputer.get_feature_names_out() if add_indicator: expected_names = [ "a", "b", "c", "d", "f", "missingindicator_a", "missingindicator_b", "missingindicator_d", "missingindicator_e", ] assert_array_equal(expected_names, names) else: expected_names = ["a", "b", "c", "d", "f"] assert_array_equal(expected_names, names) @pytest.mark.parametrize("keep_empty_features", [True, False]) @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) def test_keep_empty_features(imputer, keep_empty_features): """Check that the imputer keeps features with only missing values.""" X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]]) imputer = clone(imputer) imputer = imputer.set_params( add_indicator=False, keep_empty_features=keep_empty_features ) for method in ["fit_transform", "transform"]: X_imputed = getattr(imputer, method)(X) if keep_empty_features: assert X_imputed.shape == X.shape else: assert X_imputed.shape == (X.shape[0], X.shape[1] - 1) @pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__) @pytest.mark.parametrize("missing_value_test", [np.nan, 1]) def test_imputation_adds_missing_indicator_if_add_indicator_is_true( imputer, missing_value_test ): """Check that missing indicator always exists when add_indicator=True. 
Non-regression test for gh-26590. """ X_train = np.array([[0, np.nan], [1, 2]]) # Test data where missing_value_test variable can be set to np.nan or 1. X_test = np.array([[0, missing_value_test], [1, 2]]) imputer = clone(imputer) imputer.set_params(add_indicator=True) imputer.fit(X_train) X_test_imputed_with_indicator = imputer.transform(X_test) assert X_test_imputed_with_indicator.shape == (2, 3) imputer.set_params(add_indicator=False) imputer.fit(X_train) X_test_imputed_without_indicator = imputer.transform(X_test) assert X_test_imputed_without_indicator.shape == (2, 2) assert_allclose( X_test_imputed_with_indicator[:, :-1], X_test_imputed_without_indicator ) if np.isnan(missing_value_test): expected_missing_indicator = [1, 0] else: expected_missing_indicator = [0, 0] assert_allclose(X_test_imputed_with_indicator[:, -1], expected_missing_indicator)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/tests/test_base.py
sklearn/impute/tests/test_base.py
import numpy as np import pytest from sklearn.impute._base import _BaseImputer from sklearn.impute._iterative import _assign_where from sklearn.utils._mask import _get_mask from sklearn.utils._testing import _convert_container, assert_allclose @pytest.fixture def data(): X = np.random.randn(10, 2) X[::2] = np.nan return X class NoFitIndicatorImputer(_BaseImputer): def fit(self, X, y=None): return self def transform(self, X, y=None): return self._concatenate_indicator(X, self._transform_indicator(X)) class NoTransformIndicatorImputer(_BaseImputer): def fit(self, X, y=None): mask = _get_mask(X, value_to_mask=np.nan) super()._fit_indicator(mask) return self def transform(self, X, y=None): return self._concatenate_indicator(X, None) class NoPrecomputedMaskFit(_BaseImputer): def fit(self, X, y=None): self._fit_indicator(X) return self def transform(self, X): return self._concatenate_indicator(X, self._transform_indicator(X)) class NoPrecomputedMaskTransform(_BaseImputer): def fit(self, X, y=None): mask = _get_mask(X, value_to_mask=np.nan) self._fit_indicator(mask) return self def transform(self, X): return self._concatenate_indicator(X, self._transform_indicator(X)) def test_base_imputer_not_fit(data): imputer = NoFitIndicatorImputer(add_indicator=True) err_msg = "Make sure to call _fit_indicator before _transform_indicator" with pytest.raises(ValueError, match=err_msg): imputer.fit(data).transform(data) with pytest.raises(ValueError, match=err_msg): imputer.fit_transform(data) def test_base_imputer_not_transform(data): imputer = NoTransformIndicatorImputer(add_indicator=True) err_msg = ( "Call _fit_indicator and _transform_indicator in the imputer implementation" ) with pytest.raises(ValueError, match=err_msg): imputer.fit(data).transform(data) with pytest.raises(ValueError, match=err_msg): imputer.fit_transform(data) def test_base_no_precomputed_mask_fit(data): imputer = NoPrecomputedMaskFit(add_indicator=True) err_msg = "precomputed is True but the input data is not 
a mask" with pytest.raises(ValueError, match=err_msg): imputer.fit(data) with pytest.raises(ValueError, match=err_msg): imputer.fit_transform(data) def test_base_no_precomputed_mask_transform(data): imputer = NoPrecomputedMaskTransform(add_indicator=True) err_msg = "precomputed is True but the input data is not a mask" imputer.fit(data) with pytest.raises(ValueError, match=err_msg): imputer.transform(data) with pytest.raises(ValueError, match=err_msg): imputer.fit_transform(data) @pytest.mark.parametrize("X1_type", ["array", "dataframe"]) def test_assign_where(X1_type): """Check the behaviour of the private helpers `_assign_where`.""" rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X1 = _convert_container(rng.randn(n_samples, n_features), constructor_name=X1_type) X2 = rng.randn(n_samples, n_features) mask = rng.randint(0, 2, size=(n_samples, n_features)).astype(bool) _assign_where(X1, X2, mask) if X1_type == "dataframe": X1 = X1.to_numpy() assert_allclose(X1[mask], X2[mask])
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/tests/test_impute.py
sklearn/impute/tests/test_impute.py
import io import re import warnings from itertools import product import numpy as np import pytest from scipy import sparse from scipy.stats import kstest from sklearn import tree from sklearn.datasets import load_diabetes from sklearn.dummy import DummyRegressor from sklearn.exceptions import ConvergenceWarning # make IterativeImputer available from sklearn.experimental import enable_iterative_imputer # noqa: F401 from sklearn.impute import IterativeImputer, KNNImputer, MissingIndicator, SimpleImputer from sklearn.impute._base import _most_frequent from sklearn.linear_model import ARDRegression, BayesianRidge, RidgeCV from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline, make_union from sklearn.random_projection import _sparse_random_matrix from sklearn.utils._testing import ( _convert_container, assert_allclose, assert_allclose_dense_sparse, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.fixes import ( BSR_CONTAINERS, COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS, ) def _assert_array_equal_and_same_dtype(x, y): assert_array_equal(x, y) assert x.dtype == y.dtype def _assert_allclose_and_same_dtype(x, y): assert_allclose(x, y) assert x.dtype == y.dtype def _check_statistics( X, X_true, strategy, statistics, missing_values, sparse_container ): """Utility function for testing imputation for a given strategy. 
Test with dense and sparse arrays Check that: - the statistics (mean, median, mode) are correct - the missing values are imputed correctly""" err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % ( strategy, missing_values, ) assert_ae = assert_array_equal if X.dtype.kind == "f" or X_true.dtype.kind == "f": assert_ae = assert_array_almost_equal # Normal matrix imputer = SimpleImputer(missing_values=missing_values, strategy=strategy) X_trans = imputer.fit(X).transform(X.copy()) assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False)) assert_ae(X_trans, X_true, err_msg=err_msg.format(False)) # Sparse matrix imputer = SimpleImputer(missing_values=missing_values, strategy=strategy) imputer.fit(sparse_container(X)) X_trans = imputer.transform(sparse_container(X.copy())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True)) assert_ae(X_trans, X_true, err_msg=err_msg.format(True)) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_imputation_shape(strategy, csr_container): # Verify the shapes of the imputed matrix for different strategies. 
X = np.random.randn(10, 2) X[::2] = np.nan imputer = SimpleImputer(strategy=strategy) X_imputed = imputer.fit_transform(csr_container(X)) assert X_imputed.shape == (10, 2) X_imputed = imputer.fit_transform(X) assert X_imputed.shape == (10, 2) iterative_imputer = IterativeImputer(initial_strategy=strategy) X_imputed = iterative_imputer.fit_transform(X) assert X_imputed.shape == (10, 2) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) def test_imputation_deletion_warning(strategy): X = np.ones((3, 5)) X[:, 0] = np.nan imputer = SimpleImputer(strategy=strategy).fit(X) with pytest.warns(UserWarning, match="Skipping"): imputer.transform(X) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) def test_imputation_deletion_warning_feature_names(strategy): pd = pytest.importorskip("pandas") missing_values = np.nan feature_names = np.array(["a", "b", "c", "d"], dtype=object) X = pd.DataFrame( [ [missing_values, missing_values, 1, missing_values], [4, missing_values, 2, 10], ], columns=feature_names, ) imputer = SimpleImputer(strategy=strategy).fit(X) # check SimpleImputer returning feature name attribute correctly assert_array_equal(imputer.feature_names_in_, feature_names) # ensure that skipped feature warning includes feature name with pytest.warns( UserWarning, match=r"Skipping features without any observed values: \['b'\]" ): imputer.transform(X) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_imputation_error_sparse_0(strategy, csc_container): # check that error are raised when missing_values = 0 and input is sparse X = np.ones((3, 5)) X[0] = 0 X = csc_container(X) imputer = SimpleImputer(strategy=strategy, missing_values=0) with pytest.raises(ValueError, match="Provide a dense array"): imputer.fit(X) imputer.fit(X.toarray()) with pytest.raises(ValueError, match="Provide a dense array"): imputer.transform(X) def 
safe_median(arr, *args, **kwargs): # np.median([]) raises a TypeError for numpy >= 1.10.1 length = arr.size if hasattr(arr, "size") else len(arr) return np.nan if length == 0 else np.median(arr, *args, **kwargs) def safe_mean(arr, *args, **kwargs): # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1 length = arr.size if hasattr(arr, "size") else len(arr) return np.nan if length == 0 else np.mean(arr, *args, **kwargs) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_imputation_mean_median(csc_container): # Test imputation using the mean and median strategies, when # missing_values != 0. rng = np.random.RandomState(0) dim = 10 dec = 10 shape = (dim * dim, dim + dec) zeros = np.zeros(shape[0]) values = np.arange(1, shape[0] + 1) values[4::2] = -values[4::2] tests = [ ("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))), ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))), ] for strategy, test_missing_values, true_value_fun in tests: X = np.empty(shape) X_true = np.empty(shape) true_statistics = np.empty(shape[1]) # Create a matrix X with columns # - with only zeros, # - with only missing values # - with zeros, missing values and values # And a matrix X_true containing all true values for j in range(shape[1]): nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) nb_values = shape[0] - nb_zeros - nb_missing_values z = zeros[:nb_zeros] p = np.repeat(test_missing_values, nb_missing_values) v = values[rng.permutation(len(values))[:nb_values]] true_statistics[j] = true_value_fun(z, v, p) # Create the columns X[:, j] = np.hstack((v, z, p)) if 0 == test_missing_values: # XXX unreached code as of v0.22 X_true[:, j] = np.hstack( (v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros)) ) else: X_true[:, j] = np.hstack( (v, z, np.repeat(true_statistics[j], nb_missing_values)) ) # Shuffle them the same way np.random.RandomState(j).shuffle(X[:, 
j]) np.random.RandomState(j).shuffle(X_true[:, j]) # Mean doesn't support columns containing NaNs, median does if strategy == "median": cols_to_keep = ~np.isnan(X_true).any(axis=0) else: cols_to_keep = ~np.isnan(X_true).all(axis=0) X_true = X_true[:, cols_to_keep] _check_statistics( X, X_true, strategy, true_statistics, test_missing_values, csc_container ) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_imputation_median_special_cases(csc_container): # Test median imputation with sparse boundary cases X = np.array( [ [0, np.nan, np.nan], # odd: implicit zero [5, np.nan, np.nan], # odd: explicit nonzero [0, 0, np.nan], # even: average two zeros [-5, 0, np.nan], # even: avg zero and neg [0, 5, np.nan], # even: avg zero and pos [4, 5, np.nan], # even: avg nonzeros [-4, -5, np.nan], # even: avg negatives [-1, 2, np.nan], # even: crossing neg and pos ] ).transpose() X_imputed_median = np.array( [ [0, 0, 0], [5, 5, 5], [0, 0, 0], [-5, 0, -2.5], [0, 5, 2.5], [4, 5, 4.5], [-4, -5, -4.5], [-1, 2, 0.5], ] ).transpose() statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5] _check_statistics( X, X_imputed_median, "median", statistics_median, np.nan, csc_container ) @pytest.mark.parametrize("strategy", ["mean", "median"]) @pytest.mark.parametrize("dtype", [None, object, str]) def test_imputation_mean_median_error_invalid_type(strategy, dtype): X = np.array([["a", "b", 3], [4, "e", 6], ["g", "h", 9]], dtype=dtype) msg = "non-numeric data:\ncould not convert string to float:" with pytest.raises(ValueError, match=msg): imputer = SimpleImputer(strategy=strategy) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["mean", "median"]) @pytest.mark.parametrize("type", ["list", "dataframe"]) def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type): X = [["a", "b", 3], [4, "e", 6], ["g", "h", 9]] if type == "dataframe": pd = pytest.importorskip("pandas") X = pd.DataFrame(X) msg = "non-numeric data:\ncould not convert string to 
float:" with pytest.raises(ValueError, match=msg): imputer = SimpleImputer(strategy=strategy) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["constant", "most_frequent"]) @pytest.mark.parametrize("dtype", [str, np.dtype("U"), np.dtype("S")]) def test_imputation_const_mostf_error_invalid_types(strategy, dtype): # Test imputation on non-numeric data using "most_frequent" and "constant" # strategy X = np.array( [ [np.nan, np.nan, "a", "f"], [np.nan, "c", np.nan, "d"], [np.nan, "b", "d", np.nan], [np.nan, "c", "d", "h"], ], dtype=dtype, ) err_msg = "SimpleImputer does not support data" with pytest.raises(ValueError, match=err_msg): imputer = SimpleImputer(strategy=strategy) imputer.fit(X).transform(X) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_imputation_most_frequent(csc_container): # Test imputation using the most-frequent strategy. X = np.array( [ [-1, -1, 0, 5], [-1, 2, -1, 3], [-1, 1, 3, -1], [-1, 2, 3, 7], ] ) X_true = np.array( [ [2, 0, 5], [2, 3, 3], [1, 3, 3], [2, 3, 7], ] ) # scipy.stats.mode, used in SimpleImputer, doesn't return the first most # frequent as promised in the doc but the lowest most frequent. When this # test will fail after an update of scipy, SimpleImputer will need to be # updated to be consistent with the new (correct) behaviour _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1, csc_container) @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) def test_imputation_most_frequent_objects(marker): # Test imputation using the most-frequent strategy. 
X = np.array( [ [marker, marker, "a", "f"], [marker, "c", marker, "d"], [marker, "b", "d", marker], [marker, "c", "d", "h"], ], dtype=object, ) X_true = np.array( [ ["c", "a", "f"], ["c", "d", "d"], ["b", "d", "d"], ["c", "d", "h"], ], dtype=object, ) imputer = SimpleImputer(missing_values=marker, strategy="most_frequent") X_trans = imputer.fit(X).transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("dtype", [object, "category"]) def test_imputation_most_frequent_pandas(dtype): # Test imputation using the most frequent strategy on pandas df pd = pytest.importorskip("pandas") f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,") df = pd.read_csv(f, dtype=dtype) X_true = np.array( [["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"]], dtype=object, ) imputer = SimpleImputer(strategy="most_frequent") X_trans = imputer.fit_transform(df) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1.0, np.nan)]) def test_imputation_constant_error_invalid_type(X_data, missing_value): # Verify that exceptions are raised on invalid fill_value type X = np.full((3, 5), X_data, dtype=float) X[0, 0] = missing_value fill_value = "x" err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast" with pytest.raises(ValueError, match=re.escape(err_msg)): imputer = SimpleImputer( missing_values=missing_value, strategy="constant", fill_value=fill_value ) imputer.fit_transform(X) @pytest.mark.parametrize("keep_empty_features", [True, False]) def test_imputation_constant_integer(keep_empty_features): # Test imputation using the constant strategy on integers X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]]) X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]]) if not keep_empty_features: X_true = X_true[:, :-1] imputer = SimpleImputer( missing_values=-1, strategy="constant", fill_value=0, keep_empty_features=keep_empty_features, ) 
X_trans = imputer.fit_transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("array_constructor", CSR_CONTAINERS + [np.asarray]) @pytest.mark.parametrize("keep_empty_features", [True, False]) def test_imputation_constant_float(array_constructor, keep_empty_features): # Test imputation using the constant strategy on floats X = np.array( [ [np.nan, 1.1, 0, np.nan], [1.2, np.nan, 1.3, np.nan], [0, 0, np.nan, np.nan], [1.4, 1.5, 0, np.nan], ] ) X_true = np.array( [[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]] ) if not keep_empty_features: X_true = X_true[:, :-1] X = array_constructor(X) X_true = array_constructor(X_true) imputer = SimpleImputer( strategy="constant", fill_value=-1, keep_empty_features=keep_empty_features ) X_trans = imputer.fit_transform(X) assert_allclose_dense_sparse(X_trans, X_true) @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) @pytest.mark.parametrize("keep_empty_features", [True, False]) def test_imputation_constant_object(marker, keep_empty_features): # Test imputation using the constant strategy on objects X = np.array( [ [marker, "a", "b", marker], ["c", marker, "d", marker], ["e", "f", marker, marker], ["g", "h", "i", marker], ], dtype=object, ) X_true = np.array( [ ["missing", "a", "b", "missing"], ["c", "missing", "d", "missing"], ["e", "f", "missing", "missing"], ["g", "h", "i", "missing"], ], dtype=object, ) if not keep_empty_features: X_true = X_true[:, :-1] imputer = SimpleImputer( missing_values=marker, strategy="constant", fill_value="missing", keep_empty_features=keep_empty_features, ) X_trans = imputer.fit_transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("dtype", [object, "category"]) @pytest.mark.parametrize("keep_empty_features", [True, False]) def test_imputation_constant_pandas(dtype, keep_empty_features): # Test imputation using the constant strategy on pandas df pd = pytest.importorskip("pandas") f = 
io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,") df = pd.read_csv(f, dtype=dtype) X_true = np.array( [ ["missing_value", "i", "x", "missing_value"], ["a", "missing_value", "y", "missing_value"], ["a", "j", "missing_value", "missing_value"], ["b", "j", "x", "missing_value"], ], dtype=object, ) if not keep_empty_features: X_true = X_true[:, :-1] imputer = SimpleImputer( strategy="constant", keep_empty_features=keep_empty_features ) X_trans = imputer.fit_transform(df) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]]) def test_iterative_imputer_one_feature(X): # check we exit early when there is a single feature imputer = IterativeImputer().fit(X) assert imputer.n_iter_ == 0 imputer = IterativeImputer() imputer.fit([[1], [2]]) assert imputer.n_iter_ == 0 imputer.fit([[1], [np.nan]]) assert imputer.n_iter_ == 0 def test_imputation_pipeline_grid_search(): # Test imputation within a pipeline + gridsearch. X = _sparse_random_matrix(100, 100, density=0.10) missing_values = X.data[0] pipeline = Pipeline( [ ("imputer", SimpleImputer(missing_values=missing_values)), ("tree", tree.DecisionTreeRegressor(random_state=0)), ] ) parameters = {"imputer__strategy": ["mean", "median", "most_frequent"]} Y = _sparse_random_matrix(100, 1, density=0.10).toarray() gs = GridSearchCV(pipeline, parameters) gs.fit(X, Y) def test_imputation_copy(): # Test imputation with copy X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0) # copy=True, dense => copy X = X_orig.copy().toarray() imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert not np.all(X == Xt) # copy=True, sparse csr => copy X = X_orig.copy() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert not np.all(X.data == Xt.data) # copy=False, dense => no copy X = X_orig.copy().toarray() imputer = 
SimpleImputer(missing_values=0, strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_array_almost_equal(X, Xt) # copy=False, sparse csc => no copy X = X_orig.copy().tocsc() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_array_almost_equal(X.data, Xt.data) # copy=False, sparse csr => copy X = X_orig.copy() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert not np.all(X.data == Xt.data) # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is # made, even if copy=False. def test_iterative_imputer_zero_iters(): rng = np.random.RandomState(0) n = 100 d = 10 X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() missing_flag = X == 0 X[missing_flag] = np.nan imputer = IterativeImputer(max_iter=0) X_imputed = imputer.fit_transform(X) # with max_iter=0, only initial imputation is performed assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) # repeat but force n_iter_ to 0 imputer = IterativeImputer(max_iter=5).fit(X) # transformed should not be equal to initial imputation assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X)) imputer.n_iter_ = 0 # now they should be equal as only initial imputation is done assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X)) def test_iterative_imputer_verbose(): rng = np.random.RandomState(0) n = 100 d = 3 X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1) imputer.fit(X) imputer.transform(X) imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2) imputer.fit(X) imputer.transform(X) def test_iterative_imputer_all_missing(): n = 100 d = 3 X = np.zeros((n, d)) imputer = IterativeImputer(missing_values=0, max_iter=1) X_imputed = 
imputer.fit_transform(X) assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) @pytest.mark.parametrize( "imputation_order", ["random", "roman", "ascending", "descending", "arabic"] ) def test_iterative_imputer_imputation_order(imputation_order): rng = np.random.RandomState(0) n = 100 d = 10 max_iter = 2 X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() X[:, 0] = 1 # this column should not be discarded by IterativeImputer imputer = IterativeImputer( missing_values=0, max_iter=max_iter, n_nearest_features=5, sample_posterior=False, skip_complete=True, min_value=0, max_value=1, verbose=1, imputation_order=imputation_order, random_state=rng, ) imputer.fit_transform(X) ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_] assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_ if imputation_order == "roman": assert np.all(ordered_idx[: d - 1] == np.arange(1, d)) elif imputation_order == "arabic": assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1)) elif imputation_order == "random": ordered_idx_round_1 = ordered_idx[: d - 1] ordered_idx_round_2 = ordered_idx[d - 1 :] assert ordered_idx_round_1 != ordered_idx_round_2 elif "ending" in imputation_order: assert len(ordered_idx) == max_iter * (d - 1) @pytest.mark.parametrize( "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()] ) def test_iterative_imputer_estimators(estimator): rng = np.random.RandomState(0) n = 100 d = 10 X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer( missing_values=0, max_iter=1, estimator=estimator, random_state=rng ) imputer.fit_transform(X) # check that types are correct for estimators hashes = [] for triplet in imputer.imputation_sequence_: expected_type = ( type(estimator) if estimator is not None else type(BayesianRidge()) ) assert isinstance(triplet.estimator, expected_type) hashes.append(id(triplet.estimator)) # check that each 
estimator is unique assert len(set(hashes)) == len(hashes) def test_iterative_imputer_clip(): rng = np.random.RandomState(0) n = 100 d = 10 X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer( missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng ) Xt = imputer.fit_transform(X) assert_allclose(np.min(Xt[X == 0]), 0.1) assert_allclose(np.max(Xt[X == 0]), 0.2) assert_allclose(Xt[X != 0], X[X != 0]) def test_iterative_imputer_clip_truncnorm(): rng = np.random.RandomState(0) n = 100 d = 10 X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() X[:, 0] = 1 imputer = IterativeImputer( missing_values=0, max_iter=2, n_nearest_features=5, sample_posterior=True, min_value=0.1, max_value=0.2, verbose=1, imputation_order="random", random_state=rng, ) Xt = imputer.fit_transform(X) assert_allclose(np.min(Xt[X == 0]), 0.1) assert_allclose(np.max(Xt[X == 0]), 0.2) assert_allclose(Xt[X != 0], X[X != 0]) def test_iterative_imputer_truncated_normal_posterior(): # test that the values that are imputed using `sample_posterior=True` # with boundaries (`min_value` and `max_value` are not None) are drawn # from a distribution that looks gaussian via the Kolmogorov Smirnov test. 
# note that starting from the wrong random seed will make this test fail # because random sampling doesn't occur at all when the imputation # is outside of the (min_value, max_value) range rng = np.random.RandomState(42) X = rng.normal(size=(5, 5)) X[0][0] = np.nan imputer = IterativeImputer( min_value=0, max_value=0.5, sample_posterior=True, random_state=rng ) imputer.fit_transform(X) # generate multiple imputations for the single missing value imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)]) assert all(imputations >= 0) assert all(imputations <= 0.5) mu, sigma = imputations.mean(), imputations.std() ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm") if sigma == 0: sigma += 1e-12 ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm") # we want to fail to reject null hypothesis # null hypothesis: distributions are the same assert ks_statistic < 0.2 or p_value > 0.1, "The posterior does appear to be normal" @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) def test_iterative_imputer_missing_at_transform(strategy): rng = np.random.RandomState(0) n = 100 d = 10 X_train = rng.randint(low=0, high=3, size=(n, d)) X_test = rng.randint(low=0, high=3, size=(n, d)) X_train[:, 0] = 1 # definitely no missing values in 0th column X_test[0, 0] = 0 # definitely missing value in 0th column imputer = IterativeImputer( missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng ).fit(X_train) initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train) # if there were no missing values at time of fit, then imputer will # only use the initial imputer for that feature at transform assert_allclose( imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0] ) def test_iterative_imputer_transform_stochasticity(): rng1 = np.random.RandomState(0) rng2 = np.random.RandomState(1) n = 100 d = 10 X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray() 
# when sample_posterior=True, two transforms shouldn't be equal imputer = IterativeImputer( missing_values=0, max_iter=1, sample_posterior=True, random_state=rng1 ) imputer.fit(X) X_fitted_1 = imputer.transform(X) X_fitted_2 = imputer.transform(X) # sufficient to assert that the means are not the same assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2)) # when sample_posterior=False, and n_nearest_features=None # and imputation_order is not random # the two transforms should be identical even if rng are different imputer1 = IterativeImputer( missing_values=0, max_iter=1, sample_posterior=False, n_nearest_features=None, imputation_order="ascending", random_state=rng1, ) imputer2 = IterativeImputer( missing_values=0, max_iter=1, sample_posterior=False, n_nearest_features=None, imputation_order="ascending", random_state=rng2, ) imputer1.fit(X) imputer2.fit(X) X_fitted_1a = imputer1.transform(X) X_fitted_1b = imputer1.transform(X) X_fitted_2 = imputer2.transform(X) assert_allclose(X_fitted_1a, X_fitted_1b) assert_allclose(X_fitted_1a, X_fitted_2) def test_iterative_imputer_no_missing(): rng = np.random.RandomState(0) X = rng.rand(100, 100) X[:, 0] = np.nan m1 = IterativeImputer(max_iter=10, random_state=rng) m2 = IterativeImputer(max_iter=10, random_state=rng) pred1 = m1.fit(X).transform(X) pred2 = m2.fit_transform(X) # should exclude the first column entirely assert_allclose(X[:, 1:], pred1) # fit and fit_transform should both be identical assert_allclose(pred1, pred2) def test_iterative_imputer_rank_one(): rng = np.random.RandomState(0) d = 50 A = rng.rand(d, 1) B = rng.rand(1, d) X = np.dot(A, B) nan_mask = rng.rand(d, d) < 0.5 X_missing = X.copy() X_missing[nan_mask] = np.nan imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng) X_filled = imputer.fit_transform(X_missing) assert_allclose(X_filled, X, atol=0.02) @pytest.mark.parametrize("rank", [3, 5]) def test_iterative_imputer_transform_recovery(rank): rng = np.random.RandomState(0) n = 
70 d = 70 A = rng.rand(n, rank) B = rng.rand(rank, d) X_filled = np.dot(A, B) nan_mask = rng.rand(n, d) < 0.5 X_missing = X_filled.copy() X_missing[nan_mask] = np.nan # split up data in half n = n // 2 X_train = X_missing[:n] X_test_filled = X_filled[n:] X_test = X_missing[n:] imputer = IterativeImputer( max_iter=5, imputation_order="descending", verbose=1, random_state=rng ).fit(X_train) X_test_est = imputer.transform(X_test) assert_allclose(X_test_filled, X_test_est, atol=0.1) def test_iterative_imputer_additive_matrix(): rng = np.random.RandomState(0) n = 100 d = 10 A = rng.randn(n, d) B = rng.randn(n, d) X_filled = np.zeros(A.shape) for i in range(d): for j in range(d): X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2 # a quarter is randomly missing nan_mask = rng.rand(n, d) < 0.25 X_missing = X_filled.copy() X_missing[nan_mask] = np.nan # split up data n = n // 2 X_train = X_missing[:n] X_test_filled = X_filled[n:] X_test = X_missing[n:] imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train) X_test_est = imputer.transform(X_test) assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01) def test_iterative_imputer_early_stopping(): rng = np.random.RandomState(0) n = 50 d = 5 A = rng.rand(n, 1) B = rng.rand(1, d) X = np.dot(A, B) nan_mask = rng.rand(n, d) < 0.5 X_missing = X.copy() X_missing[nan_mask] = np.nan imputer = IterativeImputer( max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng ) X_filled_100 = imputer.fit_transform(X_missing) assert len(imputer.imputation_sequence_) == d * imputer.n_iter_ imputer = IterativeImputer( max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng ) X_filled_early = imputer.fit_transform(X_missing) assert_allclose(X_filled_100, X_filled_early, atol=1e-7) imputer = IterativeImputer( max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng ) imputer.fit(X_missing) assert imputer.n_iter_ == imputer.max_iter def 
test_iterative_imputer_catch_warning(): # check that we catch a RuntimeWarning due to a division by zero when a # feature is constant in the dataset X, y = load_diabetes(return_X_y=True) n_samples, n_features = X.shape # simulate that a feature only contain one category during fit X[:, 3] = 1 # add some missing values rng = np.random.RandomState(0) missing_rate = 0.15 for feat in range(n_features): sample_idx = rng.choice( np.arange(n_samples), size=int(n_samples * missing_rate), replace=False ) X[sample_idx, feat] = np.nan imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True) with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) X_fill = imputer.fit_transform(X, y) assert not np.any(np.isnan(X_fill)) @pytest.mark.parametrize( "min_value, max_value, correct_output", [ (0, 100, np.array([[0] * 3, [100] * 3])), (None, None, np.array([[-np.inf] * 3, [np.inf] * 3])), (-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])),
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/tests/__init__.py
sklearn/impute/tests/__init__.py
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/impute/tests/test_knn.py
sklearn/impute/tests/test_knn.py
import numpy as np import pytest from sklearn import config_context from sklearn.impute import KNNImputer from sklearn.metrics.pairwise import nan_euclidean_distances, pairwise_distances from sklearn.neighbors import KNeighborsRegressor from sklearn.utils._testing import assert_allclose @pytest.mark.parametrize("weights", ["uniform", "distance"]) @pytest.mark.parametrize("n_neighbors", range(1, 6)) def test_knn_imputer_shape(weights, n_neighbors): # Verify the shapes of the imputed matrix for different weights and # number of neighbors. n_rows = 10 n_cols = 2 X = np.random.rand(n_rows, n_cols) X[0, 0] = np.nan imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights) X_imputed = imputer.fit_transform(X) assert X_imputed.shape == (n_rows, n_cols) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_default_with_invalid_input(na): # Test imputation with default values and invalid input # Test with inf present X = np.array( [ [np.inf, 1, 1, 2, na], [2, 1, 2, 2, 3], [3, 2, 3, 3, 8], [na, 6, 0, 5, 13], [na, 7, 0, 7, 8], [6, 6, 2, 5, 7], ] ) with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"): KNNImputer(missing_values=na).fit(X) # Test with inf present in matrix passed in transform() X = np.array( [ [np.inf, 1, 1, 2, na], [2, 1, 2, 2, 3], [3, 2, 3, 3, 8], [na, 6, 0, 5, 13], [na, 7, 0, 7, 8], [6, 6, 2, 5, 7], ] ) X_fit = np.array( [ [0, 1, 1, 2, na], [2, 1, 2, 2, 3], [3, 2, 3, 3, 8], [na, 6, 0, 5, 13], [na, 7, 0, 7, 8], [6, 6, 2, 5, 7], ] ) imputer = KNNImputer(missing_values=na).fit(X_fit) with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"): imputer.transform(X) # Test with missing_values=0 when NaN present imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform") X = np.array( [ [np.nan, 0, 0, 0, 5], [np.nan, 1, 0, np.nan, 3], [np.nan, 2, 0, 0, 0], [np.nan, 6, 0, 5, 13], ] ) msg = "Input X contains NaN" with pytest.raises(ValueError, match=msg): imputer.fit(X) X = np.array( [ [0, 0], [np.nan, 2], 
] ) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_removes_all_na_features(na): X = np.array( [ [1, 1, na, 1, 1, 1.0], [2, 3, na, 2, 2, 2], [3, 4, na, 3, 3, na], [6, 4, na, na, 6, 6], ] ) knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X) X_transform = knn.transform(X) assert not np.isnan(X_transform).any() assert X_transform.shape == (4, 5) X_test = np.arange(0, 12).reshape(2, 6) X_transform = knn.transform(X_test) assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_zero_nan_imputes_the_same(na): # Test with an imputable matrix and compare with different missing_values X_zero = np.array( [ [1, 0, 1, 1, 1.0], [2, 2, 2, 2, 2], [3, 3, 3, 3, 0], [6, 6, 0, 6, 6], ] ) X_nan = np.array( [ [1, na, 1, 1, 1.0], [2, 2, 2, 2, 2], [3, 3, 3, 3, na], [6, 6, na, 6, 6], ] ) X_imputed = np.array( [ [1, 2.5, 1, 1, 1.0], [2, 2, 2, 2, 2], [3, 3, 3, 3, 1.5], [6, 6, 2.5, 6, 6], ] ) imputer_zero = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform") imputer_nan = KNNImputer(missing_values=na, n_neighbors=2, weights="uniform") assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed) assert_allclose( imputer_zero.fit_transform(X_zero), imputer_nan.fit_transform(X_nan) ) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_verify(na): # Test with an imputable matrix X = np.array( [ [1, 0, 0, 1], [2, 1, 2, na], [3, 2, 3, na], [na, 4, 5, 5], [6, na, 6, 7], [8, 8, 8, 8], [16, 15, 18, 19], ] ) X_imputed = np.array( [ [1, 0, 0, 1], [2, 1, 2, 8], [3, 2, 3, 8], [4, 4, 5, 5], [6, 3, 6, 7], [8, 8, 8, 8], [16, 15, 18, 19], ] ) imputer = KNNImputer(missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed) # Test when there is not enough neighbors X = np.array( [ [1, 0, 0, na], [2, 1, 2, na], [3, 2, 3, na], [4, 4, 5, na], [6, 7, 6, na], [8, 8, 8, na], [20, 20, 20, 20], [22, 22, 22, 22], ] ) # Not enough neighbors, use column mean from training X_impute_value = (20 
+ 22) / 2 X_imputed = np.array( [ [1, 0, 0, X_impute_value], [2, 1, 2, X_impute_value], [3, 2, 3, X_impute_value], [4, 4, 5, X_impute_value], [6, 7, 6, X_impute_value], [8, 8, 8, X_impute_value], [20, 20, 20, 20], [22, 22, 22, 22], ] ) imputer = KNNImputer(missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed) # Test when data in fit() and transform() are different X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 16]]) X1 = np.array([[1, 0], [3, 2], [4, na]]) X_2_1 = (0 + 3 + 6 + 7 + 8) / 5 X1_imputed = np.array([[1, 0], [3, 2], [4, X_2_1]]) imputer = KNNImputer(missing_values=na) assert_allclose(imputer.fit(X).transform(X1), X1_imputed) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_one_n_neighbors(na): X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]]) X_imputed = np.array([[0, 0], [4, 2], [4, 3], [5, 3], [7, 7], [7, 8], [14, 13]]) imputer = KNNImputer(n_neighbors=1, missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_all_samples_are_neighbors(na): X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]]) X_imputed = np.array( [[0, 0], [6.25, 2], [4, 3], [5, 5.75], [7, 7], [6.25, 8], [14, 13]] ) n_neighbors = X.shape[0] - 1 imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed) n_neighbors = X.shape[0] imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na) assert_allclose(imputer_plus1.fit_transform(X), X_imputed) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_weight_uniform(na): X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]) # Test with "uniform" weight (or unweighted) X_imputed_uniform = np.array( [[0, 0], [5, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] ) imputer = KNNImputer(weights="uniform", missing_values=na) assert_allclose(imputer.fit_transform(X), 
X_imputed_uniform) # Test with "callable" weight def no_weight(dist): return None imputer = KNNImputer(weights=no_weight, missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed_uniform) # Test with "callable" uniform weight def uniform_weight(dist): return np.ones_like(dist) imputer = KNNImputer(weights=uniform_weight, missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed_uniform) @pytest.mark.parametrize("na", [np.nan, -1]) def test_knn_imputer_weight_distance(na): X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]) # Test with "distance" weight nn = KNeighborsRegressor(metric="euclidean", weights="distance") X_rows_idx = [0, 2, 3, 4, 5, 6] nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0]) knn_imputed_value = nn.predict(X[1:2, 1:])[0] # Manual calculation X_neighbors_idx = [0, 2, 3, 4, 5] dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na) weights = 1 / dist[:, X_neighbors_idx].ravel() manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights) X_imputed_distance1 = np.array( [[0, 0], [manual_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] ) # NearestNeighbor calculation X_imputed_distance2 = np.array( [[0, 0], [knn_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]] ) imputer = KNNImputer(weights="distance", missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed_distance1) assert_allclose(imputer.fit_transform(X), X_imputed_distance2) # Test with weights = "distance" and n_neighbors=2 X = np.array( [ [na, 0, 0], [2, 1, 2], [3, 2, 3], [4, 5, 5], ] ) # neighbors are rows 1, 2, the nan_euclidean_distances are: dist_0_1 = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2)) dist_0_2 = np.sqrt((3 / 2) * ((2 - 0) ** 2 + (3 - 0) ** 2)) imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2]) X_imputed = np.array( [ [imputed_value, 0, 0], [2, 1, 2], [3, 2, 3], [4, 5, 5], ] ) imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na) 
assert_allclose(imputer.fit_transform(X), X_imputed) # Test with varying missingness patterns X = np.array( [ [1, 0, 0, 1], [0, na, 1, na], [1, 1, 1, na], [0, 1, 0, 0], [0, 0, 0, 0], [1, 0, 1, 1], [10, 10, 10, 10], ] ) # Get weights of donor neighbors dist = nan_euclidean_distances(X, missing_values=na) r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]] r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]] r1c1_nbor_wt = 1 / r1c1_nbor_dists r1c3_nbor_wt = 1 / r1c3_nbor_dists r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]] r2c3_nbor_wt = 1 / r2c3_nbor_dists # Collect donor values col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy() col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy() # Final imputed values r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt) r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt) r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt) X_imputed = np.array( [ [1, 0, 0, 1], [0, r1c1_imp, 1, r1c3_imp], [1, 1, 1, r2c3_imp], [0, 1, 0, 0], [0, 0, 0, 0], [1, 0, 1, 1], [10, 10, 10, 10], ] ) imputer = KNNImputer(weights="distance", missing_values=na) assert_allclose(imputer.fit_transform(X), X_imputed) X = np.array( [ [0, 0, 0, na], [1, 1, 1, na], [2, 2, na, 2], [3, 3, 3, 3], [4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6], [na, 7, 7, 7], ] ) dist = pairwise_distances( X, metric="nan_euclidean", squared=False, missing_values=na ) # Calculate weights r0c3_w = 1.0 / dist[0, 2:-1] r1c3_w = 1.0 / dist[1, 2:-1] r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)] r7c0_w = 1.0 / dist[7, 2:7] # Calculate weighted averages r0c3 = np.average(X[2:-1, -1], weights=r0c3_w) r1c3 = np.average(X[2:-1, -1], weights=r1c3_w) r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w) r7c0 = np.average(X[2:7, 0], weights=r7c0_w) X_imputed = np.array( [ [0, 0, 0, r0c3], [1, 1, 1, r1c3], [2, 2, r2c2, 2], [3, 3, 3, 3], [4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6], [r7c0, 7, 7, 7], ] ) imputer_comp_wt = KNNImputer(missing_values=na, 
weights="distance") assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed) def test_knn_imputer_callable_metric(): # Define callable metric that returns the l1 norm: def custom_callable(x, y, missing_values=np.nan, squared=False): x = np.ma.array(x, mask=np.isnan(x)) y = np.ma.array(y, mask=np.isnan(y)) dist = np.nansum(np.abs(x - y)) return dist X = np.array([[4, 3, 3, np.nan], [6, 9, 6, 9], [4, 8, 6, 9], [np.nan, 9, 11, 10.0]]) X_0_3 = (9 + 9) / 2 X_3_0 = (6 + 4) / 2 X_imputed = np.array( [[4, 3, 3, X_0_3], [6, 9, 6, 9], [4, 8, 6, 9], [X_3_0, 9, 11, 10.0]] ) imputer = KNNImputer(n_neighbors=2, metric=custom_callable) assert_allclose(imputer.fit_transform(X), X_imputed) @pytest.mark.parametrize("working_memory", [None, 0]) @pytest.mark.parametrize("na", [-1, np.nan]) # Note that we use working_memory=0 to ensure that chunking is tested, even # for a small dataset. However, it should raise a UserWarning that we ignore. @pytest.mark.filterwarnings("ignore:adhere to working_memory") def test_knn_imputer_with_simple_example(na, working_memory): X = np.array( [ [0, na, 0, na], [1, 1, 1, na], [2, 2, na, 2], [3, 3, 3, 3], [4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6], [na, 7, 7, 7], ] ) r0c1 = np.mean(X[1:6, 1]) r0c3 = np.mean(X[2:-1, -1]) r1c3 = np.mean(X[2:-1, -1]) r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2]) r7c0 = np.mean(X[2:-1, 0]) X_imputed = np.array( [ [0, r0c1, 0, r0c3], [1, 1, 1, r1c3], [2, 2, r2c2, 2], [3, 3, 3, 3], [4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6], [r7c0, 7, 7, 7], ] ) with config_context(working_memory=working_memory): imputer_comp = KNNImputer(missing_values=na) assert_allclose(imputer_comp.fit_transform(X), X_imputed) @pytest.mark.parametrize("na", [-1, np.nan]) @pytest.mark.parametrize("weights", ["uniform", "distance"]) def test_knn_imputer_not_enough_valid_distances(na, weights): # Samples with needed feature has nan distance X1 = np.array([[na, 11], [na, 1], [3, na]]) X1_imputed = np.array([[3, 11], [3, 1], [3, 6]]) knn = 
KNNImputer(missing_values=na, n_neighbors=1, weights=weights) assert_allclose(knn.fit_transform(X1), X1_imputed) X2 = np.array([[4, na]]) X2_imputed = np.array([[4, 6]]) assert_allclose(knn.transform(X2), X2_imputed) @pytest.mark.parametrize("na", [-1, np.nan]) @pytest.mark.parametrize("weights", ["uniform", "distance"]) def test_knn_imputer_nan_distance(na, weights): # Samples with nan distance should be excluded from the mean computation X1_train = np.array([[1, 1], [na, 2]]) X1_test = np.array([[0, na]]) X1_test_expected = np.array([[0, 1]]) knn1 = KNNImputer(n_neighbors=2, missing_values=na, weights=weights) knn1.fit(X1_train) assert_allclose(knn1.transform(X1_test), X1_test_expected) X2_train = np.array([[na, 1, 1], [2, na, 2], [3, 3, na]]) X2_test = np.array([[na, 0, na], [0, na, na], [na, na, 0]]) X2_test_expected = np.array([[3, 0, 1], [0, 3, 2], [2, 1, 0]]) knn2 = KNNImputer(n_neighbors=2, missing_values=na, weights=weights) knn2.fit(X2_train) assert_allclose(knn2.transform(X2_test), X2_test_expected) @pytest.mark.parametrize("na", [-1, np.nan]) def test_knn_imputer_drops_all_nan_features(na): X1 = np.array([[na, 1], [na, 2]]) knn = KNNImputer(missing_values=na, n_neighbors=1) X1_expected = np.array([[1], [2]]) assert_allclose(knn.fit_transform(X1), X1_expected) X2 = np.array([[1, 2], [3, na]]) X2_expected = np.array([[2], [1.5]]) assert_allclose(knn.transform(X2), X2_expected) @pytest.mark.parametrize("working_memory", [None, 0]) @pytest.mark.parametrize("na", [-1, np.nan]) def test_knn_imputer_distance_weighted_not_enough_neighbors(na, working_memory): X = np.array([[3, na], [2, na], [na, 4], [5, 6], [6, 8], [na, 5]]) dist = pairwise_distances( X, metric="nan_euclidean", squared=False, missing_values=na ) X_01 = np.average(X[3:5, 1], weights=1 / dist[0, 3:5]) X_11 = np.average(X[3:5, 1], weights=1 / dist[1, 3:5]) X_20 = np.average(X[3:5, 0], weights=1 / dist[2, 3:5]) X_50 = np.average(X[3:5, 0], weights=1 / dist[5, 3:5]) X_expected = np.array([[3, X_01], 
[2, X_11], [X_20, 4], [5, 6], [6, 8], [X_50, 5]]) with config_context(working_memory=working_memory): knn_3 = KNNImputer(missing_values=na, n_neighbors=3, weights="distance") assert_allclose(knn_3.fit_transform(X), X_expected) knn_4 = KNNImputer(missing_values=na, n_neighbors=4, weights="distance") assert_allclose(knn_4.fit_transform(X), X_expected) @pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)]) def test_knn_tags(na, allow_nan): knn = KNNImputer(missing_values=na) assert knn.__sklearn_tags__().input_tags.allow_nan == allow_nan
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/_gaussian_mixture.py
sklearn/mixture/_gaussian_mixture.py
"""Gaussian Mixture Model.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import math import numpy as np from sklearn._config import get_config from sklearn.externals import array_api_extra as xpx from sklearn.mixture._base import BaseMixture, _check_shape from sklearn.utils import check_array from sklearn.utils._array_api import ( _add_to_diagonal, _cholesky, _linalg_solve, get_namespace, get_namespace_and_device, ) from sklearn.utils._param_validation import StrOptions from sklearn.utils.extmath import row_norms ############################################################################### # Gaussian mixture shape checkers used by the GaussianMixture class def _check_weights(weights, n_components, xp=None): """Check the user provided 'weights'. Parameters ---------- weights : array-like of shape (n_components,) The proportions of components of each mixture. n_components : int Number of components. Returns ------- weights : array, shape (n_components,) """ weights = check_array(weights, dtype=[xp.float64, xp.float32], ensure_2d=False) _check_shape(weights, (n_components,), "weights") # check range if any(xp.less(weights, 0.0)) or any(xp.greater(weights, 1.0)): raise ValueError( "The parameter 'weights' should be in the range " "[0, 1], but got max value %.5f, min value %.5f" % (xp.min(weights), xp.max(weights)) ) # check normalization atol = 1e-6 if weights.dtype == xp.float32 else 1e-8 if not np.allclose(float(xp.abs(1.0 - xp.sum(weights))), 0.0, atol=atol): raise ValueError( "The parameter 'weights' should be normalized, but got sum(weights) = %.5f" % xp.sum(weights) ) return weights def _check_means(means, n_components, n_features, xp=None): """Validate the provided 'means'. Parameters ---------- means : array-like of shape (n_components, n_features) The centers of the current components. n_components : int Number of components. n_features : int Number of features. 
Returns ------- means : array, (n_components, n_features) """ xp, _ = get_namespace(means, xp=xp) means = check_array(means, dtype=[xp.float64, xp.float32], ensure_2d=False) _check_shape(means, (n_components, n_features), "means") return means def _check_precision_positivity(precision, covariance_type, xp=None): """Check a precision vector is positive-definite.""" xp, _ = get_namespace(precision, xp=xp) if xp.any(xp.less_equal(precision, 0.0)): raise ValueError("'%s precision' should be positive" % covariance_type) def _check_precision_matrix(precision, covariance_type, xp=None): """Check a precision matrix is symmetric and positive-definite.""" xp, _ = get_namespace(precision, xp=xp) if not ( xp.all(xpx.isclose(precision, precision.T)) and xp.all(xp.linalg.eigvalsh(precision) > 0.0) ): raise ValueError( "'%s precision' should be symmetric, positive-definite" % covariance_type ) def _check_precisions_full(precisions, covariance_type, xp=None): """Check the precision matrices are symmetric and positive-definite.""" xp, _ = get_namespace(precisions, xp=xp) for i in range(precisions.shape[0]): _check_precision_matrix(precisions[i, :, :], covariance_type, xp=xp) def _check_precisions(precisions, covariance_type, n_components, n_features, xp=None): """Validate user provided precisions. Parameters ---------- precisions : array-like 'full' : shape of (n_components, n_features, n_features) 'tied' : shape of (n_features, n_features) 'diag' : shape of (n_components, n_features) 'spherical' : shape of (n_components,) covariance_type : str n_components : int Number of components. n_features : int Number of features. 
Returns ------- precisions : array """ xp, _ = get_namespace(precisions, xp=xp) precisions = check_array( precisions, dtype=[xp.float64, xp.float32], ensure_2d=False, allow_nd=covariance_type == "full", ) precisions_shape = { "full": (n_components, n_features, n_features), "tied": (n_features, n_features), "diag": (n_components, n_features), "spherical": (n_components,), } _check_shape( precisions, precisions_shape[covariance_type], "%s precision" % covariance_type ) _check_precisions = { "full": _check_precisions_full, "tied": _check_precision_matrix, "diag": _check_precision_positivity, "spherical": _check_precision_positivity, } _check_precisions[covariance_type](precisions, covariance_type, xp=xp) return precisions ############################################################################### # Gaussian mixture parameters estimators (used by the M-Step) def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar, xp=None): """Estimate the full covariance matrices. Parameters ---------- resp : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariances : array, shape (n_components, n_features, n_features) The covariance matrix of the current components. """ xp, _, device_ = get_namespace_and_device(X, xp=xp) n_components, n_features = means.shape covariances = xp.empty( (n_components, n_features, n_features), device=device_, dtype=X.dtype ) for k in range(n_components): diff = X - means[k, :] covariances[k, :, :] = ((resp[:, k] * diff.T) @ diff) / nk[k] _add_to_diagonal(covariances[k, :, :], reg_covar, xp) return covariances def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar, xp=None): """Estimate the tied covariance matrix. 
Parameters ---------- resp : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariance : array, shape (n_features, n_features) The tied covariance matrix of the components. """ xp, _ = get_namespace(X, means, xp=xp) avg_X2 = X.T @ X avg_means2 = nk * means.T @ means covariance = avg_X2 - avg_means2 covariance /= xp.sum(nk) _add_to_diagonal(covariance, reg_covar, xp) return covariance def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar, xp=None): """Estimate the diagonal covariance vectors. Parameters ---------- responsibilities : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariances : array, shape (n_components, n_features) The covariance vector of the current components. """ xp, _ = get_namespace(X, xp=xp) avg_X2 = (resp.T @ (X * X)) / nk[:, xp.newaxis] avg_means2 = means**2 return avg_X2 - avg_means2 + reg_covar def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar, xp=None): """Estimate the spherical variance values. Parameters ---------- responsibilities : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- variances : array, shape (n_components,) The variance values of each components. """ xp, _ = get_namespace(X) return xp.mean( _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar, xp=xp), axis=1, ) def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type, xp=None): """Estimate the Gaussian distribution parameters. 
Parameters ---------- X : array-like of shape (n_samples, n_features) The input data array. resp : array-like of shape (n_samples, n_components) The responsibilities for each data sample in X. reg_covar : float The regularization added to the diagonal of the covariance matrices. covariance_type : {'full', 'tied', 'diag', 'spherical'} The type of precision matrices. Returns ------- nk : array-like of shape (n_components,) The numbers of data samples in the current components. means : array-like of shape (n_components, n_features) The centers of the current components. covariances : array-like The covariance matrix of the current components. The shape depends of the covariance_type. """ xp, _ = get_namespace(X, xp=xp) nk = xp.sum(resp, axis=0) + 10 * xp.finfo(resp.dtype).eps means = (resp.T @ X) / nk[:, xp.newaxis] covariances = { "full": _estimate_gaussian_covariances_full, "tied": _estimate_gaussian_covariances_tied, "diag": _estimate_gaussian_covariances_diag, "spherical": _estimate_gaussian_covariances_spherical, }[covariance_type](resp, X, nk, means, reg_covar, xp=xp) return nk, means, covariances def _compute_precision_cholesky(covariances, covariance_type, xp=None): """Compute the Cholesky decomposition of the precisions. Parameters ---------- covariances : array-like The covariance matrix of the current components. The shape depends of the covariance_type. covariance_type : {'full', 'tied', 'diag', 'spherical'} The type of precision matrices. Returns ------- precisions_cholesky : array-like The Cholesky decomposition of sample precisions of the current components. The shape depends of the covariance_type. """ xp, _, device_ = get_namespace_and_device(covariances, xp=xp) estimate_precision_error_message = ( "Fitting the mixture model failed because some components have " "ill-defined empirical covariance (for instance caused by singleton " "or collapsed samples). Try to decrease the number of components, " "increase reg_covar, or scale the input data." 
) dtype = covariances.dtype if dtype == xp.float32: estimate_precision_error_message += ( " The numerical accuracy can also be improved by passing float64" " data instead of float32." ) if covariance_type == "full": n_components, n_features, _ = covariances.shape precisions_chol = xp.empty( (n_components, n_features, n_features), device=device_, dtype=dtype ) for k in range(covariances.shape[0]): covariance = covariances[k, :, :] try: cov_chol = _cholesky(covariance, xp) # catch only numpy exceptions, b/c exceptions aren't part of array api spec except np.linalg.LinAlgError: raise ValueError(estimate_precision_error_message) precisions_chol[k, :, :] = _linalg_solve( cov_chol, xp.eye(n_features, dtype=dtype, device=device_), xp ).T elif covariance_type == "tied": _, n_features = covariances.shape try: cov_chol = _cholesky(covariances, xp) # catch only numpy exceptions, since exceptions are not part of array api spec except np.linalg.LinAlgError: raise ValueError(estimate_precision_error_message) precisions_chol = _linalg_solve( cov_chol, xp.eye(n_features, dtype=dtype, device=device_), xp ).T else: if xp.any(covariances <= 0.0): raise ValueError(estimate_precision_error_message) precisions_chol = 1.0 / xp.sqrt(covariances) return precisions_chol def _flipudlr(array, xp=None): """Reverse the rows and columns of an array.""" xp, _ = get_namespace(array, xp=xp) return xp.flip(xp.flip(array, axis=1), axis=0) def _compute_precision_cholesky_from_precisions(precisions, covariance_type, xp=None): r"""Compute the Cholesky decomposition of precisions using precisions themselves. As implemented in :func:`_compute_precision_cholesky`, the `precisions_cholesky_` is an upper-triangular matrix for each Gaussian component, which can be expressed as the $UU^T$ factorization of the precision matrix for each Gaussian component, where $U$ is an upper-triangular matrix. 
In order to use the Cholesky decomposition to get $UU^T$, the precision matrix $\Lambda$ needs to be permutated such that its rows and columns are reversed, which can be done by applying a similarity transformation with an exchange matrix $J$, where the 1 elements reside on the anti-diagonal and all other elements are 0. In particular, the Cholesky decomposition of the transformed precision matrix is $J\Lambda J=LL^T$, where $L$ is a lower-triangular matrix. Because $\Lambda=UU^T$ and $J=J^{-1}=J^T$, the `precisions_cholesky_` for each Gaussian component can be expressed as $JLJ$. Refer to #26415 for details. Parameters ---------- precisions : array-like The precision matrix of the current components. The shape depends on the covariance_type. covariance_type : {'full', 'tied', 'diag', 'spherical'} The type of precision matrices. Returns ------- precisions_cholesky : array-like The Cholesky decomposition of sample precisions of the current components. The shape depends on the covariance_type. """ if covariance_type == "full": precisions_cholesky = xp.stack( [ _flipudlr( _cholesky(_flipudlr(precisions[i, :, :], xp=xp), xp=xp), xp=xp ) for i in range(precisions.shape[0]) ] ) elif covariance_type == "tied": precisions_cholesky = _flipudlr( _cholesky(_flipudlr(precisions, xp=xp), xp=xp), xp=xp ) else: precisions_cholesky = xp.sqrt(precisions) return precisions_cholesky ############################################################################### # Gaussian mixture probability estimators def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features, xp=None): """Compute the log-det of the Cholesky decomposition of matrices. Parameters ---------- matrix_chol : array-like Cholesky decompositions of the matrices. 
'full' : shape of (n_components, n_features, n_features) 'tied' : shape of (n_features, n_features) 'diag' : shape of (n_components, n_features) 'spherical' : shape of (n_components,) covariance_type : {'full', 'tied', 'diag', 'spherical'} n_features : int Number of features. Returns ------- log_det_precision_chol : array-like of shape (n_components,) The determinant of the precision matrix for each component. """ xp, _ = get_namespace(matrix_chol, xp=xp) if covariance_type == "full": n_components, _, _ = matrix_chol.shape log_det_chol = xp.sum( xp.log(xp.reshape(matrix_chol, (n_components, -1))[:, :: n_features + 1]), axis=1, ) elif covariance_type == "tied": log_det_chol = xp.sum(xp.log(xp.linalg.diagonal(matrix_chol))) elif covariance_type == "diag": log_det_chol = xp.sum(xp.log(matrix_chol), axis=1) else: log_det_chol = n_features * xp.log(matrix_chol) return log_det_chol def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type, xp=None): """Estimate the log Gaussian probability. Parameters ---------- X : array-like of shape (n_samples, n_features) means : array-like of shape (n_components, n_features) precisions_chol : array-like Cholesky decompositions of the precision matrices. 'full' : shape of (n_components, n_features, n_features) 'tied' : shape of (n_features, n_features) 'diag' : shape of (n_components, n_features) 'spherical' : shape of (n_components,) covariance_type : {'full', 'tied', 'diag', 'spherical'} Returns ------- log_prob : array, shape (n_samples, n_components) """ xp, _, device_ = get_namespace_and_device(X, means, precisions_chol, xp=xp) n_samples, n_features = X.shape n_components, _ = means.shape # The determinant of the precision matrix from the Cholesky decomposition # corresponds to the negative half of the determinant of the full precision # matrix. 
# In short: det(precision_chol) = - det(precision) / 2 log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features) if covariance_type == "full": log_prob = xp.empty((n_samples, n_components), dtype=X.dtype, device=device_) for k in range(means.shape[0]): mu = means[k, :] prec_chol = precisions_chol[k, :, :] y = (X @ prec_chol) - (mu @ prec_chol) log_prob[:, k] = xp.sum(xp.square(y), axis=1) elif covariance_type == "tied": log_prob = xp.empty((n_samples, n_components), dtype=X.dtype, device=device_) for k in range(means.shape[0]): mu = means[k, :] y = (X @ precisions_chol) - (mu @ precisions_chol) log_prob[:, k] = xp.sum(xp.square(y), axis=1) elif covariance_type == "diag": precisions = precisions_chol**2 log_prob = ( xp.sum((means**2 * precisions), axis=1) - 2.0 * (X @ (means * precisions).T) + (X**2 @ precisions.T) ) elif covariance_type == "spherical": precisions = precisions_chol**2 log_prob = ( xp.sum(means**2, axis=1) * precisions - 2 * (X @ means.T * precisions) + xp.linalg.outer(row_norms(X, squared=True), precisions) ) # Since we are using the precision of the Cholesky decomposition, # `- 0.5 * log_det_precision` becomes `+ log_det_precision_chol` return -0.5 * (n_features * math.log(2 * math.pi) + log_prob) + log_det class GaussianMixture(BaseMixture): """Gaussian Mixture. Representation of a Gaussian mixture model probability distribution. This class allows to estimate the parameters of a Gaussian mixture distribution. Read more in the :ref:`User Guide <gmm>`. .. versionadded:: 0.18 Parameters ---------- n_components : int, default=1 The number of mixture components. covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' String describing the type of covariance parameters to use. Must be one of: - 'full': each component has its own general covariance matrix. - 'tied': all components share the same general covariance matrix. - 'diag': each component has its own diagonal covariance matrix. 
- 'spherical': each component has its own single variance. For an example of using `covariance_type`, refer to :ref:`sphx_glr_auto_examples_mixture_plot_gmm_selection.py`. tol : float, default=1e-3 The convergence threshold. EM iterations will stop when the lower bound average gain is below this threshold. reg_covar : float, default=1e-6 Non-negative regularization added to the diagonal of covariance. Allows to assure that the covariance matrices are all positive. max_iter : int, default=100 The number of EM iterations to perform. n_init : int, default=1 The number of initializations to perform. The best results are kept. init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ default='kmeans' The method used to initialize the weights, the means and the precisions. String must be one of: - 'kmeans' : responsibilities are initialized using kmeans. - 'k-means++' : use the k-means++ method to initialize. - 'random' : responsibilities are initialized randomly. - 'random_from_data' : initial means are randomly selected data points. .. versionchanged:: v1.1 `init_params` now accepts 'random_from_data' and 'k-means++' as initialization methods. weights_init : array-like of shape (n_components, ), default=None The user-provided initial weights. If it is None, weights are initialized using the `init_params` method. means_init : array-like of shape (n_components, n_features), default=None The user-provided initial means, If it is None, means are initialized using the `init_params` method. precisions_init : array-like, default=None The user-provided initial precisions (inverse of the covariance matrices). If it is None, precisions are initialized using the 'init_params' method. 
The shape depends on 'covariance_type':: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' random_state : int, RandomState instance or None, default=None Controls the random seed given to the method chosen to initialize the parameters (see `init_params`). In addition, it controls the generation of random samples from the fitted distribution (see the method `sample`). Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. warm_start : bool, default=False If 'warm_start' is True, the solution of the last fitting is used as initialization for the next call of fit(). This can speed up convergence when fit is called several times on similar problems. In that case, 'n_init' is ignored and only a single initialization occurs upon the first call. See :term:`the Glossary <warm_start>`. verbose : int, default=0 Enable verbose output. If 1 then it prints the current initialization and each iteration step. If greater than 1 then it prints also the log probability and the time needed for each step. verbose_interval : int, default=10 Number of iteration done before the next print. Attributes ---------- weights_ : array-like of shape (n_components,) The weights of each mixture components. means_ : array-like of shape (n_components, n_features) The mean of each mixture component. covariances_ : array-like The covariance of each mixture component. The shape depends on `covariance_type`:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' For an example of using covariances, refer to :ref:`sphx_glr_auto_examples_mixture_plot_gmm_covariances.py`. precisions_ : array-like The precision matrices for each component in the mixture. A precision matrix is the inverse of a covariance matrix. 
A covariance matrix is symmetric positive definite so the mixture of Gaussian can be equivalently parameterized by the precision matrices. Storing the precision matrices instead of the covariance matrices makes it more efficient to compute the log-likelihood of new samples at test time. The shape depends on `covariance_type`:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' precisions_cholesky_ : array-like The Cholesky decomposition of the precision matrices of each mixture component. A precision matrix is the inverse of a covariance matrix. A covariance matrix is symmetric positive definite so the mixture of Gaussian can be equivalently parameterized by the precision matrices. Storing the precision matrices instead of the covariance matrices makes it more efficient to compute the log-likelihood of new samples at test time. The shape depends on `covariance_type`:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' converged_ : bool True when convergence of the best fit of EM was reached, False otherwise. n_iter_ : int Number of step used by the best fit of EM to reach the convergence. lower_bound_ : float Lower bound value on the log-likelihood (of the training data with respect to the model) of the best fit of EM. lower_bounds_ : array-like of shape (`n_iter_`,) The list of lower bound values on the log-likelihood from each iteration of the best fit of EM. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- BayesianGaussianMixture : Gaussian mixture model fit with a variational inference. 
Examples -------- >>> import numpy as np >>> from sklearn.mixture import GaussianMixture >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]]) >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X) >>> gm.means_ array([[10., 2.], [ 1., 2.]]) >>> gm.predict([[0, 0], [12, 3]]) array([1, 0]) For a comparison of Gaussian Mixture with other clustering algorithms, see :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`. For an illustration of the negative log-likelihood surface of a :class:`~sklearn.mixture.GaussianMixture` Model, see :ref:`sphx_glr_auto_examples_mixture_plot_gmm_pdf.py`. """ _parameter_constraints: dict = { **BaseMixture._parameter_constraints, "covariance_type": [StrOptions({"full", "tied", "diag", "spherical"})], "weights_init": ["array-like", None], "means_init": ["array-like", None], "precisions_init": ["array-like", None], } def __init__( self, n_components=1, *, covariance_type="full", tol=1e-3, reg_covar=1e-6, max_iter=100, n_init=1, init_params="kmeans", weights_init=None, means_init=None, precisions_init=None, random_state=None, warm_start=False, verbose=0, verbose_interval=10, ): super().__init__( n_components=n_components, tol=tol, reg_covar=reg_covar, max_iter=max_iter, n_init=n_init, init_params=init_params, random_state=random_state, warm_start=warm_start, verbose=verbose, verbose_interval=verbose_interval, ) self.covariance_type = covariance_type self.weights_init = weights_init self.means_init = means_init self.precisions_init = precisions_init def _check_parameters(self, X, xp=None): """Check the Gaussian mixture parameters are well defined.""" _, n_features = X.shape if self.weights_init is not None: self.weights_init = _check_weights( self.weights_init, self.n_components, xp=xp ) if self.means_init is not None: self.means_init = _check_means( self.means_init, self.n_components, n_features, xp=xp ) if self.precisions_init is not None: self.precisions_init = _check_precisions( self.precisions_init, 
self.covariance_type, self.n_components, n_features, xp=xp, ) allowed_init_params = ["random", "random_from_data"] if ( get_config()["array_api_dispatch"] and self.init_params not in allowed_init_params ): raise NotImplementedError( f"Allowed `init_params` are {allowed_init_params} if " f"'array_api_dispatch' is enabled. You passed " f"init_params={self.init_params!r}, which are not implemented to work " "with 'array_api_dispatch' enabled. Please disable " f"'array_api_dispatch' to use init_params={self.init_params!r}." ) def _initialize_parameters(self, X, random_state, xp=None): # If all the initial parameters are all provided, then there is no need to run # the initialization. compute_resp = ( self.weights_init is None or self.means_init is None or self.precisions_init is None ) if compute_resp: super()._initialize_parameters(X, random_state, xp=xp) else: self._initialize(X, None, xp=xp) def _initialize(self, X, resp, xp=None): """Initialization of the Gaussian mixture parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) resp : array-like of shape (n_samples, n_components) """ xp, _, device_ = get_namespace_and_device(X, xp=xp) n_samples, _ = X.shape weights, means, covariances = None, None, None if resp is not None: weights, means, covariances = _estimate_gaussian_parameters( X, resp, self.reg_covar, self.covariance_type, xp=xp ) if self.weights_init is None: weights /= n_samples self.weights_ = weights if self.weights_init is None else self.weights_init self.weights_ = xp.asarray(self.weights_, device=device_) self.means_ = means if self.means_init is None else self.means_init if self.precisions_init is None: self.covariances_ = covariances self.precisions_cholesky_ = _compute_precision_cholesky( covariances, self.covariance_type, xp=xp ) else: self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions( self.precisions_init, self.covariance_type, xp=xp ) def _m_step(self, X, log_resp, xp=None): """M step. 
Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. """ xp, _ = get_namespace(X, log_resp, xp=xp) self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters( X, xp.exp(log_resp), self.reg_covar, self.covariance_type, xp=xp ) self.weights_ /= xp.sum(self.weights_) self.precisions_cholesky_ = _compute_precision_cholesky( self.covariances_, self.covariance_type, xp=xp ) def _estimate_log_prob(self, X, xp=None): return _estimate_log_gaussian_prob( X, self.means_, self.precisions_cholesky_, self.covariance_type, xp=xp ) def _estimate_log_weights(self, xp=None): xp, _ = get_namespace(self.weights_, xp=xp) return xp.log(self.weights_) def _compute_lower_bound(self, _, log_prob_norm): return log_prob_norm def _get_parameters(self): return ( self.weights_, self.means_, self.covariances_, self.precisions_cholesky_, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/_base.py
sklearn/mixture/_base.py
"""Base class for mixture models.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from abc import ABCMeta, abstractmethod from contextlib import nullcontext from numbers import Integral, Real from time import time import numpy as np from sklearn import cluster from sklearn.base import BaseEstimator, DensityMixin, _fit_context from sklearn.cluster import kmeans_plusplus from sklearn.exceptions import ConvergenceWarning from sklearn.utils import check_random_state from sklearn.utils._array_api import ( _convert_to_numpy, _is_numpy_namespace, _logsumexp, _max_precision_float_dtype, get_namespace, get_namespace_and_device, ) from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.validation import check_is_fitted, validate_data def _check_shape(param, param_shape, name): """Validate the shape of the input parameter 'param'. Parameters ---------- param : array param_shape : tuple name : str """ if param.shape != param_shape: raise ValueError( "The parameter '%s' should have the shape of %s, but got %s" % (name, param_shape, param.shape) ) class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta): """Base class for mixture models. This abstract class specifies an interface for all mixture classes and provides basic common methods for mixture models. 
""" _parameter_constraints: dict = { "n_components": [Interval(Integral, 1, None, closed="left")], "tol": [Interval(Real, 0.0, None, closed="left")], "reg_covar": [Interval(Real, 0.0, None, closed="left")], "max_iter": [Interval(Integral, 0, None, closed="left")], "n_init": [Interval(Integral, 1, None, closed="left")], "init_params": [ StrOptions({"kmeans", "random", "random_from_data", "k-means++"}) ], "random_state": ["random_state"], "warm_start": ["boolean"], "verbose": ["verbose"], "verbose_interval": [Interval(Integral, 1, None, closed="left")], } def __init__( self, n_components, tol, reg_covar, max_iter, n_init, init_params, random_state, warm_start, verbose, verbose_interval, ): self.n_components = n_components self.tol = tol self.reg_covar = reg_covar self.max_iter = max_iter self.n_init = n_init self.init_params = init_params self.random_state = random_state self.warm_start = warm_start self.verbose = verbose self.verbose_interval = verbose_interval @abstractmethod def _check_parameters(self, X, xp=None): """Check initial parameters of the derived class. Parameters ---------- X : array-like of shape (n_samples, n_features) """ pass def _initialize_parameters(self, X, random_state, xp=None): """Initialize the model parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) random_state : RandomState A random number generator instance that controls the random seed used for the method chosen to initialize the parameters. 
""" xp, _, device = get_namespace_and_device(X, xp=xp) n_samples, _ = X.shape if self.init_params == "kmeans": resp = np.zeros((n_samples, self.n_components), dtype=X.dtype) label = ( cluster.KMeans( n_clusters=self.n_components, n_init=1, random_state=random_state ) .fit(X) .labels_ ) resp[np.arange(n_samples), label] = 1 elif self.init_params == "random": resp = xp.asarray( random_state.uniform(size=(n_samples, self.n_components)), dtype=X.dtype, device=device, ) resp /= xp.sum(resp, axis=1)[:, xp.newaxis] elif self.init_params == "random_from_data": resp = xp.zeros( (n_samples, self.n_components), dtype=X.dtype, device=device ) indices = random_state.choice( n_samples, size=self.n_components, replace=False ) # TODO: when array API supports __setitem__ with fancy indexing we # can use the previous code: # resp[indices, xp.arange(self.n_components)] = 1 # Until then we use a for loop on one dimension. for col, index in enumerate(indices): resp[index, col] = 1 elif self.init_params == "k-means++": resp = np.zeros((n_samples, self.n_components), dtype=X.dtype) _, indices = kmeans_plusplus( X, self.n_components, random_state=random_state, ) resp[indices, np.arange(self.n_components)] = 1 self._initialize(X, resp) @abstractmethod def _initialize(self, X, resp): """Initialize the model parameters of the derived class. Parameters ---------- X : array-like of shape (n_samples, n_features) resp : array-like of shape (n_samples, n_components) """ pass def fit(self, X, y=None): """Estimate model parameters with the EM algorithm. The method fits the model ``n_init`` times and sets the parameters with which the model has the largest likelihood or lower bound. Within each trial, the method iterates between E-step and M-step for ``max_iter`` times until the change of likelihood or lower bound is less than ``tol``, otherwise, a ``ConvergenceWarning`` is raised. If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single initialization is performed upon the first call. 
Upon consecutive calls, training starts where it left off. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object The fitted mixture. """ # parameters are validated in fit_predict self.fit_predict(X, y) return self @_fit_context(prefer_skip_nested_validation=True) def fit_predict(self, X, y=None): """Estimate model parameters using X and predict the labels for X. The method fits the model ``n_init`` times and sets the parameters with which the model has the largest likelihood or lower bound. Within each trial, the method iterates between E-step and M-step for `max_iter` times until the change of likelihood or lower bound is less than `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is raised. After fitting, it predicts the most probable label for the input data points. .. versionadded:: 0.20 Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. Returns ------- labels : array, shape (n_samples,) Component labels. 
""" xp, _ = get_namespace(X) X = validate_data(self, X, dtype=[xp.float64, xp.float32], ensure_min_samples=2) if X.shape[0] < self.n_components: raise ValueError( "Expected n_samples >= n_components " f"but got n_components = {self.n_components}, " f"n_samples = {X.shape[0]}" ) self._check_parameters(X, xp=xp) # if we enable warm_start, we will have a unique initialisation do_init = not (self.warm_start and hasattr(self, "converged_")) n_init = self.n_init if do_init else 1 max_lower_bound = -xp.inf best_lower_bounds = [] self.converged_ = False random_state = check_random_state(self.random_state) n_samples, _ = X.shape for init in range(n_init): self._print_verbose_msg_init_beg(init) if do_init: self._initialize_parameters(X, random_state, xp=xp) lower_bound = -xp.inf if do_init else self.lower_bound_ current_lower_bounds = [] if self.max_iter == 0: best_params = self._get_parameters() best_n_iter = 0 else: converged = False for n_iter in range(1, self.max_iter + 1): prev_lower_bound = lower_bound log_prob_norm, log_resp = self._e_step(X, xp=xp) self._m_step(X, log_resp, xp=xp) lower_bound = self._compute_lower_bound(log_resp, log_prob_norm) current_lower_bounds.append(lower_bound) change = lower_bound - prev_lower_bound self._print_verbose_msg_iter_end(n_iter, change) if abs(change) < self.tol: converged = True break self._print_verbose_msg_init_end(lower_bound, converged) if lower_bound > max_lower_bound or max_lower_bound == -xp.inf: max_lower_bound = lower_bound best_params = self._get_parameters() best_n_iter = n_iter best_lower_bounds = current_lower_bounds self.converged_ = converged # Should only warn about convergence if max_iter > 0, otherwise # the user is assumed to have used 0-iters initialization # to get the initial means. if not self.converged_ and self.max_iter > 0: warnings.warn( ( "Best performing initialization did not converge. " "Try different init parameters, or increase max_iter, " "tol, or check for degenerate data." 
), ConvergenceWarning, ) self._set_parameters(best_params, xp=xp) self.n_iter_ = best_n_iter self.lower_bound_ = max_lower_bound self.lower_bounds_ = best_lower_bounds # Always do a final e-step to guarantee that the labels returned by # fit_predict(X) are always consistent with fit(X).predict(X) # for any value of max_iter and tol (and any random_state). _, log_resp = self._e_step(X, xp=xp) return xp.argmax(log_resp, axis=1) def _e_step(self, X, xp=None): """E step. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- log_prob_norm : float Mean of the logarithms of the probabilities of each sample in X log_responsibility : array, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. """ xp, _ = get_namespace(X, xp=xp) log_prob_norm, log_resp = self._estimate_log_prob_resp(X, xp=xp) return xp.mean(log_prob_norm), log_resp @abstractmethod def _m_step(self, X, log_resp): """M step. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. """ pass @abstractmethod def _get_parameters(self): pass @abstractmethod def _set_parameters(self, params): pass def score_samples(self, X): """Compute the log-likelihood of each sample. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- log_prob : array, shape (n_samples,) Log-likelihood of each sample in `X` under the current model. """ check_is_fitted(self) X = validate_data(self, X, reset=False) return _logsumexp(self._estimate_weighted_log_prob(X), axis=1) def score(self, X, y=None): """Compute the per-sample average log-likelihood of the given data X. 
Parameters ---------- X : array-like of shape (n_samples, n_dimensions) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. Returns ------- log_likelihood : float Log-likelihood of `X` under the Gaussian mixture model. """ xp, _ = get_namespace(X) return float(xp.mean(self.score_samples(X))) def predict(self, X): """Predict the labels for the data samples in X using trained model. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- labels : array, shape (n_samples,) Component labels. """ check_is_fitted(self) xp, _ = get_namespace(X) X = validate_data(self, X, reset=False) return xp.argmax(self._estimate_weighted_log_prob(X), axis=1) def predict_proba(self, X): """Evaluate the components' density for each sample. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- resp : array, shape (n_samples, n_components) Density of each Gaussian component for each sample in X. """ check_is_fitted(self) X = validate_data(self, X, reset=False) xp, _ = get_namespace(X) _, log_resp = self._estimate_log_prob_resp(X, xp=xp) return xp.exp(log_resp) def sample(self, n_samples=1): """Generate random samples from the fitted Gaussian distribution. Parameters ---------- n_samples : int, default=1 Number of samples to generate. Returns ------- X : array, shape (n_samples, n_features) Randomly generated sample. y : array, shape (nsamples,) Component labels. """ check_is_fitted(self) xp, _, device_ = get_namespace_and_device(self.means_) if n_samples < 1: raise ValueError( "Invalid value for 'n_samples': %d . The sampling requires at " "least one sample." 
% (self.n_components) ) _, n_features = self.means_.shape rng = check_random_state(self.random_state) n_samples_comp = rng.multinomial( n_samples, _convert_to_numpy(self.weights_, xp) ) if self.covariance_type == "full": X = np.vstack( [ rng.multivariate_normal(mean, covariance, int(sample)) for (mean, covariance, sample) in zip( _convert_to_numpy(self.means_, xp), _convert_to_numpy(self.covariances_, xp), n_samples_comp, ) ] ) elif self.covariance_type == "tied": X = np.vstack( [ rng.multivariate_normal( mean, _convert_to_numpy(self.covariances_, xp), int(sample) ) for (mean, sample) in zip( _convert_to_numpy(self.means_, xp), n_samples_comp ) ] ) else: X = np.vstack( [ mean + rng.standard_normal(size=(sample, n_features)) * np.sqrt(covariance) for (mean, covariance, sample) in zip( _convert_to_numpy(self.means_, xp), _convert_to_numpy(self.covariances_, xp), n_samples_comp, ) ] ) y = xp.concat( [ xp.full(int(n_samples_comp[i]), i, dtype=xp.int64, device=device_) for i in range(len(n_samples_comp)) ] ) max_float_dtype = _max_precision_float_dtype(xp=xp, device=device_) return xp.asarray(X, dtype=max_float_dtype, device=device_), y def _estimate_weighted_log_prob(self, X, xp=None): """Estimate the weighted log-probabilities, log P(X | Z) + log weights. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- weighted_log_prob : array, shape (n_samples, n_component) """ return self._estimate_log_prob(X, xp=xp) + self._estimate_log_weights(xp=xp) @abstractmethod def _estimate_log_weights(self, xp=None): """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm. Returns ------- log_weight : array, shape (n_components, ) """ pass @abstractmethod def _estimate_log_prob(self, X, xp=None): """Estimate the log-probabilities log P(X | Z). Compute the log-probabilities per each component for each sample. 
Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- log_prob : array, shape (n_samples, n_component) """ pass def _estimate_log_prob_resp(self, X, xp=None): """Estimate log probabilities and responsibilities for each sample. Compute the log probabilities, weighted log probabilities per component and responsibilities for each sample in X with respect to the current state of the model. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- log_prob_norm : array, shape (n_samples,) log p(X) log_responsibilities : array, shape (n_samples, n_components) logarithm of the responsibilities """ xp, _ = get_namespace(X, xp=xp) weighted_log_prob = self._estimate_weighted_log_prob(X, xp=xp) log_prob_norm = _logsumexp(weighted_log_prob, axis=1, xp=xp) # There is no errstate equivalent for warning/error management in array API context_manager = ( np.errstate(under="ignore") if _is_numpy_namespace(xp) else nullcontext() ) with context_manager: # ignore underflow log_resp = weighted_log_prob - log_prob_norm[:, xp.newaxis] return log_prob_norm, log_resp def _print_verbose_msg_init_beg(self, n_init): """Print verbose message on initialization.""" if self.verbose == 1: print("Initialization %d" % n_init) elif self.verbose >= 2: print("Initialization %d" % n_init) self._init_prev_time = time() self._iter_prev_time = self._init_prev_time def _print_verbose_msg_iter_end(self, n_iter, diff_ll): """Print verbose message on initialization.""" if n_iter % self.verbose_interval == 0: if self.verbose == 1: print(" Iteration %d" % n_iter) elif self.verbose >= 2: cur_time = time() print( " Iteration %d\t time lapse %.5fs\t ll change %.5f" % (n_iter, cur_time - self._iter_prev_time, diff_ll) ) self._iter_prev_time = cur_time def _print_verbose_msg_init_end(self, lb, init_has_converged): """Print verbose message on the end of iteration.""" converged_msg = "converged" if init_has_converged else "did not converge" if self.verbose == 
1: print(f"Initialization {converged_msg}.") elif self.verbose >= 2: t = time() - self._init_prev_time print( f"Initialization {converged_msg}. time lapse {t:.5f}s\t lower bound" f" {lb:.5f}." )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/_bayesian_mixture.py
sklearn/mixture/_bayesian_mixture.py
"""Bayesian Gaussian Mixture Model.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import math from numbers import Real import numpy as np from scipy.special import betaln, digamma, gammaln from sklearn.mixture._base import BaseMixture, _check_shape from sklearn.mixture._gaussian_mixture import ( _check_precision_matrix, _check_precision_positivity, _compute_log_det_cholesky, _compute_precision_cholesky, _estimate_gaussian_parameters, _estimate_log_gaussian_prob, ) from sklearn.utils import check_array from sklearn.utils._param_validation import Interval, StrOptions def _log_dirichlet_norm(dirichlet_concentration): """Compute the log of the Dirichlet distribution normalization term. Parameters ---------- dirichlet_concentration : array-like of shape (n_samples,) The parameters values of the Dirichlet distribution. Returns ------- log_dirichlet_norm : float The log normalization of the Dirichlet distribution. """ return gammaln(np.sum(dirichlet_concentration)) - np.sum( gammaln(dirichlet_concentration) ) def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features): """Compute the log of the Wishart distribution normalization term. Parameters ---------- degrees_of_freedom : array-like of shape (n_components,) The number of degrees of freedom on the covariance Wishart distributions. log_det_precision_chol : array-like of shape (n_components,) The determinant of the precision matrix for each component. n_features : int The number of features. Return ------ log_wishart_norm : array-like of shape (n_components,) The log normalization of the Wishart distribution. 
""" # To simplify the computation we have removed the np.log(np.pi) term return -( degrees_of_freedom * log_det_precisions_chol + degrees_of_freedom * n_features * 0.5 * math.log(2.0) + np.sum( gammaln(0.5 * (degrees_of_freedom - np.arange(n_features)[:, np.newaxis])), 0, ) ) class BayesianGaussianMixture(BaseMixture): """Variational Bayesian estimation of a Gaussian mixture. This class allows to infer an approximate posterior distribution over the parameters of a Gaussian mixture distribution. The effective number of components can be inferred from the data. This class implements two types of prior for the weights distribution: a finite mixture model with Dirichlet distribution and an infinite mixture model with the Dirichlet Process. In practice Dirichlet Process inference algorithm is approximated and uses a truncated distribution with a fixed maximum number of components (called the Stick-breaking representation). The number of components actually used almost always depends on the data. .. versionadded:: 0.18 Read more in the :ref:`User Guide <bgmm>`. Parameters ---------- n_components : int, default=1 The number of mixture components. Depending on the data and the value of the `weight_concentration_prior` the model can decide to not use all the components by setting some component `weights_` to values very close to zero. The number of effective components is therefore smaller than n_components. covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' String describing the type of covariance parameters to use. Must be one of: - 'full' (each component has its own general covariance matrix), - 'tied' (all components share the same general covariance matrix), - 'diag' (each component has its own diagonal covariance matrix), - 'spherical' (each component has its own single variance). tol : float, default=1e-3 The convergence threshold. 
EM iterations will stop when the lower bound average gain on the likelihood (of the training data with respect to the model) is below this threshold. reg_covar : float, default=1e-6 Non-negative regularization added to the diagonal of covariance. Allows to assure that the covariance matrices are all positive. max_iter : int, default=100 The number of EM iterations to perform. n_init : int, default=1 The number of initializations to perform. The result with the highest lower bound value on the likelihood is kept. init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ default='kmeans' The method used to initialize the weights, the means and the covariances. String must be one of: - 'kmeans': responsibilities are initialized using kmeans. - 'k-means++': use the k-means++ method to initialize. - 'random': responsibilities are initialized randomly. - 'random_from_data': initial means are randomly selected data points. .. versionchanged:: v1.1 `init_params` now accepts 'random_from_data' and 'k-means++' as initialization methods. weight_concentration_prior_type : {'dirichlet_process', 'dirichlet_distribution'}, \ default='dirichlet_process' String describing the type of the weight concentration prior. weight_concentration_prior : float or None, default=None The dirichlet concentration of each component on the weight distribution (Dirichlet). This is commonly called gamma in the literature. The higher concentration puts more mass in the center and will lead to more components being active, while a lower concentration parameter will lead to more mass at the edge of the mixture weights simplex. The value of the parameter must be greater than 0. If it is None, it's set to ``1. / n_components``. mean_precision_prior : float or None, default=None The precision prior on the mean distribution (Gaussian). Controls the extent of where means can be placed. Larger values concentrate the cluster means around `mean_prior`. 
The value of the parameter must be greater than 0. If it is None, it is set to 1. mean_prior : array-like, shape (n_features,), default=None The prior on the mean distribution (Gaussian). If it is None, it is set to the mean of X. degrees_of_freedom_prior : float or None, default=None The prior of the number of degrees of freedom on the covariance distributions (Wishart). If it is None, it's set to `n_features`. covariance_prior : float or array-like, default=None The prior on the covariance distribution (Wishart). If it is None, the empirical covariance prior is initialized using the covariance of X. The shape depends on `covariance_type`:: (n_features, n_features) if 'full', (n_features, n_features) if 'tied', (n_features) if 'diag', float if 'spherical' random_state : int, RandomState instance or None, default=None Controls the random seed given to the method chosen to initialize the parameters (see `init_params`). In addition, it controls the generation of random samples from the fitted distribution (see the method `sample`). Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. warm_start : bool, default=False If 'warm_start' is True, the solution of the last fitting is used as initialization for the next call of fit(). This can speed up convergence when fit is called several times on similar problems. See :term:`the Glossary <warm_start>`. verbose : int, default=0 Enable verbose output. If 1 then it prints the current initialization and each iteration step. If greater than 1 then it prints also the log probability and the time needed for each step. verbose_interval : int, default=10 Number of iteration done before the next print. Attributes ---------- weights_ : array-like of shape (n_components,) The weights of each mixture components. means_ : array-like of shape (n_components, n_features) The mean of each mixture component. covariances_ : array-like The covariance of each mixture component. 
The shape depends on `covariance_type`:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' precisions_ : array-like The precision matrices for each component in the mixture. A precision matrix is the inverse of a covariance matrix. A covariance matrix is symmetric positive definite so the mixture of Gaussian can be equivalently parameterized by the precision matrices. Storing the precision matrices instead of the covariance matrices makes it more efficient to compute the log-likelihood of new samples at test time. The shape depends on ``covariance_type``:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' precisions_cholesky_ : array-like The Cholesky decomposition of the precision matrices of each mixture component. A precision matrix is the inverse of a covariance matrix. A covariance matrix is symmetric positive definite so the mixture of Gaussian can be equivalently parameterized by the precision matrices. Storing the precision matrices instead of the covariance matrices makes it more efficient to compute the log-likelihood of new samples at test time. The shape depends on ``covariance_type``:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' converged_ : bool True when convergence of the best fit of inference was reached, False otherwise. n_iter_ : int Number of step used by the best fit of inference to reach the convergence. lower_bound_ : float Lower bound value on the model evidence (of the training data) of the best fit of inference. lower_bounds_ : array-like of shape (`n_iter_`,) The list of lower bound values on the model evidence from each iteration of the best fit of inference. 
weight_concentration_prior_ : tuple or float The dirichlet concentration of each component on the weight distribution (Dirichlet). The type depends on ``weight_concentration_prior_type``:: (float, float) if 'dirichlet_process' (Beta parameters), float if 'dirichlet_distribution' (Dirichlet parameters). The higher concentration puts more mass in the center and will lead to more components being active, while a lower concentration parameter will lead to more mass at the edge of the simplex. weight_concentration_ : array-like of shape (n_components,) The dirichlet concentration of each component on the weight distribution (Dirichlet). mean_precision_prior_ : float The precision prior on the mean distribution (Gaussian). Controls the extent of where means can be placed. Larger values concentrate the cluster means around `mean_prior`. If mean_precision_prior is set to None, `mean_precision_prior_` is set to 1. mean_precision_ : array-like of shape (n_components,) The precision of each components on the mean distribution (Gaussian). mean_prior_ : array-like of shape (n_features,) The prior on the mean distribution (Gaussian). degrees_of_freedom_prior_ : float The prior of the number of degrees of freedom on the covariance distributions (Wishart). degrees_of_freedom_ : array-like of shape (n_components,) The number of degrees of freedom of each components in the model. covariance_prior_ : float or array-like The prior on the covariance distribution (Wishart). The shape depends on `covariance_type`:: (n_features, n_features) if 'full', (n_features, n_features) if 'tied', (n_features) if 'diag', float if 'spherical' n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- GaussianMixture : Finite Gaussian mixture fit with EM. 
References ---------- .. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine learning". Vol. 4 No. 4. New York: Springer. <https://www.springer.com/kr/book/9780387310732>`_ .. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for Graphical Models". In Advances in Neural Information Processing Systems 12. <https://proceedings.neurips.cc/paper_files/paper/1999/file/74563ba21a90da13dacf2a73e3ddefa7-Paper.pdf>`_ .. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational inference for Dirichlet process mixtures". Bayesian analysis 1.1 <https://www.cs.princeton.edu/courses/archive/fall11/cos597C/reading/BleiJordan2005.pdf>`_ Examples -------- >>> import numpy as np >>> from sklearn.mixture import BayesianGaussianMixture >>> X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [12, 4], [10, 7]]) >>> bgm = BayesianGaussianMixture(n_components=2, random_state=42).fit(X) >>> bgm.means_ array([[2.49 , 2.29], [8.45, 4.52 ]]) >>> bgm.predict([[0, 0], [9, 3]]) array([0, 1]) """ _parameter_constraints: dict = { **BaseMixture._parameter_constraints, "covariance_type": [StrOptions({"spherical", "tied", "diag", "full"})], "weight_concentration_prior_type": [ StrOptions({"dirichlet_process", "dirichlet_distribution"}) ], "weight_concentration_prior": [ None, Interval(Real, 0.0, None, closed="neither"), ], "mean_precision_prior": [None, Interval(Real, 0.0, None, closed="neither")], "mean_prior": [None, "array-like"], "degrees_of_freedom_prior": [None, Interval(Real, 0.0, None, closed="neither")], "covariance_prior": [ None, "array-like", Interval(Real, 0.0, None, closed="neither"), ], } def __init__( self, *, n_components=1, covariance_type="full", tol=1e-3, reg_covar=1e-6, max_iter=100, n_init=1, init_params="kmeans", weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=None, mean_precision_prior=None, mean_prior=None, degrees_of_freedom_prior=None, covariance_prior=None, random_state=None, warm_start=False, verbose=0, 
verbose_interval=10, ): super().__init__( n_components=n_components, tol=tol, reg_covar=reg_covar, max_iter=max_iter, n_init=n_init, init_params=init_params, random_state=random_state, warm_start=warm_start, verbose=verbose, verbose_interval=verbose_interval, ) self.covariance_type = covariance_type self.weight_concentration_prior_type = weight_concentration_prior_type self.weight_concentration_prior = weight_concentration_prior self.mean_precision_prior = mean_precision_prior self.mean_prior = mean_prior self.degrees_of_freedom_prior = degrees_of_freedom_prior self.covariance_prior = covariance_prior def _check_parameters(self, X, xp=None): """Check that the parameters are well defined. Parameters ---------- X : array-like of shape (n_samples, n_features) """ self._check_weights_parameters() self._check_means_parameters(X) self._check_precision_parameters(X) self._checkcovariance_prior_parameter(X) def _check_weights_parameters(self): """Check the parameter of the Dirichlet distribution.""" if self.weight_concentration_prior is None: self.weight_concentration_prior_ = 1.0 / self.n_components else: self.weight_concentration_prior_ = self.weight_concentration_prior def _check_means_parameters(self, X): """Check the parameters of the Gaussian distribution. Parameters ---------- X : array-like of shape (n_samples, n_features) """ _, n_features = X.shape if self.mean_precision_prior is None: self.mean_precision_prior_ = 1.0 else: self.mean_precision_prior_ = self.mean_precision_prior if self.mean_prior is None: self.mean_prior_ = X.mean(axis=0) else: self.mean_prior_ = check_array( self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False ) _check_shape(self.mean_prior_, (n_features,), "means") def _check_precision_parameters(self, X): """Check the prior parameters of the precision distribution. 
Parameters ---------- X : array-like of shape (n_samples, n_features) """ _, n_features = X.shape if self.degrees_of_freedom_prior is None: self.degrees_of_freedom_prior_ = n_features elif self.degrees_of_freedom_prior > n_features - 1.0: self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior else: raise ValueError( "The parameter 'degrees_of_freedom_prior' " "should be greater than %d, but got %.3f." % (n_features - 1, self.degrees_of_freedom_prior) ) def _checkcovariance_prior_parameter(self, X): """Check the `covariance_prior_`. Parameters ---------- X : array-like of shape (n_samples, n_features) """ _, n_features = X.shape if self.covariance_prior is None: self.covariance_prior_ = { "full": np.atleast_2d(np.cov(X.T)), "tied": np.atleast_2d(np.cov(X.T)), "diag": np.var(X, axis=0, ddof=1), "spherical": np.var(X, axis=0, ddof=1).mean(), }[self.covariance_type] elif self.covariance_type in ["full", "tied"]: self.covariance_prior_ = check_array( self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False ) _check_shape( self.covariance_prior_, (n_features, n_features), "%s covariance_prior" % self.covariance_type, ) _check_precision_matrix(self.covariance_prior_, self.covariance_type) elif self.covariance_type == "diag": self.covariance_prior_ = check_array( self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False ) _check_shape( self.covariance_prior_, (n_features,), "%s covariance_prior" % self.covariance_type, ) _check_precision_positivity(self.covariance_prior_, self.covariance_type) # spherical case else: self.covariance_prior_ = self.covariance_prior def _initialize(self, X, resp): """Initialization of the mixture parameters. 
Parameters ---------- X : array-like of shape (n_samples, n_features) resp : array-like of shape (n_samples, n_components) """ nk, xk, sk = _estimate_gaussian_parameters( X, resp, self.reg_covar, self.covariance_type ) self._estimate_weights(nk) self._estimate_means(nk, xk) self._estimate_precisions(nk, xk, sk) def _estimate_weights(self, nk): """Estimate the parameters of the Dirichlet distribution. Parameters ---------- nk : array-like of shape (n_components,) """ if self.weight_concentration_prior_type == "dirichlet_process": # For dirichlet process weight_concentration will be a tuple # containing the two parameters of the beta distribution self.weight_concentration_ = ( 1.0 + nk, ( self.weight_concentration_prior_ + np.hstack((np.cumsum(nk[::-1])[-2::-1], 0)) ), ) else: # case Variational Gaussian mixture with dirichlet distribution self.weight_concentration_ = self.weight_concentration_prior_ + nk def _estimate_means(self, nk, xk): """Estimate the parameters of the Gaussian distribution. Parameters ---------- nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) """ self.mean_precision_ = self.mean_precision_prior_ + nk self.means_ = ( self.mean_precision_prior_ * self.mean_prior_ + nk[:, np.newaxis] * xk ) / self.mean_precision_[:, np.newaxis] def _estimate_precisions(self, nk, xk, sk): """Estimate the precisions parameters of the precision distribution. 
Parameters ---------- nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like The shape depends of `covariance_type`: 'full' : (n_components, n_features, n_features) 'tied' : (n_features, n_features) 'diag' : (n_components, n_features) 'spherical' : (n_components,) """ { "full": self._estimate_wishart_full, "tied": self._estimate_wishart_tied, "diag": self._estimate_wishart_diag, "spherical": self._estimate_wishart_spherical, }[self.covariance_type](nk, xk, sk) self.precisions_cholesky_ = _compute_precision_cholesky( self.covariances_, self.covariance_type ) def _estimate_wishart_full(self, nk, xk, sk): """Estimate the full Wishart distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_components, n_features, n_features) """ _, n_features = xk.shape # Warning : in some Bishop book, there is a typo on the formula 10.63 # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is # the correct formula self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk self.covariances_ = np.empty((self.n_components, n_features, n_features)) for k in range(self.n_components): diff = xk[k] - self.mean_prior_ self.covariances_[k] = ( self.covariance_prior_ + nk[k] * sk[k] + nk[k] * self.mean_precision_prior_ / self.mean_precision_[k] * np.outer(diff, diff) ) # Contrary to the original bishop book, we normalize the covariances self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis] def _estimate_wishart_tied(self, nk, xk, sk): """Estimate the tied Wishart distribution parameters. 
Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_features, n_features) """ _, n_features = xk.shape # Warning : in some Bishop book, there is a typo on the formula 10.63 # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` # is the correct formula self.degrees_of_freedom_ = ( self.degrees_of_freedom_prior_ + nk.sum() / self.n_components ) diff = xk - self.mean_prior_ self.covariances_ = ( self.covariance_prior_ + sk * nk.sum() / self.n_components + self.mean_precision_prior_ / self.n_components * np.dot((nk / self.mean_precision_) * diff.T, diff) ) # Contrary to the original bishop book, we normalize the covariances self.covariances_ /= self.degrees_of_freedom_ def _estimate_wishart_diag(self, nk, xk, sk): """Estimate the diag Wishart distribution parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_components, n_features) """ _, n_features = xk.shape # Warning : in some Bishop book, there is a typo on the formula 10.63 # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` # is the correct formula self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk diff = xk - self.mean_prior_ self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * ( sk + (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] * np.square(diff) ) # Contrary to the original bishop book, we normalize the covariances self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis] def _estimate_wishart_spherical(self, nk, xk, sk): """Estimate the spherical Wishart distribution parameters. 
Parameters ---------- X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features) sk : array-like of shape (n_components,) """ _, n_features = xk.shape # Warning : in some Bishop book, there is a typo on the formula 10.63 # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` # is the correct formula self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk diff = xk - self.mean_prior_ self.covariances_ = self.covariance_prior_ + nk * ( sk + self.mean_precision_prior_ / self.mean_precision_ * np.mean(np.square(diff), 1) ) # Contrary to the original bishop book, we normalize the covariances self.covariances_ /= self.degrees_of_freedom_ def _m_step(self, X, log_resp, xp=None): """M step. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. 
""" n_samples, _ = X.shape nk, xk, sk = _estimate_gaussian_parameters( X, np.exp(log_resp), self.reg_covar, self.covariance_type ) self._estimate_weights(nk) self._estimate_means(nk, xk) self._estimate_precisions(nk, xk, sk) def _estimate_log_weights(self, xp=None): if self.weight_concentration_prior_type == "dirichlet_process": digamma_sum = digamma( self.weight_concentration_[0] + self.weight_concentration_[1] ) digamma_a = digamma(self.weight_concentration_[0]) digamma_b = digamma(self.weight_concentration_[1]) return ( digamma_a - digamma_sum + np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])) ) else: # case Variational Gaussian mixture with dirichlet distribution return digamma(self.weight_concentration_) - digamma( np.sum(self.weight_concentration_) ) def _estimate_log_prob(self, X, xp=None): _, n_features = X.shape # We remove `n_features * np.log(self.degrees_of_freedom_)` because # the precision matrix is normalized log_gauss = _estimate_log_gaussian_prob( X, self.means_, self.precisions_cholesky_, self.covariance_type ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) log_lambda = n_features * np.log(2.0) + np.sum( digamma( 0.5 * (self.degrees_of_freedom_ - np.arange(0, n_features)[:, np.newaxis]) ), 0, ) return log_gauss + 0.5 * (log_lambda - n_features / self.mean_precision_) def _compute_lower_bound(self, log_resp, log_prob_norm): """Estimate the lower bound of the model. The lower bound on the likelihood (of the training data with respect to the model) is used to detect the convergence and has to increase at each iteration. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. log_prob_norm : float Logarithm of the probability of each sample in X. 
Returns ------- lower_bound : float """ # Contrary to the original formula, we have done some simplification # and removed all the constant terms. (n_features,) = self.mean_prior_.shape # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)` # because the precision matrix is normalized. log_det_precisions_chol = _compute_log_det_cholesky( self.precisions_cholesky_, self.covariance_type, n_features ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) if self.covariance_type == "tied": log_wishart = self.n_components * np.float64( _log_wishart_norm( self.degrees_of_freedom_, log_det_precisions_chol, n_features ) ) else: log_wishart = np.sum( _log_wishart_norm( self.degrees_of_freedom_, log_det_precisions_chol, n_features ) ) if self.weight_concentration_prior_type == "dirichlet_process": log_norm_weight = -np.sum( betaln(self.weight_concentration_[0], self.weight_concentration_[1]) ) else: log_norm_weight = _log_dirichlet_norm(self.weight_concentration_) return ( -np.sum(np.exp(log_resp) * log_resp) - log_wishart - log_norm_weight - 0.5 * n_features * np.sum(np.log(self.mean_precision_)) ) def _get_parameters(self): return ( self.weight_concentration_, self.mean_precision_, self.means_, self.degrees_of_freedom_, self.covariances_, self.precisions_cholesky_, ) def _set_parameters(self, params, xp=None): ( self.weight_concentration_, self.mean_precision_, self.means_, self.degrees_of_freedom_, self.covariances_, self.precisions_cholesky_, ) = params # Weights computation if self.weight_concentration_prior_type == "dirichlet_process": weight_dirichlet_sum = ( self.weight_concentration_[0] + self.weight_concentration_[1] )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/__init__.py
sklearn/mixture/__init__.py
"""Mixture modeling algorithms."""

# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

from sklearn.mixture._bayesian_mixture import BayesianGaussianMixture
from sklearn.mixture._gaussian_mixture import GaussianMixture

# Public API of the mixture subpackage: the two estimator classes.
__all__ = ["BayesianGaussianMixture", "GaussianMixture"]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/tests/test_bayesian_mixture.py
sklearn/mixture/tests/test_bayesian_mixture.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import copy import numpy as np import pytest from scipy.special import gammaln from sklearn.exceptions import NotFittedError from sklearn.metrics.cluster import adjusted_rand_score from sklearn.mixture import BayesianGaussianMixture from sklearn.mixture._bayesian_mixture import _log_dirichlet_norm, _log_wishart_norm from sklearn.mixture.tests.test_gaussian_mixture import RandomData from sklearn.utils._testing import ( assert_almost_equal, assert_array_equal, ) COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] PRIOR_TYPE = ["dirichlet_process", "dirichlet_distribution"] def test_log_dirichlet_norm(): rng = np.random.RandomState(0) weight_concentration = rng.rand(2) expected_norm = gammaln(np.sum(weight_concentration)) - np.sum( gammaln(weight_concentration) ) predected_norm = _log_dirichlet_norm(weight_concentration) assert_almost_equal(expected_norm, predected_norm) def test_log_wishart_norm(): rng = np.random.RandomState(0) n_components, n_features = 5, 2 degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.0 log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components)) expected_norm = np.empty(5) for k, (degrees_of_freedom_k, log_det_k) in enumerate( zip(degrees_of_freedom, log_det_precisions_chol) ): expected_norm[k] = -( degrees_of_freedom_k * (log_det_k + 0.5 * n_features * np.log(2.0)) + np.sum( gammaln( 0.5 * (degrees_of_freedom_k - np.arange(0, n_features)[:, np.newaxis]) ), 0, ) ).item() predected_norm = _log_wishart_norm( degrees_of_freedom, log_det_precisions_chol, n_features ) assert_almost_equal(expected_norm, predected_norm) def test_bayesian_mixture_weights_prior_initialisation(): rng = np.random.RandomState(0) n_samples, n_components, n_features = 10, 5, 2 X = rng.rand(n_samples, n_features) # Check correct init for a given value of weight_concentration_prior weight_concentration_prior = rng.rand() bgmm = BayesianGaussianMixture( 
weight_concentration_prior=weight_concentration_prior, random_state=rng ).fit(X) assert_almost_equal(weight_concentration_prior, bgmm.weight_concentration_prior_) # Check correct init for the default value of weight_concentration_prior bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) assert_almost_equal(1.0 / n_components, bgmm.weight_concentration_prior_) def test_bayesian_mixture_mean_prior_initialisation(): rng = np.random.RandomState(0) n_samples, n_components, n_features = 10, 3, 2 X = rng.rand(n_samples, n_features) # Check correct init for a given value of mean_precision_prior mean_precision_prior = rng.rand() bgmm = BayesianGaussianMixture( mean_precision_prior=mean_precision_prior, random_state=rng ).fit(X) assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_) # Check correct init for the default value of mean_precision_prior bgmm = BayesianGaussianMixture(random_state=rng).fit(X) assert_almost_equal(1.0, bgmm.mean_precision_prior_) # Check correct init for a given value of mean_prior mean_prior = rng.rand(n_features) bgmm = BayesianGaussianMixture( n_components=n_components, mean_prior=mean_prior, random_state=rng ).fit(X) assert_almost_equal(mean_prior, bgmm.mean_prior_) # Check correct init for the default value of bemean_priorta bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_) def test_bayesian_mixture_precisions_prior_initialisation(): rng = np.random.RandomState(0) n_samples, n_features = 10, 2 X = rng.rand(n_samples, n_features) # Check raise message for a bad value of degrees_of_freedom_prior bad_degrees_of_freedom_prior_ = n_features - 1.0 bgmm = BayesianGaussianMixture( degrees_of_freedom_prior=bad_degrees_of_freedom_prior_, random_state=rng ) msg = ( "The parameter 'degrees_of_freedom_prior' should be greater than" f" {n_features - 1}, but got {bad_degrees_of_freedom_prior_:.3f}." 
) with pytest.raises(ValueError, match=msg): bgmm.fit(X) # Check correct init for a given value of degrees_of_freedom_prior degrees_of_freedom_prior = rng.rand() + n_features - 1.0 bgmm = BayesianGaussianMixture( degrees_of_freedom_prior=degrees_of_freedom_prior, random_state=rng ).fit(X) assert_almost_equal(degrees_of_freedom_prior, bgmm.degrees_of_freedom_prior_) # Check correct init for the default value of degrees_of_freedom_prior degrees_of_freedom_prior_default = n_features bgmm = BayesianGaussianMixture( degrees_of_freedom_prior=degrees_of_freedom_prior_default, random_state=rng ).fit(X) assert_almost_equal( degrees_of_freedom_prior_default, bgmm.degrees_of_freedom_prior_ ) # Check correct init for a given value of covariance_prior covariance_prior = { "full": np.cov(X.T, bias=1) + 10, "tied": np.cov(X.T, bias=1) + 5, "diag": np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3, "spherical": rng.rand(), } bgmm = BayesianGaussianMixture(random_state=rng) for cov_type in ["full", "tied", "diag", "spherical"]: bgmm.covariance_type = cov_type bgmm.covariance_prior = covariance_prior[cov_type] bgmm.fit(X) assert_almost_equal(covariance_prior[cov_type], bgmm.covariance_prior_) # Check correct init for the default value of covariance_prior covariance_prior_default = { "full": np.atleast_2d(np.cov(X.T)), "tied": np.atleast_2d(np.cov(X.T)), "diag": np.var(X, axis=0, ddof=1), "spherical": np.var(X, axis=0, ddof=1).mean(), } bgmm = BayesianGaussianMixture(random_state=0) for cov_type in ["full", "tied", "diag", "spherical"]: bgmm.covariance_type = cov_type bgmm.fit(X) assert_almost_equal(covariance_prior_default[cov_type], bgmm.covariance_prior_) def test_bayesian_mixture_check_is_fitted(): rng = np.random.RandomState(0) n_samples, n_features = 10, 2 # Check raise message bgmm = BayesianGaussianMixture(random_state=rng) X = rng.rand(n_samples, n_features) msg = "This BayesianGaussianMixture instance is not fitted yet." 
with pytest.raises(ValueError, match=msg): bgmm.score(X) def test_bayesian_mixture_weights(): rng = np.random.RandomState(0) n_samples, n_features = 10, 2 X = rng.rand(n_samples, n_features) # Case Dirichlet distribution for the weight concentration prior type bgmm = BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_distribution", n_components=3, random_state=rng, ).fit(X) expected_weights = bgmm.weight_concentration_ / np.sum(bgmm.weight_concentration_) assert_almost_equal(expected_weights, bgmm.weights_) assert_almost_equal(np.sum(bgmm.weights_), 1.0) # Case Dirichlet process for the weight concentration prior type dpgmm = BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_process", n_components=3, random_state=rng, ).fit(X) weight_dirichlet_sum = ( dpgmm.weight_concentration_[0] + dpgmm.weight_concentration_[1] ) tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum expected_weights = ( dpgmm.weight_concentration_[0] / weight_dirichlet_sum * np.hstack((1, np.cumprod(tmp[:-1]))) ) expected_weights /= np.sum(expected_weights) assert_almost_equal(expected_weights, dpgmm.weights_) assert_almost_equal(np.sum(dpgmm.weights_), 1.0) @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") def test_monotonic_likelihood(): # We check that each step of the each step of variational inference without # regularization improve monotonically the training set of the bound rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=20) n_components = rand_data.n_components for prior_type in PRIOR_TYPE: for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] bgmm = BayesianGaussianMixture( weight_concentration_prior_type=prior_type, n_components=2 * n_components, covariance_type=covar_type, warm_start=True, max_iter=1, random_state=rng, tol=1e-3, ) current_lower_bound = -np.inf # Do one training iteration at a time so we can make sure that the # training log likelihood increases after each iteration. 
for _ in range(600): prev_lower_bound = current_lower_bound current_lower_bound = bgmm.fit(X).lower_bound_ assert current_lower_bound >= prev_lower_bound if bgmm.converged_: break assert bgmm.converged_ def test_compare_covar_type(): # We can compare the 'full' precision with the other cov_type if we apply # 1 iter of the M-step (done during _initialize_parameters). rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7) X = rand_data.X["full"] n_components = rand_data.n_components for prior_type in PRIOR_TYPE: # Computation of the full_covariance bgmm = BayesianGaussianMixture( weight_concentration_prior_type=prior_type, n_components=2 * n_components, covariance_type="full", max_iter=1, random_state=0, tol=1e-7, ) bgmm._check_parameters(X) bgmm._initialize_parameters(X, np.random.RandomState(0)) full_covariances = ( bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis] ) # Check tied_covariance = mean(full_covariances, 0) bgmm = BayesianGaussianMixture( weight_concentration_prior_type=prior_type, n_components=2 * n_components, covariance_type="tied", max_iter=1, random_state=0, tol=1e-7, ) bgmm._check_parameters(X) bgmm._initialize_parameters(X, np.random.RandomState(0)) tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_ assert_almost_equal(tied_covariance, np.mean(full_covariances, 0)) # Check diag_covariance = diag(full_covariances) bgmm = BayesianGaussianMixture( weight_concentration_prior_type=prior_type, n_components=2 * n_components, covariance_type="diag", max_iter=1, random_state=0, tol=1e-7, ) bgmm._check_parameters(X) bgmm._initialize_parameters(X, np.random.RandomState(0)) diag_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis] assert_almost_equal( diag_covariances, np.array([np.diag(cov) for cov in full_covariances]) ) # Check spherical_covariance = np.mean(diag_covariances, 0) bgmm = BayesianGaussianMixture( weight_concentration_prior_type=prior_type, n_components=2 * n_components, 
covariance_type="spherical", max_iter=1, random_state=0, tol=1e-7, ) bgmm._check_parameters(X) bgmm._initialize_parameters(X, np.random.RandomState(0)) spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_ assert_almost_equal(spherical_covariances, np.mean(diag_covariances, 1)) @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") def test_check_covariance_precision(): # We check that the dot product of the covariance and the precision # matrices is identity. rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7) n_components, n_features = 2 * rand_data.n_components, 2 # Computation of the full_covariance bgmm = BayesianGaussianMixture( n_components=n_components, max_iter=100, random_state=rng, tol=1e-3, reg_covar=0 ) for covar_type in COVARIANCE_TYPE: bgmm.covariance_type = covar_type bgmm.fit(rand_data.X[covar_type]) if covar_type == "full": for covar, precision in zip(bgmm.covariances_, bgmm.precisions_): assert_almost_equal(np.dot(covar, precision), np.eye(n_features)) elif covar_type == "tied": assert_almost_equal( np.dot(bgmm.covariances_, bgmm.precisions_), np.eye(n_features) ) elif covar_type == "diag": assert_almost_equal( bgmm.covariances_ * bgmm.precisions_, np.ones((n_components, n_features)), ) else: assert_almost_equal( bgmm.covariances_ * bgmm.precisions_, np.ones(n_components) ) def test_invariant_translation(): # We check here that adding a constant in the data change correctly the # parameters of the mixture rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=100) n_components = 2 * rand_data.n_components for prior_type in PRIOR_TYPE: for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] bgmm1 = BayesianGaussianMixture( weight_concentration_prior_type=prior_type, n_components=n_components, max_iter=100, random_state=0, tol=1e-3, reg_covar=0, ).fit(X) bgmm2 = BayesianGaussianMixture( weight_concentration_prior_type=prior_type, n_components=n_components, max_iter=100, 
random_state=0, tol=1e-3, reg_covar=0, ).fit(X + 100) assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100) assert_almost_equal(bgmm1.weights_, bgmm2.weights_) assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_) @pytest.mark.filterwarnings("ignore:.*did not converge.*") @pytest.mark.parametrize( "seed, max_iter, tol", [ (0, 2, 1e-7), # strict non-convergence (1, 2, 1e-1), # loose non-convergence (3, 300, 1e-7), # strict convergence (4, 300, 1e-1), # loose convergence ], ) def test_bayesian_mixture_fit_predict(seed, max_iter, tol): rng = np.random.RandomState(seed) rand_data = RandomData(rng, n_samples=50, scale=7) n_components = 2 * rand_data.n_components for covar_type in COVARIANCE_TYPE: bgmm1 = BayesianGaussianMixture( n_components=n_components, max_iter=max_iter, random_state=rng, tol=tol, reg_covar=0, ) bgmm1.covariance_type = covar_type bgmm2 = copy.deepcopy(bgmm1) X = rand_data.X[covar_type] Y_pred1 = bgmm1.fit(X).predict(X) Y_pred2 = bgmm2.fit_predict(X) assert_array_equal(Y_pred1, Y_pred2) def test_bayesian_mixture_fit_predict_n_init(): # Check that fit_predict is equivalent to fit.predict, when n_init > 1 X = np.random.RandomState(0).randn(50, 5) gm = BayesianGaussianMixture(n_components=5, n_init=10, random_state=0) y_pred1 = gm.fit_predict(X) y_pred2 = gm.predict(X) assert_array_equal(y_pred1, y_pred2) def test_bayesian_mixture_predict_predict_proba(): # this is the same test as test_gaussian_mixture_predict_predict_proba() rng = np.random.RandomState(0) rand_data = RandomData(rng) for prior_type in PRIOR_TYPE: for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] Y = rand_data.Y bgmm = BayesianGaussianMixture( n_components=rand_data.n_components, random_state=rng, weight_concentration_prior_type=prior_type, covariance_type=covar_type, ) # Check a warning message arrive if we don't do fit msg = ( "This BayesianGaussianMixture instance is not fitted yet. " "Call 'fit' with appropriate arguments before using this " "estimator." 
) with pytest.raises(NotFittedError, match=msg): bgmm.predict(X) bgmm.fit(X) Y_pred = bgmm.predict(X) Y_pred_proba = bgmm.predict_proba(X).argmax(axis=1) assert_array_equal(Y_pred, Y_pred_proba) assert adjusted_rand_score(Y, Y_pred) >= 0.95
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/tests/test_mixture.py
sklearn/mixture/tests/test_mixture.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np import pytest from sklearn.base import clone from sklearn.mixture import BayesianGaussianMixture, GaussianMixture @pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()]) def test_gaussian_mixture_n_iter(estimator): # check that n_iter is the number of iteration performed. estimator = clone(estimator) # Avoid side effects from shared instances rng = np.random.RandomState(0) X = rng.rand(10, 5) max_iter = 1 estimator.set_params(max_iter=max_iter) estimator.fit(X) assert estimator.n_iter_ == max_iter @pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()]) def test_mixture_n_components_greater_than_n_samples_error(estimator): """Check error when n_components <= n_samples""" estimator = clone(estimator) # Avoid side effects from shared instances rng = np.random.RandomState(0) X = rng.rand(10, 5) estimator.set_params(n_components=12) msg = "Expected n_samples >= n_components" with pytest.raises(ValueError, match=msg): estimator.fit(X)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/tests/test_gaussian_mixture.py
sklearn/mixture/tests/test_gaussian_mixture.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import copy import itertools import re import sys import warnings from io import StringIO from unittest.mock import Mock import numpy as np import pytest from scipy import linalg, stats import sklearn from sklearn.cluster import KMeans from sklearn.covariance import EmpiricalCovariance from sklearn.datasets import make_spd_matrix from sklearn.datasets._samples_generator import make_blobs from sklearn.exceptions import ConvergenceWarning, NotFittedError from sklearn.metrics.cluster import adjusted_rand_score from sklearn.mixture import GaussianMixture from sklearn.mixture._gaussian_mixture import ( _compute_log_det_cholesky, _compute_precision_cholesky, _estimate_gaussian_covariances_diag, _estimate_gaussian_covariances_full, _estimate_gaussian_covariances_spherical, _estimate_gaussian_covariances_tied, _estimate_gaussian_parameters, ) from sklearn.utils._array_api import ( _convert_to_numpy, _get_namespace_device_dtype_ids, device, get_namespace, yield_namespace_device_dtype_combinations, ) from sklearn.utils._testing import ( _array_api_for_tests, assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_array_equal, skip_if_array_api_compat_not_configured, ) from sklearn.utils.extmath import fast_logdet COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] def generate_data( n_samples, n_features, weights, means, precisions, covariance_type, dtype=np.float64 ): rng = np.random.RandomState(0) X = [] if covariance_type == "spherical": for _, (w, m, c) in enumerate(zip(weights, means, precisions["spherical"])): X.append( rng.multivariate_normal( m, c * np.eye(n_features), int(np.round(w * n_samples)) ).astype(dtype) ) if covariance_type == "diag": for _, (w, m, c) in enumerate(zip(weights, means, precisions["diag"])): X.append( rng.multivariate_normal( m, np.diag(c), int(np.round(w * n_samples)) ).astype(dtype) ) if covariance_type == "tied": for _, (w, m) in 
enumerate(zip(weights, means)): X.append( rng.multivariate_normal( m, precisions["tied"], int(np.round(w * n_samples)) ).astype(dtype) ) if covariance_type == "full": for _, (w, m, c) in enumerate(zip(weights, means, precisions["full"])): X.append( rng.multivariate_normal(m, c, int(np.round(w * n_samples))).astype( dtype ) ) X = np.vstack(X) return X class RandomData: def __init__( self, rng, n_samples=200, n_components=2, n_features=2, scale=50, dtype=np.float64, ): self.n_samples = n_samples self.n_components = n_components self.n_features = n_features self.weights = rng.rand(n_components).astype(dtype) self.weights = self.weights.astype(dtype) / self.weights.sum() self.means = rng.rand(n_components, n_features).astype(dtype) * scale self.covariances = { "spherical": 0.5 + rng.rand(n_components).astype(dtype), "diag": (0.5 + rng.rand(n_components, n_features).astype(dtype)) ** 2, "tied": make_spd_matrix(n_features, random_state=rng).astype(dtype), "full": np.array( [ make_spd_matrix(n_features, random_state=rng).astype(dtype) * 0.5 for _ in range(n_components) ] ), } self.precisions = { "spherical": 1.0 / self.covariances["spherical"], "diag": 1.0 / self.covariances["diag"], "tied": linalg.inv(self.covariances["tied"]), "full": np.array( [linalg.inv(covariance) for covariance in self.covariances["full"]] ), } self.X = dict( zip( COVARIANCE_TYPE, [ generate_data( n_samples, n_features, self.weights, self.means, self.covariances, covar_type, dtype=dtype, ) for covar_type in COVARIANCE_TYPE ], ) ) self.Y = np.hstack( [ np.full(int(np.round(w * n_samples)), k, dtype=int) for k, w in enumerate(self.weights) ] ) def test_gaussian_mixture_attributes(): # test bad parameters rng = np.random.RandomState(0) X = rng.rand(10, 2) # test good parameters n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1 covariance_type, init_params = "full", "random" gmm = GaussianMixture( n_components=n_components, tol=tol, n_init=n_init, max_iter=max_iter, 
reg_covar=reg_covar, covariance_type=covariance_type, init_params=init_params, ).fit(X) assert gmm.n_components == n_components assert gmm.covariance_type == covariance_type assert gmm.tol == tol assert gmm.reg_covar == reg_covar assert gmm.max_iter == max_iter assert gmm.n_init == n_init assert gmm.init_params == init_params def test_check_weights(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components = rand_data.n_components X = rand_data.X["full"] g = GaussianMixture(n_components=n_components) # Check bad shape weights_bad_shape = rng.rand(n_components, 1) g.weights_init = weights_bad_shape msg = re.escape( "The parameter 'weights' should have the shape of " f"({n_components},), but got {weights_bad_shape.shape}" ) with pytest.raises(ValueError, match=msg): g.fit(X) # Check bad range weights_bad_range = rng.rand(n_components) + 1 g.weights_init = weights_bad_range msg = re.escape( "The parameter 'weights' should be in the range [0, 1], but got" f" max value {np.min(weights_bad_range):.5f}, " f"min value {np.max(weights_bad_range):.5f}" ) with pytest.raises(ValueError, match=msg): g.fit(X) # Check bad normalization weights_bad_norm = rng.rand(n_components) weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1) g.weights_init = weights_bad_norm msg = re.escape( "The parameter 'weights' should be normalized, " f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}" ) with pytest.raises(ValueError, match=msg): g.fit(X) # Check good weights matrix weights = rand_data.weights g = GaussianMixture(weights_init=weights, n_components=n_components) g.fit(X) assert_array_equal(weights, g.weights_init) def test_check_means(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components, n_features = rand_data.n_components, rand_data.n_features X = rand_data.X["full"] g = GaussianMixture(n_components=n_components) # Check means bad shape means_bad_shape = rng.rand(n_components + 1, n_features) g.means_init = means_bad_shape msg = 
"The parameter 'means' should have the shape of " with pytest.raises(ValueError, match=msg): g.fit(X) # Check good means matrix means = rand_data.means g.means_init = means g.fit(X) assert_array_equal(means, g.means_init) def test_check_precisions(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components, n_features = rand_data.n_components, rand_data.n_features # Define the bad precisions for each covariance_type precisions_bad_shape = { "full": np.ones((n_components + 1, n_features, n_features)), "tied": np.ones((n_features + 1, n_features + 1)), "diag": np.ones((n_components + 1, n_features)), "spherical": np.ones((n_components + 1)), } # Define not positive-definite precisions precisions_not_pos = np.ones((n_components, n_features, n_features)) precisions_not_pos[0] = np.eye(n_features) precisions_not_pos[0, 0, 0] = -1.0 precisions_not_positive = { "full": precisions_not_pos, "tied": precisions_not_pos[0], "diag": np.full((n_components, n_features), -1.0), "spherical": np.full(n_components, -1.0), } not_positive_errors = { "full": "symmetric, positive-definite", "tied": "symmetric, positive-definite", "diag": "positive", "spherical": "positive", } for covar_type in COVARIANCE_TYPE: X = RandomData(rng).X[covar_type] g = GaussianMixture( n_components=n_components, covariance_type=covar_type, random_state=rng ) # Check precisions with bad shapes g.precisions_init = precisions_bad_shape[covar_type] msg = f"The parameter '{covar_type} precision' should have the shape of" with pytest.raises(ValueError, match=msg): g.fit(X) # Check not positive precisions g.precisions_init = precisions_not_positive[covar_type] msg = f"'{covar_type} precision' should be {not_positive_errors[covar_type]}" with pytest.raises(ValueError, match=msg): g.fit(X) # Check the correct init of precisions_init g.precisions_init = rand_data.precisions[covar_type] g.fit(X) assert_array_equal(rand_data.precisions[covar_type], g.precisions_init) def test_suffstat_sk_full(): # compare 
the precision matrix compute from the # EmpiricalCovariance.covariance fitted on X*sqrt(resp) # with _sufficient_sk_full, n_components=1 rng = np.random.RandomState(0) n_samples, n_features = 500, 2 # special case 1, assuming data is "centered" X = rng.rand(n_samples, n_features) resp = rng.rand(n_samples, 1) X_resp = np.sqrt(resp) * X nk = np.array([n_samples]) xk = np.zeros((1, n_features)) covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) ecov = EmpiricalCovariance(assume_centered=True) ecov.fit(X_resp) assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) assert_array_almost_equal(precs_est, precs_pred) # special case 2, assuming resp are all ones resp = np.ones((n_samples, 1)) nk = np.array([n_samples]) xk = X.mean(axis=0).reshape((1, -1)) covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) ecov = EmpiricalCovariance(assume_centered=False) ecov.fit(X) assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) assert_array_almost_equal(precs_est, precs_pred) def test_suffstat_sk_tied(): # use equation Nk * Sk / N = S_tied rng = np.random.RandomState(0) n_samples, n_features, n_components = 500, 2, 2 resp = rng.rand(n_samples, n_components) resp = resp / resp.sum(axis=1)[:, np.newaxis] X = rng.rand(n_samples, n_features) nk = resp.sum(axis=0) xk = np.dot(resp.T, X) / nk[:, 
np.newaxis] covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) covars_pred_full = ( np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full, 0) / n_samples ) covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0) ecov = EmpiricalCovariance() ecov.covariance_ = covars_pred_full assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="frobenius"), 0) assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="spectral"), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, "tied") precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T) precs_est = linalg.inv(covars_pred_tied) assert_array_almost_equal(precs_est, precs_pred) def test_suffstat_sk_diag(): # test against 'full' case rng = np.random.RandomState(0) n_samples, n_features, n_components = 500, 2, 2 resp = rng.rand(n_samples, n_components) resp = resp / resp.sum(axis=1)[:, np.newaxis] X = rng.rand(n_samples, n_features) nk = resp.sum(axis=0) xk = np.dot(resp.T, X) / nk[:, np.newaxis] covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0) ecov = EmpiricalCovariance() for cov_full, cov_diag in zip(covars_pred_full, covars_pred_diag): ecov.covariance_ = np.diag(np.diag(cov_full)) cov_diag = np.diag(cov_diag) assert_almost_equal(ecov.error_norm(cov_diag, norm="frobenius"), 0) assert_almost_equal(ecov.error_norm(cov_diag, norm="spectral"), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, "diag") assert_almost_equal(covars_pred_diag, 1.0 / precs_chol_pred**2) def test_gaussian_suffstat_sk_spherical(global_dtype): # computing spherical covariance equals to the variance of one-dimension # data after flattening, n_components=1 rng = np.random.RandomState(0) n_samples, n_features = 500, 2 X = rng.rand(n_samples, n_features).astype(global_dtype) X = X - X.mean() resp = 
np.ones((n_samples, 1), dtype=global_dtype) nk = np.array([n_samples], dtype=global_dtype) xk = X.mean() covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0) covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / ( n_features * n_samples ) assert_almost_equal(covars_pred_spherical, covars_pred_spherical2) assert covars_pred_spherical.dtype == global_dtype # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, "spherical") assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2) assert precs_chol_pred.dtype == global_dtype def test_compute_log_det_cholesky(global_dtype): n_features = 2 rand_data = RandomData(np.random.RandomState(0), dtype=global_dtype) for covar_type in COVARIANCE_TYPE: covariance = rand_data.covariances[covar_type] if covar_type == "full": predected_det = np.array([linalg.det(cov) for cov in covariance]) elif covar_type == "tied": predected_det = linalg.det(covariance) elif covar_type == "diag": predected_det = np.array([np.prod(cov) for cov in covariance]) elif covar_type == "spherical": predected_det = covariance**n_features # We compute the cholesky decomposition of the covariance matrix assert covariance.dtype == global_dtype expected_det = _compute_log_det_cholesky( _compute_precision_cholesky(covariance, covar_type), covar_type, n_features=n_features, ) assert_array_almost_equal(expected_det, -0.5 * np.log(predected_det)) assert expected_det.dtype == global_dtype def _naive_lmvnpdf_diag(X, means, covars): resp = np.empty((len(X), len(means))) stds = np.sqrt(covars) for i, (mean, std) in enumerate(zip(means, stds)): resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1) return resp def test_gaussian_mixture_log_probabilities(): from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob # test against with _naive_lmvnpdf_diag rng = np.random.RandomState(0) rand_data = RandomData(rng) n_samples = 500 n_features = 
rand_data.n_features n_components = rand_data.n_components means = rand_data.means covars_diag = rng.rand(n_components, n_features) X = rng.rand(n_samples, n_features) log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag) # full covariances precs_full = np.array([np.diag(1.0 / np.sqrt(x)) for x in covars_diag]) log_prob = _estimate_log_gaussian_prob(X, means, precs_full, "full") assert_array_almost_equal(log_prob, log_prob_naive) # diag covariances precs_chol_diag = 1.0 / np.sqrt(covars_diag) log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, "diag") assert_array_almost_equal(log_prob, log_prob_naive) # tied covars_tied = np.array([x for x in covars_diag]).mean(axis=0) precs_tied = np.diag(np.sqrt(1.0 / covars_tied)) log_prob_naive = _naive_lmvnpdf_diag(X, means, [covars_tied] * n_components) log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, "tied") assert_array_almost_equal(log_prob, log_prob_naive) # spherical covars_spherical = covars_diag.mean(axis=1) precs_spherical = 1.0 / np.sqrt(covars_diag.mean(axis=1)) log_prob_naive = _naive_lmvnpdf_diag( X, means, [[k] * n_features for k in covars_spherical] ) log_prob = _estimate_log_gaussian_prob(X, means, precs_spherical, "spherical") assert_array_almost_equal(log_prob, log_prob_naive) # skip tests on weighted_log_probabilities, log_weights def test_gaussian_mixture_estimate_log_prob_resp(): # test whether responsibilities are normalized rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=5) n_samples = rand_data.n_samples n_features = rand_data.n_features n_components = rand_data.n_components X = rng.rand(n_samples, n_features) for covar_type in COVARIANCE_TYPE: weights = rand_data.weights means = rand_data.means precisions = rand_data.precisions[covar_type] g = GaussianMixture( n_components=n_components, random_state=rng, weights_init=weights, means_init=means, precisions_init=precisions, covariance_type=covar_type, ) g.fit(X) resp = g.predict_proba(X) 
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples)) assert_array_equal(g.weights_init, weights) assert_array_equal(g.means_init, means) assert_array_equal(g.precisions_init, precisions) def test_gaussian_mixture_predict_predict_proba(): rng = np.random.RandomState(0) rand_data = RandomData(rng) for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] Y = rand_data.Y g = GaussianMixture( n_components=rand_data.n_components, random_state=rng, weights_init=rand_data.weights, means_init=rand_data.means, precisions_init=rand_data.precisions[covar_type], covariance_type=covar_type, ) # Check a warning message arrive if we don't do fit msg = ( "This GaussianMixture instance is not fitted yet. Call 'fit' " "with appropriate arguments before using this estimator." ) with pytest.raises(NotFittedError, match=msg): g.predict(X) g.fit(X) Y_pred = g.predict(X) Y_pred_proba = g.predict_proba(X).argmax(axis=1) assert_array_equal(Y_pred, Y_pred_proba) assert adjusted_rand_score(Y, Y_pred) > 0.95 @pytest.mark.filterwarnings("ignore:.*did not converge.*") @pytest.mark.parametrize( "seed, max_iter, tol", [ (0, 2, 1e-7), # strict non-convergence (1, 2, 1e-1), # loose non-convergence (3, 300, 1e-7), # strict convergence (4, 300, 1e-1), # loose convergence ], ) def test_gaussian_mixture_fit_predict(seed, max_iter, tol, global_dtype): rng = np.random.RandomState(seed) rand_data = RandomData(rng, dtype=global_dtype) for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] Y = rand_data.Y g = GaussianMixture( n_components=rand_data.n_components, random_state=rng, weights_init=rand_data.weights, means_init=rand_data.means, precisions_init=rand_data.precisions[covar_type], covariance_type=covar_type, max_iter=max_iter, tol=tol, ) # check if fit_predict(X) is equivalent to fit(X).predict(X) f = copy.deepcopy(g) Y_pred1 = f.fit(X).predict(X) Y_pred2 = g.fit_predict(X) assert_array_equal(Y_pred1, Y_pred2) assert adjusted_rand_score(Y, Y_pred2) > 0.95 assert g.means_.dtype 
== global_dtype assert g.weights_.dtype == global_dtype assert g.precisions_.dtype == global_dtype def test_gaussian_mixture_fit_predict_n_init(): # Check that fit_predict is equivalent to fit.predict, when n_init > 1 X = np.random.RandomState(0).randn(1000, 5) gm = GaussianMixture(n_components=5, n_init=5, random_state=0) y_pred1 = gm.fit_predict(X) y_pred2 = gm.predict(X) assert_array_equal(y_pred1, y_pred2) def test_gaussian_mixture_fit(global_dtype): # recover the ground truth rng = np.random.RandomState(0) rand_data = RandomData(rng, dtype=global_dtype) n_features = rand_data.n_features n_components = rand_data.n_components for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture( n_components=n_components, n_init=20, reg_covar=0, random_state=rng, covariance_type=covar_type, ) g.fit(X) # needs more data to pass the test with rtol=1e-7 assert_allclose( np.sort(g.weights_), np.sort(rand_data.weights), rtol=0.1, atol=1e-2 ) arg_idx1 = g.means_[:, 0].argsort() arg_idx2 = rand_data.means[:, 0].argsort() assert_allclose( g.means_[arg_idx1], rand_data.means[arg_idx2], rtol=0.1, atol=1e-2 ) if covar_type == "full": prec_pred = g.precisions_ prec_test = rand_data.precisions["full"] elif covar_type == "tied": prec_pred = np.array([g.precisions_] * n_components) prec_test = np.array([rand_data.precisions["tied"]] * n_components) elif covar_type == "spherical": prec_pred = np.array([np.eye(n_features) * c for c in g.precisions_]) prec_test = np.array( [np.eye(n_features) * c for c in rand_data.precisions["spherical"]] ) elif covar_type == "diag": prec_pred = np.array([np.diag(d) for d in g.precisions_]) prec_test = np.array([np.diag(d) for d in rand_data.precisions["diag"]]) arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort() arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort() for k, h in zip(arg_idx1, arg_idx2): ecov = EmpiricalCovariance() ecov.covariance_ = prec_test[h] # the accuracy depends on the number of data and 
randomness, rng assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15) assert g.means_.dtype == global_dtype assert g.covariances_.dtype == global_dtype assert g.precisions_.dtype == global_dtype def test_gaussian_mixture_fit_best_params(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components = rand_data.n_components n_init = 10 for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture( n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type, ) ll = [] for _ in range(n_init): g.fit(X) ll.append(g.score(X)) ll = np.array(ll) g_best = GaussianMixture( n_components=n_components, n_init=n_init, reg_covar=0, random_state=rng, covariance_type=covar_type, ) g_best.fit(X) assert_almost_equal(ll.min(), g_best.score(X)) def test_gaussian_mixture_fit_convergence_warning(): rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=1) n_components = rand_data.n_components max_iter = 1 for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture( n_components=n_components, n_init=1, max_iter=max_iter, reg_covar=0, random_state=rng, covariance_type=covar_type, ) msg = ( "Best performing initialization did not converge. " "Try different init parameters, or increase max_iter, " "tol, or check for degenerate data." 
) with pytest.warns(ConvergenceWarning, match=msg): g.fit(X) def test_multiple_init(): # Test that multiple inits does not much worse than a single one rng = np.random.RandomState(0) n_samples, n_features, n_components = 50, 5, 2 X = rng.randn(n_samples, n_features) for cv_type in COVARIANCE_TYPE: train1 = ( GaussianMixture( n_components=n_components, covariance_type=cv_type, random_state=0 ) .fit(X) .score(X) ) train2 = ( GaussianMixture( n_components=n_components, covariance_type=cv_type, random_state=0, n_init=5, ) .fit(X) .score(X) ) assert train2 >= train1 def test_gaussian_mixture_n_parameters(): # Test that the right number of parameters is estimated rng = np.random.RandomState(0) n_samples, n_features, n_components = 50, 5, 2 X = rng.randn(n_samples, n_features) n_params = {"spherical": 13, "diag": 21, "tied": 26, "full": 41} for cv_type in COVARIANCE_TYPE: g = GaussianMixture( n_components=n_components, covariance_type=cv_type, random_state=rng ).fit(X) assert g._n_parameters() == n_params[cv_type] def test_bic_1d_1component(): # Test all of the covariance_types return the same BIC score for # 1-dimensional, 1 component fits. 
rng = np.random.RandomState(0) n_samples, n_dim, n_components = 100, 1, 1 X = rng.randn(n_samples, n_dim) bic_full = ( GaussianMixture( n_components=n_components, covariance_type="full", random_state=rng ) .fit(X) .bic(X) ) for covariance_type in ["tied", "diag", "spherical"]: bic = ( GaussianMixture( n_components=n_components, covariance_type=covariance_type, random_state=rng, ) .fit(X) .bic(X) ) assert_almost_equal(bic_full, bic) def test_gaussian_mixture_aic_bic(): # Test the aic and bic criteria rng = np.random.RandomState(0) n_samples, n_features, n_components = 50, 3, 2 X = rng.randn(n_samples, n_features) # standard gaussian entropy sgh = 0.5 * ( fast_logdet(np.cov(X.T, bias=1)) + n_features * (1 + np.log(2 * np.pi)) ) for cv_type in COVARIANCE_TYPE: g = GaussianMixture( n_components=n_components, covariance_type=cv_type, random_state=rng, max_iter=200, ) g.fit(X) aic = 2 * n_samples * sgh + 2 * g._n_parameters() bic = 2 * n_samples * sgh + np.log(n_samples) * g._n_parameters() bound = n_features / np.sqrt(n_samples) assert (g.aic(X) - aic) / n_samples < bound assert (g.bic(X) - bic) / n_samples < bound def test_gaussian_mixture_verbose(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components = rand_data.n_components for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture( n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type, verbose=1, ) h = GaussianMixture( n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type, verbose=2, ) old_stdout = sys.stdout sys.stdout = StringIO() try: g.fit(X) h.fit(X) finally: sys.stdout = old_stdout @pytest.mark.filterwarnings("ignore:.*did not converge.*") @pytest.mark.parametrize("seed", (0, 1, 2)) def test_warm_start(seed): random_state = seed rng = np.random.RandomState(random_state) n_samples, n_features, n_components = 500, 2, 2 X = rng.rand(n_samples, n_features) # Assert the warm_start give the 
same result for the same number of iter g = GaussianMixture( n_components=n_components, n_init=1, max_iter=2, reg_covar=0, random_state=random_state, warm_start=False, ) h = GaussianMixture( n_components=n_components, n_init=1, max_iter=1, reg_covar=0, random_state=random_state, warm_start=True, ) g.fit(X) score1 = h.fit(X).score(X) score2 = h.fit(X).score(X) assert_almost_equal(g.weights_, h.weights_) assert_almost_equal(g.means_, h.means_) assert_almost_equal(g.precisions_, h.precisions_) assert score2 > score1 # Assert that by using warm_start we can converge to a good solution g = GaussianMixture( n_components=n_components, n_init=1, max_iter=5, reg_covar=0, random_state=random_state, warm_start=False, tol=1e-6, ) h = GaussianMixture( n_components=n_components, n_init=1, max_iter=5, reg_covar=0, random_state=random_state, warm_start=True, tol=1e-6, ) g.fit(X) assert not g.converged_ h.fit(X) # depending on the data there is large variability in the number of # refit necessary to converge due to the complete randomness of the # data for _ in range(1000): h.fit(X) if h.converged_: break assert h.converged_ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") def test_convergence_detected_with_warm_start(): # We check that convergence is detected when warm_start=True rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components = rand_data.n_components X = rand_data.X["full"] for max_iter in (1, 2, 50): gmm = GaussianMixture( n_components=n_components, warm_start=True, max_iter=max_iter, random_state=rng, ) for _ in range(100): gmm.fit(X) if gmm.converged_: break assert gmm.converged_ assert max_iter >= gmm.n_iter_ def test_score(global_dtype): covar_type = "full" rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7, dtype=global_dtype) n_components = rand_data.n_components X = rand_data.X[covar_type] assert X.dtype == global_dtype # Check the error message if we don't call fit gmm1 = GaussianMixture( 
n_components=n_components, n_init=1, max_iter=1, reg_covar=0, random_state=rng, covariance_type=covar_type, ) msg = ( "This GaussianMixture instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this estimator." ) with pytest.raises(NotFittedError, match=msg): gmm1.score(X) # Check score value with warnings.catch_warnings(): warnings.simplefilter("ignore", ConvergenceWarning) gmm1.fit(X) assert gmm1.means_.dtype == global_dtype
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/mixture/tests/__init__.py
sklearn/mixture/tests/__init__.py
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/__check_build/__init__.py
sklearn/__check_build/__init__.py
"""Module to give helpful messages to the user that did not compile scikit-learn properly. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import os INPLACE_MSG = """ It appears that you are importing a local scikit-learn source tree. For this, you need to have an inplace install. Maybe you are in the source directory and you need to try from another location.""" STANDARD_MSG = """ If you have used an installer, please check that it is suited for your Python version, your operating system and your platform.""" def raise_build_error(e): # Raise a comprehensible error and list the contents of the # directory to help debugging on the mailing list. local_dir = os.path.split(__file__)[0] msg = STANDARD_MSG if local_dir == "sklearn/__check_build": # Picking up the local install: this will work only if the # install is an 'inplace build' msg = INPLACE_MSG dir_content = list() for i, filename in enumerate(os.listdir(local_dir)): if (i + 1) % 3: dir_content.append(filename.ljust(26)) else: dir_content.append(filename + "\n") raise ImportError( """%s ___________________________________________________________________________ Contents of %s: %s ___________________________________________________________________________ It seems that scikit-learn has not been built correctly. If you have installed scikit-learn from source, please do not forget to build the package before using it. For detailed instructions, see: https://scikit-learn.org/dev/developers/development_setup.html#install-editable-version-of-scikit-learn %s""" % (e, local_dir, "".join(dir_content).strip(), msg) ) try: from sklearn.__check_build._check_build import check_build # noqa: F401 except ImportError as e: raise_build_error(e)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/_encoders.py
sklearn/preprocessing/_encoders.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numbers import warnings from numbers import Integral import numpy as np from scipy import sparse from sklearn.base import ( BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context, ) from sklearn.utils import _safe_indexing, check_array from sklearn.utils._encode import _check_unknown, _encode, _get_counts, _unique from sklearn.utils._mask import _get_mask from sklearn.utils._missing import is_scalar_nan from sklearn.utils._param_validation import Interval, RealNotInt, StrOptions from sklearn.utils._set_output import _get_output_config from sklearn.utils.validation import ( _check_feature_names_in, check_is_fitted, validate_data, ) __all__ = ["OneHotEncoder", "OrdinalEncoder"] class _BaseEncoder(TransformerMixin, BaseEstimator): """ Base class for encoders that includes the code to categorize and transform the input features. """ def _check_X(self, X, ensure_all_finite=True): """ Perform custom check_array: - convert list of strings to object dtype - check for missing values for object dtype data (check_array does not do that) - return list of features (arrays): this list of features is constructed feature by feature to preserve the data types of pandas DataFrame columns, as otherwise information is lost and cannot be used, e.g. for the `categories_` attribute. """ if not (hasattr(X, "iloc") and getattr(X, "ndim", 0) == 2): # if not a dataframe, do normal check_array validation X_temp = check_array(X, dtype=None, ensure_all_finite=ensure_all_finite) if not hasattr(X, "dtype") and np.issubdtype(X_temp.dtype, np.str_): X = check_array(X, dtype=object, ensure_all_finite=ensure_all_finite) else: X = X_temp needs_validation = False else: # pandas dataframe, do validation later column by column, in order # to keep the dtype information to be used in the encoder. 
needs_validation = ensure_all_finite n_samples, n_features = X.shape X_columns = [] for i in range(n_features): Xi = _safe_indexing(X, indices=i, axis=1) Xi = check_array( Xi, ensure_2d=False, dtype=None, ensure_all_finite=needs_validation ) X_columns.append(Xi) return X_columns, n_samples, n_features def _fit( self, X, handle_unknown="error", ensure_all_finite=True, return_counts=False, return_and_ignore_missing_for_infrequent=False, ): self._check_infrequent_enabled() validate_data(self, X=X, reset=True, skip_check_array=True) X_list, n_samples, n_features = self._check_X( X, ensure_all_finite=ensure_all_finite ) self.n_features_in_ = n_features if self.categories != "auto": if len(self.categories) != n_features: raise ValueError( "Shape mismatch: if categories is an array," " it has to be of shape (n_features,)." ) self.categories_ = [] category_counts = [] compute_counts = return_counts or self._infrequent_enabled for i in range(n_features): Xi = X_list[i] if self.categories == "auto": result = _unique(Xi, return_counts=compute_counts) if compute_counts: cats, counts = result category_counts.append(counts) else: cats = result else: if np.issubdtype(Xi.dtype, np.str_): # Always convert string categories to objects to avoid # unexpected string truncation for longer category labels # passed in the constructor. Xi_dtype = object else: Xi_dtype = Xi.dtype cats = np.array(self.categories[i], dtype=Xi_dtype) if ( cats.dtype == object and isinstance(cats[0], bytes) and Xi.dtype.kind != "S" ): msg = ( f"In column {i}, the predefined categories have type 'bytes'" " which is incompatible with values of type" f" '{type(Xi[0]).__name__}'." 
) raise ValueError(msg) # `nan` must be the last stated category for category in cats[:-1]: if is_scalar_nan(category): raise ValueError( "Nan should be the last element in user" f" provided categories, see categories {cats}" f" in column #{i}" ) if cats.size != len(_unique(cats)): msg = ( f"In column {i}, the predefined categories" " contain duplicate elements." ) raise ValueError(msg) if Xi.dtype.kind not in "OUS": sorted_cats = np.sort(cats) error_msg = ( "Unsorted categories are not supported for numerical categories" ) # if there are nans, nan should be the last element stop_idx = -1 if np.isnan(sorted_cats[-1]) else None if np.any(sorted_cats[:stop_idx] != cats[:stop_idx]): raise ValueError(error_msg) if handle_unknown == "error": diff = _check_unknown(Xi, cats) if diff: msg = ( "Found unknown categories {0} in column {1}" " during fit".format(diff, i) ) raise ValueError(msg) if compute_counts: category_counts.append(_get_counts(Xi, cats)) self.categories_.append(cats) output = {"n_samples": n_samples} if return_counts: output["category_counts"] = category_counts missing_indices = {} if return_and_ignore_missing_for_infrequent: for feature_idx, categories_for_idx in enumerate(self.categories_): if is_scalar_nan(categories_for_idx[-1]): # `nan` values can only be placed in the latest position missing_indices[feature_idx] = categories_for_idx.size - 1 output["missing_indices"] = missing_indices if self._infrequent_enabled: self._fit_infrequent_category_mapping( n_samples, category_counts, missing_indices, ) return output def _transform( self, X, handle_unknown="error", ensure_all_finite=True, warn_on_unknown=False, ignore_category_indices=None, ): X_list, n_samples, n_features = self._check_X( X, ensure_all_finite=ensure_all_finite ) validate_data(self, X=X, reset=False, skip_check_array=True) X_int = np.zeros((n_samples, n_features), dtype=int) X_mask = np.ones((n_samples, n_features), dtype=bool) columns_with_unknown = [] for i in range(n_features): Xi = 
X_list[i] diff, valid_mask = _check_unknown(Xi, self.categories_[i], return_mask=True) if not np.all(valid_mask): if handle_unknown == "error": msg = ( "Found unknown categories {0} in column {1}" " during transform".format(diff, i) ) raise ValueError(msg) else: if warn_on_unknown: columns_with_unknown.append(i) # Set the problematic rows to an acceptable value and # continue `The rows are marked `X_mask` and will be # removed later. X_mask[:, i] = valid_mask # cast Xi into the largest string type necessary # to handle different lengths of numpy strings if ( self.categories_[i].dtype.kind in ("U", "S") and self.categories_[i].itemsize > Xi.itemsize ): Xi = Xi.astype(self.categories_[i].dtype) elif self.categories_[i].dtype.kind == "O" and Xi.dtype.kind == "U": # categories are objects and Xi are numpy strings. # Cast Xi to an object dtype to prevent truncation # when setting invalid values. Xi = Xi.astype("O") else: Xi = Xi.copy() Xi[~valid_mask] = self.categories_[i][0] # We use check_unknown=False, since _check_unknown was # already called above. X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False) if columns_with_unknown: if handle_unknown == "infrequent_if_exist": msg = ( "Found unknown categories in columns " f"{columns_with_unknown} during transform. These " "unknown categories will be encoded as the " "infrequent category." ) else: msg = ( "Found unknown categories in columns " f"{columns_with_unknown} during transform. 
These " "unknown categories will be encoded as all zeros" ) warnings.warn(msg, UserWarning) self._map_infrequent_categories(X_int, X_mask, ignore_category_indices) return X_int, X_mask @property def infrequent_categories_(self): """Infrequent categories for each feature.""" # raises an AttributeError if `_infrequent_indices` is not defined infrequent_indices = self._infrequent_indices return [ None if indices is None else category[indices] for category, indices in zip(self.categories_, infrequent_indices) ] def _check_infrequent_enabled(self): """ This functions checks whether _infrequent_enabled is True or False. This has to be called after parameter validation in the fit function. """ max_categories = getattr(self, "max_categories", None) min_frequency = getattr(self, "min_frequency", None) self._infrequent_enabled = ( max_categories is not None and max_categories >= 1 ) or min_frequency is not None def _identify_infrequent(self, category_count, n_samples, col_idx): """Compute the infrequent indices. Parameters ---------- category_count : ndarray of shape (n_cardinality,) Category counts. n_samples : int Number of samples. col_idx : int Index of the current category. Only used for the error message. Returns ------- output : ndarray of shape (n_infrequent_categories,) or None If there are infrequent categories, indices of infrequent categories. Otherwise None. 
""" if isinstance(self.min_frequency, numbers.Integral): infrequent_mask = category_count < self.min_frequency elif isinstance(self.min_frequency, numbers.Real): min_frequency_abs = n_samples * self.min_frequency infrequent_mask = category_count < min_frequency_abs else: infrequent_mask = np.zeros(category_count.shape[0], dtype=bool) n_current_features = category_count.size - infrequent_mask.sum() + 1 if self.max_categories is not None and self.max_categories < n_current_features: # max_categories includes the one infrequent category frequent_category_count = self.max_categories - 1 if frequent_category_count == 0: # All categories are infrequent infrequent_mask[:] = True else: # stable sort to preserve original count order smallest_levels = np.argsort(category_count, kind="mergesort")[ :-frequent_category_count ] infrequent_mask[smallest_levels] = True output = np.flatnonzero(infrequent_mask) return output if output.size > 0 else None def _fit_infrequent_category_mapping( self, n_samples, category_counts, missing_indices ): """Fit infrequent categories. Defines the private attribute: `_default_to_infrequent_mappings`. For feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping from the integer encoding returned by `super().transform()` into infrequent categories. If `_default_to_infrequent_mappings[i]` is None, there were no infrequent categories in the training set. For example if categories 0, 2 and 4 were frequent, while categories 1, 3, 5 were infrequent for feature 7, then these categories are mapped to a single output: `_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])` Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]` is an array of indices such that `categories_[i][_infrequent_indices[i]]` are all the infrequent category labels. If the feature `i` has no infrequent categories `_infrequent_indices[i]` is None. .. versionadded:: 1.1 Parameters ---------- n_samples : int Number of samples in training set. 
category_counts: list of ndarray `category_counts[i]` is the category counts corresponding to `self.categories_[i]`. missing_indices : dict Dict mapping from feature_idx to category index with a missing value. """ # Remove missing value from counts, so it is not considered as infrequent if missing_indices: category_counts_ = [] for feature_idx, count in enumerate(category_counts): if feature_idx in missing_indices: category_counts_.append( np.delete(count, missing_indices[feature_idx]) ) else: category_counts_.append(count) else: category_counts_ = category_counts self._infrequent_indices = [ self._identify_infrequent(category_count, n_samples, col_idx) for col_idx, category_count in enumerate(category_counts_) ] # compute mapping from default mapping to infrequent mapping self._default_to_infrequent_mappings = [] for feature_idx, infreq_idx in enumerate(self._infrequent_indices): cats = self.categories_[feature_idx] # no infrequent categories if infreq_idx is None: self._default_to_infrequent_mappings.append(None) continue n_cats = len(cats) if feature_idx in missing_indices: # Missing index was removed from this category when computing # infrequent indices, thus we need to decrease the number of # total categories when considering the infrequent mapping. n_cats -= 1 # infrequent indices exist mapping = np.empty(n_cats, dtype=np.int64) n_infrequent_cats = infreq_idx.size # infrequent categories are mapped to the last element. n_frequent_cats = n_cats - n_infrequent_cats mapping[infreq_idx] = n_frequent_cats frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx) mapping[frequent_indices] = np.arange(n_frequent_cats) self._default_to_infrequent_mappings.append(mapping) def _map_infrequent_categories(self, X_int, X_mask, ignore_category_indices): """Map infrequent categories to integer representing the infrequent category. This modifies X_int in-place. 
Values that were invalid based on `X_mask` are mapped to the infrequent category if there was an infrequent category for that feature. Parameters ---------- X_int: ndarray of shape (n_samples, n_features) Integer encoded categories. X_mask: ndarray of shape (n_samples, n_features) Bool mask for valid values in `X_int`. ignore_category_indices : dict Dictionary mapping from feature_idx to category index to ignore. Ignored indexes will not be grouped and the original ordinal encoding will remain. """ if not self._infrequent_enabled: return ignore_category_indices = ignore_category_indices or {} for col_idx in range(X_int.shape[1]): infrequent_idx = self._infrequent_indices[col_idx] if infrequent_idx is None: continue X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0] if self.handle_unknown in ("infrequent_if_exist", "warn"): # All the unknown values are now mapped to the # infrequent_idx[0], which makes the unknown values valid # This is needed in `transform` when the encoding is formed # using `X_mask`. X_mask[:, col_idx] = True # Remaps encoding in `X_int` where the infrequent categories are # grouped together. for i, mapping in enumerate(self._default_to_infrequent_mappings): if mapping is None: continue if i in ignore_category_indices: # Update rows that are **not** ignored rows_to_update = X_int[:, i] != ignore_category_indices[i] else: rows_to_update = slice(None) X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i]) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.categorical = True tags.input_tags.allow_nan = True return tags class OneHotEncoder(_BaseEncoder): """ Encode categorical features as a one-hot numeric array. The input to this transformer should be an array-like of integers or strings, denoting the values taken on by categorical (discrete) features. The features are encoded using a one-hot (aka 'one-of-K' or 'dummy') encoding scheme. 
This creates a binary column for each category and returns a sparse matrix or dense array (depending on the ``sparse_output`` parameter). By default, the encoder derives the categories based on the unique values in each feature. Alternatively, you can also specify the `categories` manually. This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Note: a one-hot encoding of y labels should use a LabelBinarizer instead. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. For a comparison of different encoders, refer to: :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. Parameters ---------- categories : 'auto' or a list of array-like, default='auto' Categories (unique values) per feature: - 'auto' : Determine categories automatically from the training data. - list : ``categories[i]`` holds the categories expected in the ith column. The passed categories should not mix strings and numeric values within a single feature, and should be sorted in case of numeric values. The used categories can be found in the ``categories_`` attribute. .. versionadded:: 0.20 drop : {'first', 'if_binary'} or an array-like of shape (n_features,), \ default=None Specifies a methodology to use to drop one of the categories per feature. This is useful in situations where perfectly collinear features cause problems, such as when feeding the resulting data into an unregularized linear regression model. However, dropping one category breaks the symmetry of the original representation and can therefore induce a bias in downstream models, for instance for penalized linear classification or regression models. - None : retain all features (the default). - 'first' : drop the first category in each feature. If only one category is present, the feature will be dropped entirely. - 'if_binary' : drop the first category in each feature with two categories. 
Features with 1 or more than 2 categories are left intact. - array : ``drop[i]`` is the category in feature ``X[:, i]`` that should be dropped. When `max_categories` or `min_frequency` is configured to group infrequent categories, the dropping behavior is handled after the grouping. .. versionadded:: 0.21 The parameter `drop` was added in 0.21. .. versionchanged:: 0.23 The option `drop='if_binary'` was added in 0.23. .. versionchanged:: 1.1 Support for dropping infrequent categories. sparse_output : bool, default=True When ``True``, it returns a :class:`scipy.sparse.csr_matrix`, i.e. a sparse matrix in "Compressed Sparse Row" (CSR) format. .. versionadded:: 1.2 `sparse` was renamed to `sparse_output` dtype : number type, default=np.float64 Desired dtype of output. handle_unknown : {'error', 'ignore', 'infrequent_if_exist', 'warn'}, \ default='error' Specifies the way unknown categories are handled during :meth:`transform`. - 'error' : Raise an error if an unknown category is present during transform. - 'ignore' : When an unknown category is encountered during transform, the resulting one-hot encoded columns for this feature will be all zeros. In the inverse transform, an unknown category will be denoted as None. - 'infrequent_if_exist' : When an unknown category is encountered during transform, the resulting one-hot encoded columns for this feature will map to the infrequent category if it exists. The infrequent category will be mapped to the last position in the encoding. During inverse transform, an unknown category will be mapped to the category denoted `'infrequent'` if it exists. If the `'infrequent'` category does not exist, then :meth:`transform` and :meth:`inverse_transform` will handle an unknown category as with `handle_unknown='ignore'`. Infrequent categories exist based on `min_frequency` and `max_categories`. Read more in the :ref:`User Guide <encoder_infrequent_categories>`. 
- 'warn' : When an unknown category is encountered during transform a warning is issued, and the encoding then proceeds as described for `handle_unknown="infrequent_if_exist"`. .. versionchanged:: 1.1 `'infrequent_if_exist'` was added to automatically handle unknown categories and infrequent categories. .. versionadded:: 1.6 The option `"warn"` was added in 1.6. min_frequency : int or float, default=None Specifies the minimum frequency below which a category will be considered infrequent. - If `int`, categories with a smaller cardinality will be considered infrequent. - If `float`, categories with a smaller cardinality than `min_frequency * n_samples` will be considered infrequent. .. versionadded:: 1.1 Read more in the :ref:`User Guide <encoder_infrequent_categories>`. max_categories : int, default=None Specifies an upper limit to the number of output features for each input feature when considering infrequent categories. If there are infrequent categories, `max_categories` includes the category representing the infrequent categories along with the frequent categories. If `None`, there is no limit to the number of output features. .. versionadded:: 1.1 Read more in the :ref:`User Guide <encoder_infrequent_categories>`. feature_name_combiner : "concat" or callable, default="concat" Callable with signature `def callable(input_feature, category)` that returns a string. This is used to create feature names to be returned by :meth:`get_feature_names_out`. `"concat"` concatenates encoded feature name and category with `feature + "_" + str(category)`.E.g. feature X with values 1, 6, 7 create feature names `X_1, X_6, X_7`. .. versionadded:: 1.3 Attributes ---------- categories_ : list of arrays The categories of each feature determined during fitting (in order of the features in X and corresponding with the output of ``transform``). This includes the category specified in ``drop`` (if any). 
drop_idx_ : array of shape (n_features,) - ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category to be dropped for each feature. - ``drop_idx_[i] = None`` if no category is to be dropped from the feature with index ``i``, e.g. when `drop='if_binary'` and the feature isn't binary. - ``drop_idx_ = None`` if all the transformed features will be retained. If infrequent categories are enabled by setting `min_frequency` or `max_categories` to a non-default value and `drop_idx[i]` corresponds to an infrequent category, then the entire infrequent category is dropped. .. versionchanged:: 0.23 Added the possibility to contain `None` values. infrequent_categories_ : list of ndarray Defined only if infrequent categories are enabled by setting `min_frequency` or `max_categories` to a non-default value. `infrequent_categories_[i]` are the infrequent categories for feature `i`. If the feature `i` has no infrequent categories `infrequent_categories_[i]` is None. .. versionadded:: 1.1 n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 1.0 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 feature_name_combiner : callable or None Callable with signature `def callable(input_feature, category)` that returns a string. This is used to create feature names to be returned by :meth:`get_feature_names_out`. .. versionadded:: 1.3 See Also -------- OrdinalEncoder : Performs an ordinal (integer) encoding of the categorical features. TargetEncoder : Encodes categorical features using the target. sklearn.feature_extraction.DictVectorizer : Performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : Performs an approximate one-hot encoding of dictionary items or strings. LabelBinarizer : Binarizes labels in a one-vs-all fashion. 
MultiLabelBinarizer : Transforms between iterable of iterables and a multilabel format, e.g. a (samples x classes) binary matrix indicating the presence of a class label. Examples -------- Given a dataset with two features, we let the encoder find the unique values per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder One can discard categories not seen during `fit`: >>> enc = OneHotEncoder(handle_unknown='ignore') >>> X = [['Male', 1], ['Female', 3], ['Female', 2]] >>> enc.fit(X) OneHotEncoder(handle_unknown='ignore') >>> enc.categories_ [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] >>> enc.transform([['Female', 1], ['Male', 4]]).toarray() array([[1., 0., 1., 0., 0.], [0., 1., 0., 0., 0.]]) >>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]]) array([['Male', 1], [None, 2]], dtype=object) >>> enc.get_feature_names_out(['gender', 'group']) array(['gender_Female', 'gender_Male', 'group_1', 'group_2', 'group_3'], ...) One can always drop the first column for each feature: >>> drop_enc = OneHotEncoder(drop='first').fit(X) >>> drop_enc.categories_ [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)] >>> drop_enc.transform([['Female', 1], ['Male', 2]]).toarray() array([[0., 0., 0.], [1., 1., 0.]]) Or drop a column for feature only having 2 categories: >>> drop_binary_enc = OneHotEncoder(drop='if_binary').fit(X) >>> drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray() array([[0., 1., 0., 0.], [1., 0., 1., 0.]]) One can change the way feature names are created. >>> def custom_combiner(feature, category): ... 
return str(feature) + "_" + type(category).__name__ + "_" + str(category) >>> custom_fnames_enc = OneHotEncoder(feature_name_combiner=custom_combiner).fit(X) >>> custom_fnames_enc.get_feature_names_out() array(['x0_str_Female', 'x0_str_Male', 'x1_int_1', 'x1_int_2', 'x1_int_3'], dtype=object) Infrequent categories are enabled by setting `max_categories` or `min_frequency`. >>> import numpy as np >>> X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T >>> ohe = OneHotEncoder(max_categories=3, sparse_output=False).fit(X) >>> ohe.infrequent_categories_ [array(['a', 'd'], dtype=object)] >>> ohe.transform([["a"], ["b"]]) array([[0., 0., 1.], [1., 0., 0.]]) """ _parameter_constraints: dict = { "categories": [StrOptions({"auto"}), list], "drop": [StrOptions({"first", "if_binary"}), "array-like", None], "dtype": "no_validation", # validation delegated to numpy "handle_unknown": [ StrOptions({"error", "ignore", "infrequent_if_exist", "warn"}) ], "max_categories": [Interval(Integral, 1, None, closed="left"), None], "min_frequency": [ Interval(Integral, 1, None, closed="left"), Interval(RealNotInt, 0, 1, closed="neither"), None, ], "sparse_output": ["boolean"], "feature_name_combiner": [StrOptions({"concat"}), callable], } def __init__( self, *, categories="auto", drop=None, sparse_output=True, dtype=np.float64, handle_unknown="error", min_frequency=None, max_categories=None, feature_name_combiner="concat", ): self.categories = categories self.sparse_output = sparse_output self.dtype = dtype self.handle_unknown = handle_unknown self.drop = drop self.min_frequency = min_frequency self.max_categories = max_categories self.feature_name_combiner = feature_name_combiner def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx): """Convert `drop_idx` into the index for infrequent categories. If there are no infrequent categories, then `drop_idx` is returned. This method is called in `_set_drop_idx` when the `drop` parameter is an array-like. 
""" if not self._infrequent_enabled: return drop_idx default_to_infrequent = self._default_to_infrequent_mappings[feature_idx] if default_to_infrequent is None:
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/_target_encoder.py
sklearn/preprocessing/_target_encoder.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Integral, Real import numpy as np from sklearn.base import OneToOneFeatureMixin, _fit_context from sklearn.preprocessing._encoders import _BaseEncoder from sklearn.preprocessing._target_encoder_fast import ( _fit_encoding_fast, _fit_encoding_fast_auto_smooth, ) from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.multiclass import type_of_target from sklearn.utils.validation import ( _check_feature_names_in, _check_y, check_consistent_length, check_is_fitted, ) class TargetEncoder(OneToOneFeatureMixin, _BaseEncoder): """Target Encoder for regression and classification targets. Each category is encoded based on a shrunk estimate of the average target values for observations belonging to the category. The encoding scheme mixes the global target mean with the target mean conditioned on the value of the category (see [MIC]_). When the target type is "multiclass", encodings are based on the conditional probability estimate for each class. The target is first binarized using the "one-vs-all" scheme via :class:`~sklearn.preprocessing.LabelBinarizer`, then the average target value for each class and each category is used for encoding, resulting in `n_features` * `n_classes` encoded output features. :class:`TargetEncoder` considers missing values, such as `np.nan` or `None`, as another category and encodes them like any other category. Categories that are not seen during :meth:`fit` are encoded with the target mean, i.e. `target_mean_`. For a demo on the importance of the `TargetEncoder` internal cross-fitting, see :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder_cross_val.py`. For a comparison of different encoders, refer to :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. Read more in the :ref:`User Guide <target_encoder>`. .. 
note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>` for details. .. versionadded:: 1.3 Parameters ---------- categories : "auto" or list of shape (n_features,) of array-like, default="auto" Categories (unique values) per feature: - `"auto"` : Determine categories automatically from the training data. - list : `categories[i]` holds the categories expected in the i-th column. The passed categories should not mix strings and numeric values within a single feature, and should be sorted in case of numeric values. The used categories are stored in the `categories_` fitted attribute. target_type : {"auto", "continuous", "binary", "multiclass"}, default="auto" Type of target. - `"auto"` : Type of target is inferred with :func:`~sklearn.utils.multiclass.type_of_target`. - `"continuous"` : Continuous target - `"binary"` : Binary target - `"multiclass"` : Multiclass target .. note:: The type of target inferred with `"auto"` may not be the desired target type used for modeling. For example, if the target consisted of integers between 0 and 100, then :func:`~sklearn.utils.multiclass.type_of_target` will infer the target as `"multiclass"`. In this case, setting `target_type="continuous"` will specify the target as a regression problem. The `target_type_` attribute gives the target type used by the encoder. .. versionchanged:: 1.4 Added the option 'multiclass'. smooth : "auto" or float, default="auto" The amount of mixing of the target mean conditioned on the value of the category with the global target mean. A larger `smooth` value will put more weight on the global target mean. If `"auto"`, then `smooth` is set to an empirical Bayes estimate. cv : int, default=5 Determines the number of folds in the :term:`cross fitting` strategy used in :meth:`fit_transform`. 
For classification targets, `StratifiedKFold` is used and for continuous targets, `KFold` is used. shuffle : bool, default=True Whether to shuffle the data in :meth:`fit_transform` before splitting into folds. Note that the samples within each split will not be shuffled. random_state : int, RandomState instance or None, default=None When `shuffle` is True, `random_state` affects the ordering of the indices, which controls the randomness of each fold. Otherwise, this parameter has no effect. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- encodings_ : list of shape (n_features,) or (n_features * n_classes) of \ ndarray Encodings learnt on all of `X`. For feature `i`, `encodings_[i]` are the encodings matching the categories listed in `categories_[i]`. When `target_type_` is "multiclass", the encoding for feature `i` and class `j` is stored in `encodings_[j + (i * len(classes_))]`. E.g., for 2 features (f) and 3 classes (c), encodings are ordered: f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2, categories_ : list of shape (n_features,) of ndarray The categories of each input feature determined during fitting or specified in `categories` (in order of the features in `X` and corresponding with the output of :meth:`transform`). target_type_ : str Type of target. target_mean_ : float The overall mean of the target. This value is only used in :meth:`transform` to encode categories. n_features_in_ : int Number of features seen during :term:`fit`. feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. classes_ : ndarray or None If `target_type_` is 'binary' or 'multiclass', holds the label for each class, otherwise `None`. See Also -------- OrdinalEncoder : Performs an ordinal (integer) encoding of the categorical features. Contrary to TargetEncoder, this encoding is not supervised. 
Treating the resulting encoding as a numerical features therefore lead arbitrarily ordered values and therefore typically lead to lower predictive performance when used as preprocessing for a classifier or regressor. OneHotEncoder : Performs a one-hot encoding of categorical features. This unsupervised encoding is better suited for low cardinality categorical variables as it generate one new feature per unique category. References ---------- .. [MIC] :doi:`Micci-Barreca, Daniele. "A preprocessing scheme for high-cardinality categorical attributes in classification and prediction problems" SIGKDD Explor. Newsl. 3, 1 (July 2001), 27–32. <10.1145/507533.507538>` Examples -------- With `smooth="auto"`, the smoothing parameter is set to an empirical Bayes estimate: >>> import numpy as np >>> from sklearn.preprocessing import TargetEncoder >>> X = np.array([["dog"] * 20 + ["cat"] * 30 + ["snake"] * 38], dtype=object).T >>> y = [90.3] * 5 + [80.1] * 15 + [20.4] * 5 + [20.1] * 25 + [21.2] * 8 + [49] * 30 >>> enc_auto = TargetEncoder(smooth="auto") >>> X_trans = enc_auto.fit_transform(X, y) >>> # A high `smooth` parameter puts more weight on global mean on the categorical >>> # encodings: >>> enc_high_smooth = TargetEncoder(smooth=5000.0).fit(X, y) >>> enc_high_smooth.target_mean_ np.float64(44.3) >>> enc_high_smooth.encodings_ [array([44.1, 44.4, 44.3])] >>> # On the other hand, a low `smooth` parameter puts more weight on target >>> # conditioned on the value of the categorical: >>> enc_low_smooth = TargetEncoder(smooth=1.0).fit(X, y) >>> enc_low_smooth.encodings_ [array([21, 80.8, 43.2])] """ _parameter_constraints: dict = { "categories": [StrOptions({"auto"}), list], "target_type": [StrOptions({"auto", "continuous", "binary", "multiclass"})], "smooth": [StrOptions({"auto"}), Interval(Real, 0, None, closed="left")], "cv": [Interval(Integral, 2, None, closed="left")], "shuffle": ["boolean"], "random_state": ["random_state"], } def __init__( self, categories="auto", 
target_type="auto", smooth="auto", cv=5, shuffle=True, random_state=None, ): self.categories = categories self.smooth = smooth self.target_type = target_type self.cv = cv self.shuffle = shuffle self.random_state = random_state @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y): """Fit the :class:`TargetEncoder` to X and y. It is discouraged to use this method because it can introduce data leakage. Use `fit_transform` on training data instead. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>` for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : array-like of shape (n_samples,) The target data used to encode the categories. Returns ------- self : object Fitted encoder. """ self._fit_encodings_all(X, y) return self @_fit_context(prefer_skip_nested_validation=True) def fit_transform(self, X, y): """Fit :class:`TargetEncoder` and transform `X` with the target encoding. This method uses a :term:`cross fitting` scheme to prevent target leakage and overfitting in downstream predictors. It is the recommended method for encoding training data. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>` for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : array-like of shape (n_samples,) The target data used to encode the categories. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or \ (n_samples, (n_features * n_classes)) Transformed input. 
""" from sklearn.model_selection import ( # avoid circular import KFold, StratifiedKFold, ) X_ordinal, X_known_mask, y_encoded, n_categories = self._fit_encodings_all(X, y) # The cv splitter is voluntarily restricted to *KFold to enforce non # overlapping validation folds, otherwise the fit_transform output will # not be well-specified. if self.target_type_ == "continuous": cv = KFold(self.cv, shuffle=self.shuffle, random_state=self.random_state) else: cv = StratifiedKFold( self.cv, shuffle=self.shuffle, random_state=self.random_state ) # If 'multiclass' multiply axis=1 by num classes else keep shape the same if self.target_type_ == "multiclass": X_out = np.empty( (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), dtype=np.float64, ) else: X_out = np.empty_like(X_ordinal, dtype=np.float64) for train_idx, test_idx in cv.split(X, y): X_train, y_train = X_ordinal[train_idx, :], y_encoded[train_idx] y_train_mean = np.mean(y_train, axis=0) if self.target_type_ == "multiclass": encodings = self._fit_encoding_multiclass( X_train, y_train, n_categories, y_train_mean, ) else: encodings = self._fit_encoding_binary_or_continuous( X_train, y_train, n_categories, y_train_mean, ) self._transform_X_ordinal( X_out, X_ordinal, ~X_known_mask, test_idx, encodings, y_train_mean, ) return X_out def transform(self, X): """Transform X with the target encoding. This method internally uses the `encodings_` attribute learnt during :meth:`TargetEncoder.fit_transform` to transform test data. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>` for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or \ (n_samples, (n_features * n_classes)) Transformed input. 
""" X_ordinal, X_known_mask = self._transform( X, handle_unknown="ignore", ensure_all_finite="allow-nan" ) # If 'multiclass' multiply axis=1 by num of classes else keep shape the same if self.target_type_ == "multiclass": X_out = np.empty( (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), dtype=np.float64, ) else: X_out = np.empty_like(X_ordinal, dtype=np.float64) self._transform_X_ordinal( X_out, X_ordinal, ~X_known_mask, slice(None), self.encodings_, self.target_mean_, ) return X_out def _fit_encodings_all(self, X, y): """Fit a target encoding with all the data.""" # avoid circular import from sklearn.preprocessing import LabelBinarizer, LabelEncoder check_consistent_length(X, y) self._fit(X, handle_unknown="ignore", ensure_all_finite="allow-nan") if self.target_type == "auto": accepted_target_types = ("binary", "multiclass", "continuous") inferred_type_of_target = type_of_target(y, input_name="y") if inferred_type_of_target not in accepted_target_types: raise ValueError( "Unknown label type: Target type was inferred to be " f"{inferred_type_of_target!r}. Only {accepted_target_types} are " "supported." 
) self.target_type_ = inferred_type_of_target else: self.target_type_ = self.target_type self.classes_ = None if self.target_type_ == "binary": label_encoder = LabelEncoder() y = label_encoder.fit_transform(y) self.classes_ = label_encoder.classes_ elif self.target_type_ == "multiclass": label_binarizer = LabelBinarizer() y = label_binarizer.fit_transform(y) self.classes_ = label_binarizer.classes_ else: # continuous y = _check_y(y, y_numeric=True, estimator=self) self.target_mean_ = np.mean(y, axis=0) X_ordinal, X_known_mask = self._transform( X, handle_unknown="ignore", ensure_all_finite="allow-nan" ) n_categories = np.fromiter( (len(category_for_feature) for category_for_feature in self.categories_), dtype=np.int64, count=len(self.categories_), ) if self.target_type_ == "multiclass": encodings = self._fit_encoding_multiclass( X_ordinal, y, n_categories, self.target_mean_, ) else: encodings = self._fit_encoding_binary_or_continuous( X_ordinal, y, n_categories, self.target_mean_, ) self.encodings_ = encodings return X_ordinal, X_known_mask, y, n_categories def _fit_encoding_binary_or_continuous( self, X_ordinal, y, n_categories, target_mean ): """Learn target encodings.""" if self.smooth == "auto": y_variance = np.var(y) encodings = _fit_encoding_fast_auto_smooth( X_ordinal, y, n_categories, target_mean, y_variance, ) else: encodings = _fit_encoding_fast( X_ordinal, y, n_categories, self.smooth, target_mean, ) return encodings def _fit_encoding_multiclass(self, X_ordinal, y, n_categories, target_mean): """Learn multiclass encodings. Learn encodings for each class (c) then reorder encodings such that the same features (f) are grouped together. 
`reorder_index` enables converting from: f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2 to: f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2 """ n_features = self.n_features_in_ n_classes = len(self.classes_) encodings = [] for i in range(n_classes): y_class = y[:, i] encoding = self._fit_encoding_binary_or_continuous( X_ordinal, y_class, n_categories, target_mean[i], ) encodings.extend(encoding) reorder_index = ( idx for start in range(n_features) for idx in range(start, (n_classes * n_features), n_features) ) return [encodings[idx] for idx in reorder_index] def _transform_X_ordinal( self, X_out, X_ordinal, X_unknown_mask, row_indices, encodings, target_mean, ): """Transform X_ordinal using encodings. In the multiclass case, `X_ordinal` and `X_unknown_mask` have column (axis=1) size `n_features`, while `encodings` has length of size `n_features * n_classes`. `feat_idx` deals with this by repeating feature indices by `n_classes` E.g., for 3 features, 2 classes: 0,0,1,1,2,2 Additionally, `target_mean` is of shape (`n_classes`,) so `mean_idx` cycles through 0 to `n_classes` - 1, `n_features` times. """ if self.target_type_ == "multiclass": n_classes = len(self.classes_) for e_idx, encoding in enumerate(encodings): # Repeat feature indices by n_classes feat_idx = e_idx // n_classes # Cycle through each class mean_idx = e_idx % n_classes X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]] X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx] else: for e_idx, encoding in enumerate(encodings): X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]] X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. 
`feature_names_in_` is used unless it is not defined, in which case the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. When `type_of_target_` is "multiclass" the names are of the format '<feature_name>_<class_name>'. """ check_is_fitted(self, "n_features_in_") feature_names = _check_feature_names_in(self, input_features) if self.target_type_ == "multiclass": feature_names = [ f"{feature_name}_{class_name}" for feature_name in feature_names for class_name in self.classes_ ] return np.asarray(feature_names, dtype=object) else: return feature_names def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.target_tags.required = True return tags
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/_polynomial.py
sklearn/preprocessing/_polynomial.py
""" This file contains preprocessing tools based on polynomials. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import collections from itertools import chain, combinations from itertools import combinations_with_replacement as combinations_w_r from numbers import Integral import numpy as np from scipy import sparse from scipy.interpolate import BSpline from scipy.special import comb from sklearn.base import BaseEstimator, TransformerMixin, _fit_context from sklearn.preprocessing._csr_polynomial_expansion import ( _calc_expanded_nnz, _calc_total_nnz, _csr_polynomial_expansion, ) from sklearn.utils import check_array from sklearn.utils._array_api import ( _is_numpy_namespace, get_namespace_and_device, supported_float_dtypes, ) from sklearn.utils._mask import _get_mask from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.stats import _weighted_percentile from sklearn.utils.validation import ( FLOAT_DTYPES, _check_feature_names_in, _check_sample_weight, check_is_fitted, validate_data, ) __all__ = [ "PolynomialFeatures", "SplineTransformer", ] def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0): """Helper function for creating and appending sparse expansion matrices""" total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg) expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg) if expanded_col == 0: return None # This only checks whether each block needs 64bit integers upon # expansion. We prefer to keep int32 indexing where we can, # since currently SciPy's CSR construction downcasts when possible, # so we prefer to avoid an unnecessary cast. The dtype may still # change in the concatenation process if needed. 
# See: https://github.com/scipy/scipy/issues/16569 max_indices = expanded_col - 1 max_indptr = total_nnz max_int32 = np.iinfo(np.int32).max needs_int64 = max(max_indices, max_indptr) > max_int32 index_dtype = np.int64 if needs_int64 else np.int32 # Result of the expansion, modified in place by the # `_csr_polynomial_expansion` routine. expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype) expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype) expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype) _csr_polynomial_expansion( X.data, X.indices, X.indptr, X.shape[1], expanded_data, expanded_indices, expanded_indptr, interaction_only, deg, ) return sparse.csr_matrix( (expanded_data, expanded_indices, expanded_indptr), shape=(X.indptr.shape[0] - 1, expanded_col), dtype=X.dtype, ) class PolynomialFeatures(TransformerMixin, BaseEstimator): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Read more in the :ref:`User Guide <polynomial_features>`. Parameters ---------- degree : int or tuple (min_degree, max_degree), default=2 If a single int is given, it specifies the maximal degree of the polynomial features. If a tuple `(min_degree, max_degree)` is passed, then `min_degree` is the minimum and `max_degree` is the maximum polynomial degree of the generated features. Note that `min_degree=0` and `min_degree=1` are equivalent as outputting the degree zero term is determined by `include_bias`. interaction_only : bool, default=False If `True`, only interaction features are produced: features that are products of at most `degree` *distinct* input features, i.e. 
terms with power of 2 or higher of the same input feature are excluded: - included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc. - excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc. include_bias : bool, default=True If `True` (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). order : {'C', 'F'}, default='C' Order of output array in the dense case. `'F'` order is faster to compute, but may slow down subsequent estimators. .. versionadded:: 0.21 Attributes ---------- powers_ : ndarray of shape (`n_output_features_`, `n_features_in_`) `powers_[i, j]` is the exponent of the jth input in the ith output. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. See Also -------- SplineTransformer : Transformer that generates univariate B-spline bases for features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. 
See :ref:`examples/linear_model/plot_polynomial_interpolation.py <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>` Examples -------- >>> import numpy as np >>> from sklearn.preprocessing import PolynomialFeatures >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0., 0., 1.], [ 1., 2., 3., 4., 6., 9.], [ 1., 4., 5., 16., 20., 25.]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0.], [ 1., 2., 3., 6.], [ 1., 4., 5., 20.]]) """ _parameter_constraints: dict = { "degree": [Interval(Integral, 0, None, closed="left"), "array-like"], "interaction_only": ["boolean"], "include_bias": ["boolean"], "order": [StrOptions({"C", "F"})], } def __init__( self, degree=2, *, interaction_only=False, include_bias=True, order="C" ): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias self.order = order @staticmethod def _combinations( n_features, min_degree, max_degree, interaction_only, include_bias ): comb = combinations if interaction_only else combinations_w_r start = max(1, min_degree) iter = chain.from_iterable( comb(range(n_features), i) for i in range(start, max_degree + 1) ) if include_bias: iter = chain(comb(range(n_features), 0), iter) return iter @staticmethod def _num_combinations( n_features, min_degree, max_degree, interaction_only, include_bias ): """Calculate number of terms in polynomial expansion This should be equivalent to counting the number of terms returned by _combinations(...) but much faster. 
""" if interaction_only: combinations = sum( [ comb(n_features, i, exact=True) for i in range(max(1, min_degree), min(max_degree, n_features) + 1) ] ) else: combinations = comb(n_features + max_degree, max_degree, exact=True) - 1 if min_degree > 0: d = min_degree - 1 combinations -= comb(n_features + d, d, exact=True) - 1 if include_bias: combinations += 1 return combinations @property def powers_(self): """Exponent for each of the inputs in the output.""" check_is_fitted(self) combinations = self._combinations( n_features=self.n_features_in_, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) return np.vstack( [np.bincount(c, minlength=self.n_features_in_) for c in combinations] ) def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features is None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ powers = self.powers_ input_features = _check_feature_names_in(self, input_features) feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = " ".join( ( "%s^%d" % (input_features[ind], exp) if exp != 1 else input_features[ind] ) for ind, exp in zip(inds, row[inds]) ) else: name = "1" feature_names.append(name) return np.asarray(feature_names, dtype=object) @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """ Compute number of output features. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted transformer. """ _, n_features = validate_data(self, X, accept_sparse=True).shape if isinstance(self.degree, Integral): if self.degree == 0 and not self.include_bias: raise ValueError( "Setting degree to zero and include_bias to False would result in" " an empty output array." ) self._min_degree = 0 self._max_degree = self.degree elif ( isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2 ): self._min_degree, self._max_degree = self.degree if not ( isinstance(self._min_degree, Integral) and isinstance(self._max_degree, Integral) and self._min_degree >= 0 and self._min_degree <= self._max_degree ): raise ValueError( "degree=(min_degree, max_degree) must " "be non-negative integers that fulfil " "min_degree <= max_degree, got " f"{self.degree}." ) elif self._max_degree == 0 and not self.include_bias: raise ValueError( "Setting both min_degree and max_degree to zero and include_bias to" " False would result in an empty output array." ) else: raise ValueError( "degree must be a non-negative int or tuple " "(min_degree, max_degree), got " f"{self.degree}." ) self.n_output_features_ = self._num_combinations( n_features=n_features, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) if self.n_output_features_ > np.iinfo(np.intp).max: msg = ( "The output that would result from the current configuration would" f" have {self.n_output_features_} features which is too large to be" f" indexed by {np.intp().dtype.name}. 
Please change some or all of the" " following:\n- The number of features in the input, currently" f" {n_features=}\n- The range of degrees to calculate, currently" f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only" f" interaction terms, currently {self.interaction_only}\n- Whether to" f" include a bias term, currently {self.include_bias}." ) if ( np.intp == np.int32 and self.n_output_features_ <= np.iinfo(np.int64).max ): # pragma: nocover msg += ( "\nNote that the current Python runtime has a limited 32 bit " "address space and that this configuration would have been " "admissible if run on a 64 bit Python runtime." ) raise ValueError(msg) # We also record the number of output features for # _min_degree = 0 self._n_out_full = self._num_combinations( n_features=n_features, min_degree=0, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) return self def transform(self, X): """Transform data to polynomial features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to transform, row by row. Prefer CSR over CSC for sparse input (for speed), but CSC is required if the degree is 4 or higher. If the degree is less than 4 and the input format is CSC, it will be converted to CSR, have its polynomial features generated, then converted back to CSC. If the degree is 2 or 3, the method described in "Leveraging Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is used, which is much faster than the method used on CSC input. For this reason, a CSC input will be converted to CSR, and the output will be converted back to CSC prior to being returned, hence the preference of CSR. Returns ------- XP : {ndarray, sparse matrix} of shape (n_samples, NP) The matrix of features, where `NP` is the number of polynomial features generated from the combination of inputs. 
If a sparse matrix is provided, it will be converted into a sparse `csr_matrix`. """ check_is_fitted(self) xp, _, device_ = get_namespace_and_device(X) X = validate_data( self, X, order="F", dtype=supported_float_dtypes(xp=xp, device=device_), reset=False, accept_sparse=("csr", "csc"), ) n_samples, n_features = X.shape max_int32 = xp.iinfo(xp.int32).max if sparse.issparse(X) and X.format == "csr": if self._max_degree > 3: return self.transform(X.tocsc()).tocsr() to_stack = [] if self.include_bias: to_stack.append( sparse.csr_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype)) ) if self._min_degree <= 1 and self._max_degree > 0: to_stack.append(X) cumulative_size = sum(mat.shape[1] for mat in to_stack) for deg in range(max(2, self._min_degree), self._max_degree + 1): expanded = _create_expansion( X=X, interaction_only=self.interaction_only, deg=deg, n_features=n_features, cumulative_size=cumulative_size, ) if expanded is not None: to_stack.append(expanded) cumulative_size += expanded.shape[1] if len(to_stack) == 0: # edge case: deal with empty matrix XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype) else: XP = sparse.hstack(to_stack, dtype=X.dtype, format="csr") elif sparse.issparse(X) and X.format == "csc" and self._max_degree < 4: return self.transform(X.tocsr()).tocsc() elif sparse.issparse(X): combinations = self._combinations( n_features=n_features, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) columns = [] for combi in combinations: if combi: out_col = 1 for col_idx in combi: out_col = X[:, [col_idx]].multiply(out_col) columns.append(out_col) else: bias = sparse.csc_matrix(np.ones((X.shape[0], 1))) columns.append(bias) XP = sparse.hstack(columns, dtype=X.dtype).tocsc() else: # Do as if _min_degree = 0 and cut down array after the # computation, i.e. use _n_out_full instead of n_output_features_. 
order_kwargs = {} if _is_numpy_namespace(xp=xp): order_kwargs["order"] = self.order elif self.order == "F": raise ValueError( "PolynomialFeatures does not support order='F' for non-numpy arrays" ) XP = xp.empty( shape=(n_samples, self._n_out_full), dtype=X.dtype, device=device_, **order_kwargs, ) # What follows is a faster implementation of: # for i, comb in enumerate(combinations): # XP[:, i] = X[:, comb].prod(1) # This implementation uses two optimisations. # First one is broadcasting, # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1] # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2] # ... # multiply ([X[:, start:end], X[:, start]) -> ... # Second optimisation happens for degrees >= 3. # Xi^3 is computed reusing previous computation: # Xi^3 = Xi^2 * Xi. # degree 0 term if self.include_bias: XP[:, 0] = 1 current_col = 1 else: current_col = 0 if self._max_degree == 0: return XP # degree 1 term XP[:, current_col : current_col + n_features] = X index = list(range(current_col, current_col + n_features)) current_col += n_features index.append(current_col) # loop over degree >= 2 terms for _ in range(2, self._max_degree + 1): new_index = [] end = index[-1] for feature_idx in range(n_features): start = index[feature_idx] new_index.append(current_col) if self.interaction_only: start += index[feature_idx + 1] - index[feature_idx] next_col = current_col + end - start if next_col <= current_col: break # XP[:, start:end] are terms of degree d - 1 # that exclude feature #feature_idx. 
if _is_numpy_namespace(xp): # numpy performs this multiplication in place np.multiply( XP[:, start:end], X[:, feature_idx : feature_idx + 1], out=XP[:, current_col:next_col], casting="no", ) else: XP[:, current_col:next_col] = xp.multiply( XP[:, start:end], X[:, feature_idx : feature_idx + 1] ) current_col = next_col new_index.append(current_col) index = new_index if self._min_degree > 1: n_XP, n_Xout = self._n_out_full, self.n_output_features_ if self.include_bias: Xout = xp.empty( shape=(n_samples, n_Xout), dtype=XP.dtype, device=device_, **order_kwargs, ) Xout[:, 0] = 1 Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :] else: Xout = xp.asarray(XP[:, n_XP - n_Xout :], copy=True) XP = Xout return XP def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.array_api_support = True return tags class SplineTransformer(TransformerMixin, BaseEstimator): """Generate univariate B-spline bases for features. Generate a new feature matrix consisting of `n_splines=n_knots + degree - 1` (`n_knots - 1` for `extrapolation="periodic"`) spline basis functions (B-splines) of polynomial order=`degree` for each feature. In order to learn more about the SplineTransformer class go to: :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py` Read more in the :ref:`User Guide <spline_transformer>`. .. versionadded:: 1.0 Parameters ---------- n_knots : int, default=5 Number of knots of the splines if `knots` equals one of {'uniform', 'quantile'}. Must be larger or equal 2. Ignored if `knots` is array-like. degree : int, default=3 The polynomial degree of the spline basis. Must be a non-negative integer. knots : {'uniform', 'quantile'} or array-like of shape \ (n_knots, n_features), default='uniform' Set knot positions such that first knot <= features <= last knot. - If 'uniform', `n_knots` number of knots are distributed uniformly from min to max values of the features. 
- If 'quantile', they are distributed uniformly along the quantiles of the features. - If an array-like is given, it directly specifies the sorted knot positions including the boundary knots. Note that, internally, `degree` number of knots are added before the first knot, the same after the last knot. extrapolation : {'error', 'constant', 'linear', 'continue', 'periodic'}, \ default='constant' If 'error', values outside the min and max values of the training features raises a `ValueError`. If 'constant', the value of the splines at minimum and maximum value of the features is used as constant extrapolation. If 'linear', a linear extrapolation is used. If 'continue', the splines are extrapolated as is, i.e. option `extrapolate=True` in :class:`scipy.interpolate.BSpline`. If 'periodic', periodic splines with a periodicity equal to the distance between the first and last knot are used. Periodic splines enforce equal function values and derivatives at the first and last knot. For example, this makes it possible to avoid introducing an arbitrary jump between Dec 31st and Jan 1st in spline features derived from a naturally periodic "day-of-year" input feature. In this case it is recommended to manually set the knot values to control the period. include_bias : bool, default=True If False, then the last spline element inside the data range of a feature is dropped. As B-splines sum to one over the spline basis functions for each data point, they implicitly include a bias term, i.e. a column of ones. It acts as an intercept term in a linear models. order : {'C', 'F'}, default='C' Order of output array in the dense case. `'F'` order is faster to compute, but may slow down subsequent estimators. handle_missing : {'error', 'zeros'}, default='error' Specifies the way missing values are handled. - 'error' : Raise an error if `np.nan` values are present during :meth:`fit`. - 'zeros' : Encode splines of missing values with values `0`. 
Note that `handle_missing='zeros'` differs from first imputing missing values with zeros and then creating the spline basis. The latter creates spline basis functions which have non-zero values at the missing values whereas this option simply sets all spline basis function values to zero at the missing values. .. versionadded:: 1.8 sparse_output : bool, default=False Will return sparse CSR matrix if set True else will return an array. .. versionadded:: 1.2 Attributes ---------- bsplines_ : list of shape (n_features,) List of BSplines objects, one for each feature. n_features_in_ : int The total number of input features. feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_features_out_ : int The total number of output features, which is computed as `n_features * n_splines`, where `n_splines` is the number of bases elements of the B-splines, `n_knots + degree - 1` for non-periodic splines and `n_knots - 1` for periodic ones. If `include_bias=False`, then it is only `n_features * (n_splines - 1)`. See Also -------- KBinsDiscretizer : Transformer that bins continuous data into intervals. PolynomialFeatures : Transformer that generates polynomial and interaction features. Notes ----- High degrees and a high number of knots can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`. Examples -------- >>> import numpy as np >>> from sklearn.preprocessing import SplineTransformer >>> X = np.arange(6).reshape(6, 1) >>> spline = SplineTransformer(degree=2, n_knots=3) >>> spline.fit_transform(X) array([[0.5 , 0.5 , 0. , 0. ], [0.18, 0.74, 0.08, 0. ], [0.02, 0.66, 0.32, 0. ], [0. , 0.32, 0.66, 0.02], [0. , 0.08, 0.74, 0.18], [0. , 0. 
, 0.5 , 0.5 ]]) """ _parameter_constraints: dict = { "n_knots": [Interval(Integral, 2, None, closed="left")], "degree": [Interval(Integral, 0, None, closed="left")], "knots": [StrOptions({"uniform", "quantile"}), "array-like"], "extrapolation": [ StrOptions({"error", "constant", "linear", "continue", "periodic"}) ], "include_bias": ["boolean"], "order": [StrOptions({"C", "F"})], "handle_missing": [StrOptions({"error", "zeros"})], "sparse_output": ["boolean"], } def __init__( self, n_knots=5, degree=3, *, knots="uniform", extrapolation="constant", include_bias=True, order="C", handle_missing="error", sparse_output=False, ): self.n_knots = n_knots self.degree = degree self.knots = knots self.extrapolation = extrapolation self.include_bias = include_bias self.order = order self.handle_missing = handle_missing self.sparse_output = sparse_output @staticmethod def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None): """Calculate base knot positions for `knots` either "uniform" or "quantile". Base knots such that first knot <= feature <= last knot. For the B-spline construction with scipy.interpolate.BSpline, 2*degree knots beyond the base interval are added. Returns ------- knots : ndarray of shape (n_knots, n_features), dtype=np.float64 Knot positions (points) of base interval. """ if knots == "quantile": percentile_ranks = 100 * np.linspace( start=0, stop=1, num=n_knots, dtype=np.float64 ) if sample_weight is None: knots = np.nanpercentile(X, percentile_ranks, axis=0) else: knots = _weighted_percentile(X, sample_weight, percentile_ranks).T else: # knots == 'uniform': # Note that the variable `knots` has already been validated and # `else` is therefore safe. # Disregard observations with zero weight. 
mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0 x_min = np.zeros(X.shape[1], dtype=np.float64) x_max = np.zeros(X.shape[1], dtype=np.float64) for feature_idx in range(X.shape[1]): x = X[mask, feature_idx] if np.all(np.isnan(x)): continue else: x_min[feature_idx] = np.nanmin(x) x_max[feature_idx] = np.nanmax(x) knots = np.linspace( start=x_min, stop=x_max, num=n_knots, endpoint=True, dtype=np.float64, ) return knots def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self, "n_features_in_") n_splines = self.bsplines_[0].c.shape[1] input_features = _check_feature_names_in(self, input_features) feature_names = [] for i in range(self.n_features_in_): for j in range(n_splines - 1 + self.include_bias): feature_names.append(f"{input_features[i]}_sp_{j}") return np.asarray(feature_names, dtype=object) @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None, sample_weight=None): """Compute knot positions of splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. y : None Ignored. sample_weight : array-like of shape (n_samples,), default = None Individual weights for each sample. Used to calculate quantiles if `knots="quantile"`. For `knots="uniform"`, zero weighted observations are ignored for finding the min and max of `X`. Returns ------- self : object Fitted transformer. """ try:
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/_discretization.py
sklearn/preprocessing/_discretization.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from numbers import Integral import numpy as np from sklearn.base import BaseEstimator, TransformerMixin, _fit_context from sklearn.preprocessing._encoders import OneHotEncoder from sklearn.utils import resample from sklearn.utils._param_validation import Interval, Options, StrOptions from sklearn.utils.stats import _weighted_percentile from sklearn.utils.validation import ( _check_feature_names_in, _check_sample_weight, check_array, check_is_fitted, validate_data, ) class KBinsDiscretizer(TransformerMixin, BaseEstimator): """ Bin continuous data into intervals. Read more in the :ref:`User Guide <preprocessing_discretization>`. .. versionadded:: 0.20 Parameters ---------- n_bins : int or array-like of shape (n_features,), default=5 The number of bins to produce. Raises ValueError if ``n_bins < 2``. encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot' Method used to encode the transformed result. - 'onehot': Encode the transformed result with one-hot encoding and return a sparse matrix. Ignored features are always stacked to the right. - 'onehot-dense': Encode the transformed result with one-hot encoding and return a dense array. Ignored features are always stacked to the right. - 'ordinal': Return the bin identifier encoded as an integer value. strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile' Strategy used to define the widths of the bins. - 'uniform': All bins in each feature have identical widths. - 'quantile': All bins in each feature have the same number of points. - 'kmeans': Values in each bin have the same nearest center of a 1D k-means cluster. For an example of the different strategies see: :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_strategies.py`. 
quantile_method : {"inverted_cdf", "averaged_inverted_cdf", "closest_observation", "interpolated_inverted_cdf", "hazen", "weibull", "linear", "median_unbiased", "normal_unbiased"}, default="linear" Method to pass on to np.percentile calculation when using strategy="quantile". Only `averaged_inverted_cdf` and `inverted_cdf` support the use of `sample_weight != None` when subsampling is not active. .. versionadded:: 1.7 dtype : {np.float32, np.float64}, default=None The desired data-type for the output. If None, output dtype is consistent with input dtype. Only np.float32 and np.float64 are supported. .. versionadded:: 0.24 subsample : int or None, default=200_000 Maximum number of samples, used to fit the model, for computational efficiency. `subsample=None` means that all the training samples are used when computing the quantiles that determine the binning thresholds. Since quantile computation relies on sorting each column of `X` and that sorting has an `n log(n)` time complexity, it is recommended to use subsampling on datasets with a very large number of samples. .. versionchanged:: 1.3 The default value of `subsample` changed from `None` to `200_000` when `strategy="quantile"`. .. versionchanged:: 1.5 The default value of `subsample` changed from `None` to `200_000` when `strategy="uniform"` or `strategy="kmeans"`. random_state : int, RandomState instance or None, default=None Determines random number generation for subsampling. Pass an int for reproducible results across multiple function calls. See the `subsample` parameter for more details. See :term:`Glossary <random_state>`. .. versionadded:: 1.1 Attributes ---------- bin_edges_ : ndarray of ndarray of shape (n_features,) The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )`` Ignored features will have empty arrays. n_bins_ : ndarray of shape (n_features,), dtype=np.int64 Number of bins per feature. Bins whose width are too small (i.e., <= 1e-8) are removed with a warning. 
n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- Binarizer : Class used to bin values as ``0`` or ``1`` based on a parameter ``threshold``. Notes ----- For a visualization of discretization on different datasets refer to :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_classification.py`. On the effect of discretization on linear models see: :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization.py`. In bin edges for feature ``i``, the first and last values are used only for ``inverse_transform``. During transform, bin edges are extended to:: np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf]) You can combine ``KBinsDiscretizer`` with :class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess part of the features. ``KBinsDiscretizer`` might produce constant features (e.g., when ``encode = 'onehot'`` and certain bins do not contain any data). These features can be removed with feature selection algorithms (e.g., :class:`~sklearn.feature_selection.VarianceThreshold`). Examples -------- >>> from sklearn.preprocessing import KBinsDiscretizer >>> X = [[-2, 1, -4, -1], ... [-1, 2, -3, -0.5], ... [ 0, 3, -2, 0.5], ... [ 1, 4, -1, 2]] >>> est = KBinsDiscretizer( ... n_bins=3, encode='ordinal', strategy='uniform' ... ) >>> est.fit(X) KBinsDiscretizer(...) >>> Xt = est.transform(X) >>> Xt # doctest: +SKIP array([[ 0., 0., 0., 0.], [ 1., 1., 1., 0.], [ 2., 2., 2., 1.], [ 2., 2., 2., 2.]]) Sometimes it may be useful to convert the data back into the original feature space. The ``inverse_transform`` function converts the binned data into the original feature space. Each value will be equal to the mean of the two bin edges. 
>>> est.bin_edges_[0] array([-2., -1., 0., 1.]) >>> est.inverse_transform(Xt) array([[-1.5, 1.5, -3.5, -0.5], [-0.5, 2.5, -2.5, -0.5], [ 0.5, 3.5, -1.5, 0.5], [ 0.5, 3.5, -1.5, 1.5]]) While this preprocessing step can be an optimization, it is important to note the array returned by ``inverse_transform`` will have an internal type of ``np.float64`` or ``np.float32``, denoted by the ``dtype`` input argument. This can drastically increase the memory usage of the array. See the :ref:`sphx_glr_auto_examples_cluster_plot_face_compress.py` where `KBinsDescretizer` is used to cluster the image into bins and increases the size of the image by 8x. """ _parameter_constraints: dict = { "n_bins": [Interval(Integral, 2, None, closed="left"), "array-like"], "encode": [StrOptions({"onehot", "onehot-dense", "ordinal"})], "strategy": [StrOptions({"uniform", "quantile", "kmeans"})], "quantile_method": [ StrOptions( { "warn", "inverted_cdf", "averaged_inverted_cdf", "closest_observation", "interpolated_inverted_cdf", "hazen", "weibull", "linear", "median_unbiased", "normal_unbiased", } ) ], "dtype": [Options(type, {np.float64, np.float32}), None], "subsample": [Interval(Integral, 1, None, closed="left"), None], "random_state": ["random_state"], } def __init__( self, n_bins=5, *, encode="onehot", strategy="quantile", quantile_method="warn", dtype=None, subsample=200_000, random_state=None, ): self.n_bins = n_bins self.encode = encode self.strategy = strategy self.quantile_method = quantile_method self.dtype = dtype self.subsample = subsample self.random_state = random_state @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None, sample_weight=None): """ Fit the estimator. Parameters ---------- X : array-like of shape (n_samples, n_features) Data to be discretized. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. 
sample_weight : ndarray of shape (n_samples,) Contains weight values to be associated with each sample. .. versionadded:: 1.3 .. versionchanged:: 1.7 Added support for strategy="uniform". Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X, dtype="numeric") if self.dtype in (np.float64, np.float32): output_dtype = self.dtype else: # self.dtype is None output_dtype = X.dtype n_samples, n_features = X.shape if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) if self.subsample is not None and n_samples > self.subsample: # Take a subsample of `X` # When resampling, it is important to subsample **with replacement** to # preserve the distribution, in particular in the presence of a few data # points with large weights. You can check this by setting `replace=False` # in sklearn.utils.test.test_indexing.test_resample_weighted and check that # it fails as a justification for this claim. X = resample( X, replace=True, n_samples=self.subsample, random_state=self.random_state, sample_weight=sample_weight, ) # Since we already used the weights when resampling when provided, # we set them back to `None` to avoid accounting for the weights twice # in subsequent operations to compute weight-aware bin edges with # quantiles or k-means. sample_weight = None n_features = X.shape[1] n_bins = self._validate_n_bins(n_features) bin_edges = np.zeros(n_features, dtype=object) # TODO(1.9): remove and switch to quantile_method="averaged_inverted_cdf" # by default. quantile_method = self.quantile_method if self.strategy == "quantile" and quantile_method == "warn": warnings.warn( "The current default behavior, quantile_method='linear', will be " "changed to quantile_method='averaged_inverted_cdf' in " "scikit-learn version 1.9 to naturally support sample weight " "equivalence properties by default. 
Pass " "quantile_method='averaged_inverted_cdf' explicitly to silence this " "warning.", FutureWarning, ) quantile_method = "linear" if ( self.strategy == "quantile" and quantile_method not in ["inverted_cdf", "averaged_inverted_cdf"] and sample_weight is not None ): raise ValueError( "When fitting with strategy='quantile' and sample weights, " "quantile_method should either be set to 'averaged_inverted_cdf' or " f"'inverted_cdf', got quantile_method='{quantile_method}' instead." ) if self.strategy != "quantile" and sample_weight is not None: # Prepare a mask to filter out zero-weight samples when extracting # the min and max values of each columns which are needed for the # "uniform" and "kmeans" strategies. nnz_weight_mask = sample_weight != 0 else: # Otherwise, all samples are used. Use a slice to avoid creating a # new array. nnz_weight_mask = slice(None) for jj in range(n_features): column = X[:, jj] col_min = column[nnz_weight_mask].min() col_max = column[nnz_weight_mask].max() if col_min == col_max: warnings.warn( "Feature %d is constant and will be replaced with 0." % jj ) n_bins[jj] = 1 bin_edges[jj] = np.array([-np.inf, np.inf]) continue if self.strategy == "uniform": bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1) elif self.strategy == "quantile": percentile_levels = np.linspace(0, 100, n_bins[jj] + 1) # method="linear" is the implicit default for any numpy # version. So we keep it version independent in that case by # using an empty param dict. 
percentile_kwargs = {} if quantile_method != "linear" and sample_weight is None: percentile_kwargs["method"] = quantile_method if sample_weight is None: bin_edges[jj] = np.asarray( np.percentile(column, percentile_levels, **percentile_kwargs), dtype=np.float64, ) else: average = ( True if quantile_method == "averaged_inverted_cdf" else False ) bin_edges[jj] = _weighted_percentile( column, sample_weight, percentile_levels, average=average ) elif self.strategy == "kmeans": from sklearn.cluster import KMeans # fixes import loops # Deterministic initialization with uniform spacing uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1) init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5 # 1D k-means procedure km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1) centers = km.fit( column[:, None], sample_weight=sample_weight ).cluster_centers_[:, 0] # Must sort, centers may be unsorted even with sorted init centers.sort() bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5 bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max] # Remove bins whose width are too small (i.e., <= 1e-8) if self.strategy in ("quantile", "kmeans"): mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8 bin_edges[jj] = bin_edges[jj][mask] if len(bin_edges[jj]) - 1 != n_bins[jj]: warnings.warn( "Bins whose width are too small (i.e., <= " "1e-8) in feature %d are removed. Consider " "decreasing the number of bins." 
% jj ) n_bins[jj] = len(bin_edges[jj]) - 1 self.bin_edges_ = bin_edges self.n_bins_ = n_bins if "onehot" in self.encode: self._encoder = OneHotEncoder( categories=[np.arange(i) for i in self.n_bins_], sparse_output=self.encode == "onehot", dtype=output_dtype, ) # Fit the OneHotEncoder with toy datasets # so that it's ready for use after the KBinsDiscretizer is fitted self._encoder.fit(np.zeros((1, len(self.n_bins_)))) return self def _validate_n_bins(self, n_features): """Returns n_bins_, the number of bins per feature.""" orig_bins = self.n_bins if isinstance(orig_bins, Integral): return np.full(n_features, orig_bins, dtype=int) n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False) if n_bins.ndim > 1 or n_bins.shape[0] != n_features: raise ValueError("n_bins must be a scalar or array of shape (n_features,).") bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins) violating_indices = np.where(bad_nbins_value)[0] if violating_indices.shape[0] > 0: indices = ", ".join(str(i) for i in violating_indices) raise ValueError( "{} received an invalid number " "of bins at indices {}. Number of bins " "must be at least 2, and must be an int.".format( KBinsDiscretizer.__name__, indices ) ) return n_bins def transform(self, X): """ Discretize the data. Parameters ---------- X : array-like of shape (n_samples, n_features) Data to be discretized. Returns ------- Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64} Data in the binned space. Will be a sparse matrix if `self.encode='onehot'` and ndarray otherwise. 
""" check_is_fitted(self) # check input and attribute dtypes dtype = (np.float64, np.float32) if self.dtype is None else self.dtype Xt = validate_data(self, X, copy=True, dtype=dtype, reset=False) bin_edges = self.bin_edges_ for jj in range(Xt.shape[1]): Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right") if self.encode == "ordinal": return Xt dtype_init = None if "onehot" in self.encode: dtype_init = self._encoder.dtype self._encoder.dtype = Xt.dtype try: Xt_enc = self._encoder.transform(Xt) finally: # revert the initial dtype to avoid modifying self. self._encoder.dtype = dtype_init return Xt_enc def inverse_transform(self, X): """ Transform discretized data back to original feature space. Note that this function does not regenerate the original data due to discretization rounding. Parameters ---------- X : array-like of shape (n_samples, n_features) Transformed data in the binned space. Returns ------- X_original : ndarray, dtype={np.float32, np.float64} Data in the original feature space. """ check_is_fitted(self) if "onehot" in self.encode: X = self._encoder.inverse_transform(X) Xinv = check_array(X, copy=True, dtype=(np.float64, np.float32)) n_features = self.n_bins_.shape[0] if Xinv.shape[1] != n_features: raise ValueError( "Incorrect number of features. Expecting {}, received {}.".format( n_features, Xinv.shape[1] ) ) for jj in range(n_features): bin_edges = self.bin_edges_[jj] bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5 Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)] return Xinv def get_feature_names_out(self, input_features=None): """Get output feature names. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. 
- If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self, "n_features_in_") input_features = _check_feature_names_in(self, input_features) if hasattr(self, "_encoder"): return self._encoder.get_feature_names_out(input_features) # ordinal encoding return input_features
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/_function_transformer.py
sklearn/preprocessing/_function_transformer.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from functools import partial import numpy as np from sklearn.base import BaseEstimator, TransformerMixin, _fit_context from sklearn.utils._dataframe import is_pandas_df, is_polars_df from sklearn.utils._param_validation import StrOptions from sklearn.utils._repr_html.estimator import _VisualBlock from sklearn.utils._set_output import _get_adapter_from_container, _get_output_config from sklearn.utils.metaestimators import available_if from sklearn.utils.validation import ( _allclose_dense_sparse, _check_feature_names_in, _get_feature_names, check_array, validate_data, ) def _identity(X): """The identity function.""" return X class FunctionTransformer(TransformerMixin, BaseEstimator): """Constructs a transformer from an arbitrary callable. A FunctionTransformer forwards its X (and optionally y) arguments to a user-defined function or function object and returns the result of this function. This is useful for stateless transformations such as taking the log of frequencies, doing custom scaling, etc. Note: If a lambda is used as the function, then the resulting transformer will not be pickleable. .. versionadded:: 0.17 Read more in the :ref:`User Guide <function_transformer>`. Parameters ---------- func : callable, default=None The callable to use for the transformation. This will be passed the same arguments as transform, with args and kwargs forwarded. If func is None, then func will be the identity function. inverse_func : callable, default=None The callable to use for the inverse transformation. This will be passed the same arguments as inverse transform, with args and kwargs forwarded. If inverse_func is None, then inverse_func will be the identity function. validate : bool, default=False Indicate that the input X array should be checked before calling ``func``. The possibilities are: - If False, there is no input validation. 
- If True, then X will be converted to a 2-dimensional NumPy array or sparse matrix. If the conversion is not possible an exception is raised. .. versionchanged:: 0.22 The default of ``validate`` changed from True to False. accept_sparse : bool, default=False Indicate that func accepts a sparse matrix as input. If validate is False, this has no effect. Otherwise, if accept_sparse is false, sparse matrix inputs will cause an exception to be raised. check_inverse : bool, default=True Whether to check that or ``func`` followed by ``inverse_func`` leads to the original inputs. It can be used for a sanity check, raising a warning when the condition is not fulfilled. .. versionadded:: 0.20 feature_names_out : callable, 'one-to-one' or None, default=None Determines the list of feature names that will be returned by the `get_feature_names_out` method. If it is 'one-to-one', then the output feature names will be equal to the input feature names. If it is a callable, then it must take two positional arguments: this `FunctionTransformer` (`self`) and an array-like of input feature names (`input_features`). It must return an array-like of output feature names. The `get_feature_names_out` method is only defined if `feature_names_out` is not None. See ``get_feature_names_out`` for more details. .. versionadded:: 1.1 kw_args : dict, default=None Dictionary of additional keyword arguments to pass to func. .. versionadded:: 0.18 inv_kw_args : dict, default=None Dictionary of additional keyword arguments to pass to inverse_func. .. versionadded:: 0.18 Attributes ---------- n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- MaxAbsScaler : Scale each feature by its maximum absolute value. 
StandardScaler : Standardize features by removing the mean and scaling to unit variance. LabelBinarizer : Binarize labels in a one-vs-all fashion. MultiLabelBinarizer : Transform between iterable of iterables and a multilabel format. Notes ----- If `func` returns an output with a `columns` attribute, then the columns is enforced to be consistent with the output of `get_feature_names_out`. Examples -------- >>> import numpy as np >>> from sklearn.preprocessing import FunctionTransformer >>> transformer = FunctionTransformer(np.log1p) >>> X = np.array([[0, 1], [2, 3]]) >>> transformer.transform(X) array([[0. , 0.6931], [1.0986, 1.3862]]) """ _parameter_constraints: dict = { "func": [callable, None], "inverse_func": [callable, None], "validate": ["boolean"], "accept_sparse": ["boolean"], "check_inverse": ["boolean"], "feature_names_out": [callable, StrOptions({"one-to-one"}), None], "kw_args": [dict, None], "inv_kw_args": [dict, None], } def __init__( self, func=None, inverse_func=None, *, validate=False, accept_sparse=False, check_inverse=True, feature_names_out=None, kw_args=None, inv_kw_args=None, ): self.func = func self.inverse_func = inverse_func self.validate = validate self.accept_sparse = accept_sparse self.check_inverse = check_inverse self.feature_names_out = feature_names_out self.kw_args = kw_args self.inv_kw_args = inv_kw_args def _check_inverse_transform(self, X): """Check that func and inverse_func are the inverse.""" idx_selected = slice(None, None, max(1, X.shape[0] // 100)) X_round_trip = self.inverse_transform(self.transform(X[idx_selected])) if hasattr(X, "dtype"): dtypes = [X.dtype] elif hasattr(X, "dtypes"): # Dataframes can have multiple dtypes dtypes = X.dtypes # Not all dtypes are numpy dtypes, they can be pandas dtypes as well if not all( isinstance(d, np.dtype) and np.issubdtype(d, np.number) for d in dtypes ): raise ValueError( "'check_inverse' is only supported when all the elements in `X` is" " numerical." 
) if not _allclose_dense_sparse(X[idx_selected], X_round_trip): warnings.warn( ( "The provided functions are not strictly" " inverse of each other. If you are sure you" " want to proceed regardless, set" " 'check_inverse=False'." ), UserWarning, ) @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit transformer by checking X. If ``validate`` is ``True``, ``X`` will be checked. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ if `validate=True` else any object that `func` can handle Input array. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object FunctionTransformer class instance. """ X = validate_data( self, X, reset=True, accept_sparse=self.accept_sparse, skip_check_array=not self.validate, ) if self.check_inverse and not (self.func is None or self.inverse_func is None): self._check_inverse_transform(X) return self def transform(self, X): """Transform X using the forward function. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ if `validate=True` else any object that `func` can handle Input array. Returns ------- X_out : array-like, shape (n_samples, n_features) Transformed input. """ if self.validate: X = validate_data(self, X, reset=False, accept_sparse=self.accept_sparse) out = self._transform(X, func=self.func, kw_args=self.kw_args) output_config = _get_output_config("transform", self)["dense"] if hasattr(out, "columns") and self.feature_names_out is not None: # check the consistency between the column provided by `transform` and # the column names provided by `get_feature_names_out`. 
feature_names_out = self.get_feature_names_out() if list(out.columns) != list(feature_names_out): # we can override the column names of the output if it is inconsistent # with the column names provided by `get_feature_names_out` in the # following cases: # * `func` preserved the column names between the input and the output # * the input column names are all numbers # * the output is requested to be a DataFrame (pandas or polars) feature_names_in = getattr( X, "feature_names_in_", _get_feature_names(X) ) same_feature_names_in_out = feature_names_in is not None and list( feature_names_in ) == list(out.columns) not_all_str_columns = not all( isinstance(col, str) for col in out.columns ) if same_feature_names_in_out or not_all_str_columns: adapter = _get_adapter_from_container(out) out = adapter.create_container( X_output=out, X_original=out, columns=feature_names_out, inplace=False, ) else: raise ValueError( "The output generated by `func` have different column names " "than the ones provided by `get_feature_names_out`. " f"Got output with columns names: {list(out.columns)} and " "`get_feature_names_out` returned: " f"{list(self.get_feature_names_out())}. " "The column names can be overridden by setting " "`set_output(transform='pandas')` or " "`set_output(transform='polars')` such that the column names " "are set to the names provided by `get_feature_names_out`." ) if self.feature_names_out is None: warn_msg = ( "When `set_output` is configured to be '{0}', `func` should return " "a {0} DataFrame to follow the `set_output` API or `feature_names_out`" " should be defined." ) if output_config == "pandas" and not is_pandas_df(out): warnings.warn(warn_msg.format("pandas")) elif output_config == "polars" and not is_polars_df(out): warnings.warn(warn_msg.format("polars")) return out def inverse_transform(self, X): """Transform X using the inverse function. 
Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ if `validate=True` else any object that `inverse_func` can handle Input array. Returns ------- X_original : array-like, shape (n_samples, n_features) Transformed input. """ if self.validate: X = check_array(X, accept_sparse=self.accept_sparse) return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args) @available_if(lambda self: self.feature_names_out is not None) def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. This method is only defined if `feature_names_out` is not None. Parameters ---------- input_features : array-like of str or None, default=None Input feature names. - If `input_features` is None, then `feature_names_in_` is used as the input feature names. If `feature_names_in_` is not defined, then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`. - If `input_features` is array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. - If `feature_names_out` is 'one-to-one', the input feature names are returned (see `input_features` above). This requires `feature_names_in_` and/or `n_features_in_` to be defined, which is done automatically if `validate=True`. Alternatively, you can set them in `func`. - If `feature_names_out` is a callable, then it is called with two arguments, `self` and `input_features`, and its return value is returned by this method. """ if hasattr(self, "n_features_in_") or input_features is not None: input_features = _check_feature_names_in(self, input_features) if self.feature_names_out == "one-to-one": names_out = input_features elif callable(self.feature_names_out): names_out = self.feature_names_out(self, input_features) else: raise ValueError( f"feature_names_out={self.feature_names_out!r} is invalid. 
" 'It must either be "one-to-one" or a callable with two ' "arguments: the function transformer and an array-like of " "input feature names. The callable must return an array-like " "of output feature names." ) return np.asarray(names_out, dtype=object) def _transform(self, X, func=None, kw_args=None): if func is None: func = _identity return func(X, **(kw_args if kw_args else {})) def __sklearn_is_fitted__(self): """Return True since FunctionTransfomer is stateless.""" return True def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.no_validation = not self.validate tags.requires_fit = False tags.input_tags.sparse = not self.validate or self.accept_sparse return tags def set_output(self, *, transform=None): """Set output container. See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` for an example on how to use the API. Parameters ---------- transform : {"default", "pandas", "polars"}, default=None Configure output of `transform` and `fit_transform`. - `"default"`: Default output format of a transformer - `"pandas"`: DataFrame output - `"polars"`: Polars output - `None`: Transform configuration is unchanged .. versionadded:: 1.4 `"polars"` option was added. Returns ------- self : estimator instance Estimator instance. """ if not hasattr(self, "_sklearn_output_config"): self._sklearn_output_config = {} self._sklearn_output_config["transform"] = transform return self def _get_function_name(self): """Get the name display of the `func` used in HTML representation.""" if hasattr(self.func, "__name__"): return self.func.__name__ if isinstance(self.func, partial): return self.func.func.__name__ return f"{self.func.__class__.__name__}(...)" def _sk_visual_block_(self): return _VisualBlock( "single", self, names=self._get_function_name(), name_details=str(self), name_caption="FunctionTransformer", doc_link_label="FunctionTransformer", )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/_label.py
sklearn/preprocessing/_label.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import array import itertools import warnings from collections import defaultdict from numbers import Integral import numpy as np import scipy.sparse as sp from sklearn.base import BaseEstimator, TransformerMixin, _fit_context from sklearn.utils import column_or_1d from sklearn.utils._array_api import ( _convert_to_numpy, _find_matching_floating_dtype, _is_numpy_namespace, _isin, device, get_namespace, get_namespace_and_device, indexing_dtype, xpx, ) from sklearn.utils._encode import _encode, _unique from sklearn.utils._param_validation import Interval, validate_params from sklearn.utils.multiclass import type_of_target, unique_labels from sklearn.utils.sparsefuncs import min_max_axis from sklearn.utils.validation import _num_samples, check_array, check_is_fitted __all__ = [ "LabelBinarizer", "LabelEncoder", "MultiLabelBinarizer", "label_binarize", ] class LabelEncoder(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): """Encode target labels with value between 0 and n_classes-1. This transformer should be used to encode target values, *i.e.* `y`, and not the input `X`. Read more in the :ref:`User Guide <preprocessing_targets>`. .. versionadded:: 0.12 Attributes ---------- classes_ : ndarray of shape (n_classes,) Holds the label for each class. See Also -------- OrdinalEncoder : Encode categorical features using an ordinal encoding scheme. OneHotEncoder : Encode categorical features as a one-hot numeric array. Examples -------- `LabelEncoder` can be used to normalize labels. >>> from sklearn.preprocessing import LabelEncoder >>> le = LabelEncoder() >>> le.fit([1, 2, 2, 6]) LabelEncoder() >>> le.classes_ array([1, 2, 6]) >>> le.transform([1, 1, 2, 6]) array([0, 0, 1, 2]...) >>> le.inverse_transform([0, 0, 1, 2]) array([1, 1, 2, 6]) It can also be used to transform non-numerical labels (as long as they are hashable and comparable) to numerical labels. 
>>> le = LabelEncoder() >>> le.fit(["paris", "paris", "tokyo", "amsterdam"]) LabelEncoder() >>> list(le.classes_) [np.str_('amsterdam'), np.str_('paris'), np.str_('tokyo')] >>> le.transform(["tokyo", "tokyo", "paris"]) array([2, 2, 1]...) >>> list(le.inverse_transform([2, 2, 1])) [np.str_('tokyo'), np.str_('tokyo'), np.str_('paris')] """ def fit(self, y): """Fit label encoder. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. Fitted label encoder. """ y = column_or_1d(y, warn=True) self.classes_ = _unique(y) return self def fit_transform(self, y): """Fit label encoder and return encoded labels. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Encoded labels. """ y = column_or_1d(y, warn=True) self.classes_, y = _unique(y, return_inverse=True) return y def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Labels as normalized encodings. """ check_is_fitted(self) xp, _ = get_namespace(y) y = column_or_1d(y, dtype=self.classes_.dtype, warn=True) # transform of empty array is empty array if _num_samples(y) == 0: return xp.asarray([]) return _encode(y, uniques=self.classes_) def inverse_transform(self, y): """Transform labels back to original encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y_original : ndarray of shape (n_samples,) Original encoding. 
""" check_is_fitted(self) xp, _ = get_namespace(y) y = column_or_1d(y, warn=True) # inverse transform of empty array is empty array if _num_samples(y) == 0: return xp.asarray([]) diff = xpx.setdiff1d( y, xp.arange(self.classes_.shape[0], device=device(y)), xp=xp, ) if diff.shape[0]: raise ValueError("y contains previously unseen labels: %s" % str(diff)) y = xp.asarray(y) return xp.take(self.classes_, y, axis=0) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.array_api_support = True tags.input_tags.two_d_array = False tags.target_tags.one_d_labels = True return tags class LabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): """Binarize labels in a one-vs-all fashion. Several regression and binary classification algorithms are available in scikit-learn. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. At learning time, this simply consists in learning one regressor or binary classifier per class. In doing so, one needs to convert multi-class labels to binary labels (belong or does not belong to the class). `LabelBinarizer` makes this process easy with the transform method. At prediction time, one assigns the class for which the corresponding model gave the greatest confidence. `LabelBinarizer` makes this easy with the :meth:`inverse_transform` method. Read more in the :ref:`User Guide <preprocessing_targets>`. Parameters ---------- neg_label : int, default=0 Value with which negative labels must be encoded. pos_label : int, default=1 Value with which positive labels must be encoded. sparse_output : bool, default=False True if the returned array from transform is desired to be in sparse CSR format. Attributes ---------- classes_ : ndarray of shape (n_classes,) Holds the label for each class. y_type_ : str Represents the type of the target data as evaluated by :func:`~sklearn.utils.multiclass.type_of_target`. 
Possible type are 'continuous', 'continuous-multioutput', 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'. sparse_input_ : bool `True` if the input data to transform is given as a sparse matrix, `False` otherwise. See Also -------- label_binarize : Function to perform the transform operation of LabelBinarizer with fixed classes. OneHotEncoder : Encode categorical features using a one-hot aka one-of-K scheme. Examples -------- >>> from sklearn.preprocessing import LabelBinarizer >>> lb = LabelBinarizer() >>> lb.fit([1, 2, 6, 4, 2]) LabelBinarizer() >>> lb.classes_ array([1, 2, 4, 6]) >>> lb.transform([1, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) Binary targets transform to a column vector >>> lb = LabelBinarizer() >>> lb.fit_transform(['yes', 'no', 'no', 'yes']) array([[1], [0], [0], [1]]) Passing a 2D matrix for multilabel classification >>> import numpy as np >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]])) LabelBinarizer() >>> lb.classes_ array([0, 1, 2]) >>> lb.transform([0, 1, 2, 1]) array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) """ _parameter_constraints: dict = { "neg_label": [Integral], "pos_label": [Integral], "sparse_output": ["boolean"], } def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False): self.neg_label = neg_label self.pos_label = pos_label self.sparse_output = sparse_output @_fit_context(prefer_skip_nested_validation=True) def fit(self, y): """Fit label binarizer. Parameters ---------- y : ndarray of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Returns ------- self : object Returns the instance itself. """ if self.neg_label >= self.pos_label: raise ValueError( f"neg_label={self.neg_label} must be strictly less than " f"pos_label={self.pos_label}." 
) if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0): raise ValueError( "Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " f"pos_label={self.pos_label} and neg_label={self.neg_label}" ) xp, is_array_api = get_namespace(y) if is_array_api and self.sparse_output and not _is_numpy_namespace(xp): raise ValueError( "`sparse_output=True` is not supported for array API " f"namespace {xp.__name__}. " "Use `sparse_output=False` to return a dense array instead." ) self.y_type_ = type_of_target(y, input_name="y") if "multioutput" in self.y_type_: raise ValueError( "Multioutput target data is not supported with label binarization" ) if _num_samples(y) == 0: raise ValueError("y has 0 samples: %r" % y) self.sparse_input_ = sp.issparse(y) self.classes_ = unique_labels(y) return self def fit_transform(self, y): """Fit label binarizer/transform multi-class labels to binary labels. The output of transform is sometimes referred to as the 1-of-K coding scheme. Parameters ---------- y : {ndarray, sparse matrix} of shape (n_samples,) or \ (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. Returns ------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Shape will be (n_samples, 1) for binary problems. Sparse matrix will be of CSR format. """ return self.fit(y).transform(y) def transform(self, y): """Transform multi-class labels to binary labels. The output of transform is sometimes referred to by some authors as the 1-of-K coding scheme. Parameters ---------- y : {array, sparse matrix} of shape (n_samples,) or \ (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. 
Returns ------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Shape will be (n_samples, 1) for binary problems. Sparse matrix will be of CSR format. """ check_is_fitted(self) xp, is_array_api = get_namespace(y) if is_array_api and self.sparse_output and not _is_numpy_namespace(xp): raise ValueError( "`sparse_output=True` is not supported for array API " f"namespace {xp.__name__}. " "Use `sparse_output=False` to return a dense array instead." ) y_is_multilabel = type_of_target(y).startswith("multilabel") if y_is_multilabel and not self.y_type_.startswith("multilabel"): raise ValueError("The object was not fitted with multilabel input.") return label_binarize( y, classes=self.classes_, pos_label=self.pos_label, neg_label=self.neg_label, sparse_output=self.sparse_output, ) def inverse_transform(self, Y, threshold=None): """Transform binary labels back to multi-class labels. Parameters ---------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Target values. All sparse matrices are converted to CSR before inverse transformation. threshold : float, default=None Threshold used in the binary and multi-label cases. Use 0 when ``Y`` contains the output of :term:`decision_function` (classifier). Use 0.5 when ``Y`` contains the output of :term:`predict_proba`. If None, the threshold is assumed to be half way between neg_label and pos_label. Returns ------- y_original : {ndarray, sparse matrix} of shape (n_samples,) Target values. Sparse matrix will be of CSR format. Notes ----- In the case when the binary labels are fractional (probabilistic), :meth:`inverse_transform` chooses the class with the greatest value. Typically, this allows to use the output of a linear model's :term:`decision_function` method directly as the input of :meth:`inverse_transform`. 
""" check_is_fitted(self) xp, is_array_api = get_namespace(Y) if is_array_api and self.sparse_input_ and not _is_numpy_namespace(xp): raise ValueError( "`LabelBinarizer` was fitted on a sparse matrix, and therefore cannot " f"inverse transform a {xp.__name__} array back to a sparse matrix." ) if threshold is None: threshold = (self.pos_label + self.neg_label) / 2.0 if self.y_type_ == "multiclass": y_inv = _inverse_binarize_multiclass(Y, self.classes_, xp=xp) else: y_inv = _inverse_binarize_thresholding( Y, self.y_type_, self.classes_, threshold, xp=xp ) if self.sparse_input_: y_inv = sp.csr_matrix(y_inv) elif sp.issparse(y_inv): y_inv = y_inv.toarray() return y_inv def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.two_d_array = False tags.target_tags.one_d_labels = True return tags @validate_params( { "y": ["array-like", "sparse matrix"], "classes": ["array-like"], "neg_label": [Interval(Integral, None, None, closed="neither")], "pos_label": [Interval(Integral, None, None, closed="neither")], "sparse_output": ["boolean"], }, prefer_skip_nested_validation=True, ) def label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False): """Binarize labels in a one-vs-all fashion. Several regression and binary classification algorithms are available in scikit-learn. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. This function makes it possible to compute this transformation for a fixed set of class labels known ahead of time. Parameters ---------- y : array-like or sparse matrix Sequence of integer labels or multilabel data to encode. classes : array-like of shape (n_classes,) Uniquely holds the label for each class. neg_label : int, default=0 Value with which negative labels must be encoded. pos_label : int, default=1 Value with which positive labels must be encoded. 
sparse_output : bool, default=False, Set to true if output binary array is desired in CSR sparse format. Returns ------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Shape will be (n_samples, 1) for binary problems. Sparse matrix will be of CSR format. See Also -------- LabelBinarizer : Class used to wrap the functionality of label_binarize and allow for fitting to classes independently of the transform operation. Examples -------- >>> from sklearn.preprocessing import label_binarize >>> label_binarize([1, 6], classes=[1, 2, 4, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) The class ordering is preserved: >>> label_binarize([1, 6], classes=[1, 6, 4, 2]) array([[1, 0, 0, 0], [0, 1, 0, 0]]) Binary targets transform to a column vector >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes']) array([[1], [0], [0], [1]]) """ if not isinstance(y, list): # XXX Workaround that will be removed when list of list format is # dropped y = check_array( y, input_name="y", accept_sparse="csr", ensure_2d=False, dtype=None ) else: if _num_samples(y) == 0: raise ValueError("y has 0 samples: %r" % y) if neg_label >= pos_label: raise ValueError( "neg_label={0} must be strictly less than pos_label={1}.".format( neg_label, pos_label ) ) if sparse_output and (pos_label == 0 or neg_label != 0): raise ValueError( "Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label) ) # To account for pos_label == 0 in the dense case pos_switch = pos_label == 0 if pos_switch: pos_label = -neg_label y_type = type_of_target(y) if "multioutput" in y_type: raise ValueError( "Multioutput target data is not supported with label binarization" ) if y_type == "unknown": raise ValueError("The type of target data is not known") xp, is_array_api, device_ = get_namespace_and_device(y) if is_array_api and sparse_output and not _is_numpy_namespace(xp): raise ValueError( "`sparse_output=True` is not 
supported for array API " f"'namespace {xp.__name__}'. " "Use `sparse_output=False` to return a dense array instead." ) try: classes = xp.asarray(classes, device=device_) except (ValueError, TypeError) as e: # `classes` contains an unsupported dtype for this namespace. # For example, attempting to create torch.tensor(["yes", "no"]) will fail. raise ValueError( f"`classes` contains unsupported dtype for array API namespace " f"'{xp.__name__}'." ) from e n_samples = y.shape[0] if hasattr(y, "shape") else len(y) n_classes = classes.shape[0] if hasattr(y, "dtype") and xp.isdtype(y.dtype, "integral"): int_dtype_ = y.dtype else: int_dtype_ = indexing_dtype(xp) if y_type == "binary": if n_classes == 1: if sparse_output: return sp.csr_matrix((n_samples, 1), dtype=int) else: Y = xp.zeros((n_samples, 1), dtype=int_dtype_) Y += neg_label return Y elif n_classes >= 3: y_type = "multiclass" sorted_class = xp.sort(classes) if y_type == "multilabel-indicator": y_n_classes = y.shape[1] if hasattr(y, "shape") else len(y[0]) if n_classes != y_n_classes: raise ValueError( "classes {0} mismatch with the labels {1} found in the data".format( classes, unique_labels(y) ) ) if y_type in ("binary", "multiclass"): y = column_or_1d(y) # pick out the known labels from y y_in_classes = _isin(y, classes, xp=xp) y_seen = y[y_in_classes] indices = xp.searchsorted(sorted_class, y_seen) # cast `y_in_classes` to integer dtype for `xp.cumulative_sum` y_in_classes = xp.astype(y_in_classes, int_dtype_) indptr = xp.concat( ( xp.asarray([0], device=device_), xp.cumulative_sum(y_in_classes, axis=0), ) ) data = xp.full_like(indices, pos_label) # Use NumPy to construct the sparse matrix of one-hot labels Y = sp.csr_matrix( ( _convert_to_numpy(data, xp=xp), _convert_to_numpy(indices, xp=xp), _convert_to_numpy(indptr, xp=xp), ), shape=(n_samples, n_classes), ) if not sparse_output: Y = xp.asarray(Y.toarray(), device=device_) elif y_type == "multilabel-indicator": if sparse_output: Y = sp.csr_matrix(y) if 
pos_label != 1: data = xp.full_like(Y.data, pos_label) Y.data = data else: if sp.issparse(y): y = y.toarray() Y = xp.asarray(y, device=device_, copy=True) if pos_label != 1: Y[Y != 0] = pos_label else: raise ValueError( "%s target data is not supported with label binarization" % y_type ) if not sparse_output: if neg_label != 0: Y[Y == 0] = neg_label if pos_switch: Y[Y == pos_label] = 0 Y = xp.astype(Y, int_dtype_, copy=False) else: Y.data = Y.data.astype(int, copy=False) # preserve label ordering if xp.any(classes != sorted_class): indices = xp.searchsorted(sorted_class, classes) Y = Y[:, indices] if y_type == "binary": if sparse_output: Y = Y[:, [-1]] else: Y = xp.reshape(Y[:, -1], (-1, 1)) return Y def _inverse_binarize_multiclass(y, classes, xp=None): """Inverse label binarization transformation for multiclass. Multiclass uses the maximal score instead of a threshold. """ if sp.issparse(y): classes = np.asarray(classes) # Find the argmax for each row in y where y is a CSR matrix y = y.tocsr() n_samples, n_outputs = y.shape outputs = np.arange(n_outputs) row_max = min_max_axis(y, 1)[1] row_nnz = np.diff(y.indptr) y_data_repeated_max = np.repeat(row_max, row_nnz) # picks out all indices obtaining the maximum per row y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data) # For corner case where last row has a max of 0 if row_max[-1] == 0: y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)]) # Gets the index of the first argmax in each row from y_i_all_argmax index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1]) # first argmax of each row y_ind_ext = np.append(y.indices, [0]) y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]] # Handle rows of all 0 y_i_argmax[np.where(row_nnz == 0)[0]] = 0 # Handles rows with max of 0 that contain negative numbers samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)] for i in samples: ind = y.indices[y.indptr[i] : y.indptr[i + 1]] y_i_argmax[i] = classes[np.setdiff1d(outputs, 
ind)][0] return classes[y_i_argmax] else: xp, _, device_ = get_namespace_and_device(y, xp=xp) classes = xp.asarray(classes, device=device_) indices = xp.argmax(y, axis=1) indices = xp.clip(indices, 0, classes.shape[0] - 1) return classes[indices] def _inverse_binarize_thresholding(y, output_type, classes, threshold, xp=None): """Inverse label binarization transformation using thresholding.""" if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2: raise ValueError("output_type='binary', but y.shape = {0}".format(y.shape)) xp, _, device_ = get_namespace_and_device(y, xp=xp) classes = xp.asarray(classes, device=device_) if output_type != "binary" and y.shape[1] != classes.shape[0]: raise ValueError( "The number of class is not equal to the number of dimension of y." ) dtype_ = _find_matching_floating_dtype(y, xp=xp) if hasattr(y, "dtype") and xp.isdtype(y.dtype, "integral"): int_dtype_ = y.dtype else: int_dtype_ = indexing_dtype(xp) # Perform thresholding if sp.issparse(y): if threshold > 0: if y.format not in ("csr", "csc"): y = y.tocsr() y.data = np.array(y.data > threshold, dtype=int) y.eliminate_zeros() else: y = xp.asarray(y.toarray() > threshold, dtype=int_dtype_, device=device_) else: y = xp.asarray( xp.asarray(y, dtype=dtype_, device=device_) > threshold, dtype=int_dtype_, device=device_, ) # Inverse transform data if output_type == "binary": if sp.issparse(y): y = y.toarray() if y.ndim == 2 and y.shape[1] == 2: return classes[y[:, 1]] else: if classes.shape[0] == 1: return xp.repeat(classes[0], len(y)) else: return classes[xp.reshape(y, (-1,))] elif output_type == "multilabel-indicator": return y else: raise ValueError("{0} format is not supported".format(output_type)) class MultiLabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None): """Transform between iterable of iterables and a multilabel format. Although a list of sets or tuples is a very intuitive format for multilabel data, it is unwieldy to process. 
This transformer converts between this intuitive format and the supported multilabel format: a (samples x classes) binary matrix indicating the presence of a class label. Read more in the :ref:`User Guide <multilabelbinarizer>`. Parameters ---------- classes : array-like of shape (n_classes,), default=None Indicates an ordering for the class labels. All entries should be unique (cannot contain duplicate classes). sparse_output : bool, default=False Set to True if output binary array is desired in CSR sparse format. Attributes ---------- classes_ : ndarray of shape (n_classes,) A copy of the `classes` parameter when provided. Otherwise it corresponds to the sorted set of classes found when fitting. See Also -------- OneHotEncoder : Encode categorical features using a one-hot aka one-of-K scheme. Examples -------- >>> from sklearn.preprocessing import MultiLabelBinarizer >>> mlb = MultiLabelBinarizer() >>> mlb.fit_transform([(1, 2), (3,)]) array([[1, 1, 0], [0, 0, 1]]) >>> mlb.classes_ array([1, 2, 3]) >>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}]) array([[0, 1, 1], [1, 0, 0]]) >>> list(mlb.classes_) ['comedy', 'sci-fi', 'thriller'] A common mistake is to pass in a list, which leads to the following issue: >>> mlb = MultiLabelBinarizer() >>> mlb.fit(['sci-fi', 'thriller', 'comedy']) MultiLabelBinarizer() >>> mlb.classes_ array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't', 'y'], dtype=object) To correct this, the list of labels should be passed in as: >>> mlb = MultiLabelBinarizer() >>> mlb.fit([['sci-fi', 'thriller', 'comedy']]) MultiLabelBinarizer() >>> mlb.classes_ array(['comedy', 'sci-fi', 'thriller'], dtype=object) """ _parameter_constraints: dict = { "classes": ["array-like", None], "sparse_output": ["boolean"], } def __init__(self, *, classes=None, sparse_output=False): self.classes = classes self.sparse_output = sparse_output @_fit_context(prefer_skip_nested_validation=True) def fit(self, y): """Fit the label sets binarizer, 
storing :term:`classes_`. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- self : object Fitted estimator. """ self._cached_dict = None if self.classes is None: classes = sorted(set(itertools.chain.from_iterable(y))) elif len(set(self.classes)) < len(self.classes): raise ValueError( "The classes argument contains duplicate " "classes. Remove these duplicates before passing " "them to MultiLabelBinarizer." ) else: classes = self.classes dtype = int if all(isinstance(c, int) for c in classes) else object self.classes_ = np.empty(len(classes), dtype=dtype) self.classes_[:] = classes return self @_fit_context(prefer_skip_nested_validation=True) def fit_transform(self, y): """Fit the label sets binarizer and transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR format. 
""" if self.classes is not None: return self.fit(y).transform(y) self._cached_dict = None # Automatically increment on new class class_mapping = defaultdict(int) class_mapping.default_factory = class_mapping.__len__ yt = self._transform(y, class_mapping) # sort classes and reorder columns tmp = sorted(class_mapping, key=class_mapping.get) # (make safe for tuples) dtype = int if all(isinstance(c, int) for c in tmp) else object class_mapping = np.empty(len(tmp), dtype=dtype) class_mapping[:] = tmp self.classes_, inverse = np.unique(class_mapping, return_inverse=True) # ensure yt.indices keeps its current dtype yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype) if not self.sparse_output: yt = yt.toarray() return yt def transform(self, y): """Transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. """ check_is_fitted(self) class_to_index = self._build_cache() yt = self._transform(y, class_to_index) if not self.sparse_output: yt = yt.toarray() return yt def _build_cache(self): if self._cached_dict is None: self._cached_dict = dict(zip(self.classes_, range(len(self.classes_)))) return self._cached_dict def _transform(self, y, class_mapping): """Transforms the label sets with a given mapping. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. class_mapping : Mapping Maps from label to column index in label indicator matrix. Returns ------- y_indicator : sparse matrix of shape (n_samples, n_classes) Label indicator matrix. Will be of CSR format. 
""" indices = array.array("i") indptr = array.array("i", [0]) unknown = set() for labels in y: index = set() for label in labels: try:
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/__init__.py
sklearn/preprocessing/__init__.py
"""Methods for scaling, centering, normalization, binarization, and more.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from sklearn.preprocessing._data import ( Binarizer, KernelCenterer, MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, RobustScaler, StandardScaler, add_dummy_feature, binarize, maxabs_scale, minmax_scale, normalize, power_transform, quantile_transform, robust_scale, scale, ) from sklearn.preprocessing._discretization import KBinsDiscretizer from sklearn.preprocessing._encoders import OneHotEncoder, OrdinalEncoder from sklearn.preprocessing._function_transformer import FunctionTransformer from sklearn.preprocessing._label import ( LabelBinarizer, LabelEncoder, MultiLabelBinarizer, label_binarize, ) from sklearn.preprocessing._polynomial import PolynomialFeatures, SplineTransformer from sklearn.preprocessing._target_encoder import TargetEncoder __all__ = [ "Binarizer", "FunctionTransformer", "KBinsDiscretizer", "KernelCenterer", "LabelBinarizer", "LabelEncoder", "MaxAbsScaler", "MinMaxScaler", "MultiLabelBinarizer", "Normalizer", "OneHotEncoder", "OrdinalEncoder", "PolynomialFeatures", "PowerTransformer", "QuantileTransformer", "RobustScaler", "SplineTransformer", "StandardScaler", "TargetEncoder", "add_dummy_feature", "binarize", "label_binarize", "maxabs_scale", "minmax_scale", "normalize", "power_transform", "quantile_transform", "robust_scale", "scale", ]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/_data.py
sklearn/preprocessing/_data.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from numbers import Integral, Real import numpy as np from scipy import sparse, stats from scipy.special import boxcox, inv_boxcox from sklearn.base import ( BaseEstimator, ClassNamePrefixFeaturesOutMixin, OneToOneFeatureMixin, TransformerMixin, _fit_context, ) from sklearn.preprocessing._encoders import OneHotEncoder from sklearn.utils import _array_api, check_array, metadata_routing, resample from sklearn.utils._array_api import ( _find_matching_floating_dtype, _max_precision_float_dtype, _modify_in_place_if_numpy, device, get_namespace, get_namespace_and_device, size, supported_float_dtypes, ) from sklearn.utils._param_validation import ( Interval, Options, StrOptions, validate_params, ) from sklearn.utils.extmath import _incremental_mean_and_var, row_norms from sklearn.utils.sparsefuncs import ( incr_mean_variance_axis, inplace_column_scale, mean_variance_axis, min_max_axis, ) from sklearn.utils.sparsefuncs_fast import ( inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2, ) from sklearn.utils.validation import ( FLOAT_DTYPES, _check_sample_weight, check_is_fitted, check_random_state, validate_data, ) BOUNDS_THRESHOLD = 1e-7 __all__ = [ "Binarizer", "KernelCenterer", "MaxAbsScaler", "MinMaxScaler", "Normalizer", "OneHotEncoder", "PowerTransformer", "QuantileTransformer", "RobustScaler", "StandardScaler", "add_dummy_feature", "binarize", "maxabs_scale", "minmax_scale", "normalize", "power_transform", "quantile_transform", "robust_scale", "scale", ] def _is_constant_feature(var, mean, n_samples): """Detect if a feature is indistinguishable from a constant feature. The detection is based on its computed variance and on the theoretical error bounds of the '2 pass algorithm' for variance computation. See "Algorithms for computing the sample variance: analysis and recommendations", by Chan, Golub, and LeVeque. 
""" # In scikit-learn, variance is always computed using float64 accumulators. xp, _, device_ = get_namespace_and_device(var, mean) max_float_dtype = _max_precision_float_dtype(xp=xp, device=device_) eps = xp.finfo(max_float_dtype).eps upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2 return var <= upper_bound def _handle_zeros_in_scale(scale, copy=True, constant_mask=None): """Set scales of near constant features to 1. The goal is to avoid division by very small or zero values. Near constant features are detected automatically by identifying scales close to machine precision unless they are precomputed by the caller and passed with the `constant_mask` kwarg. Typically for standard scaling, the scales are the standard deviation while near constant features are better detected on the computed variances which are closer to machine precision by construction. """ # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == 0.0: scale = 1.0 return scale # scale is an array else: xp, _ = get_namespace(scale) if constant_mask is None: # Detect near constant values to avoid dividing by a very small # value that could lead to surprising results and numerical # stability issues. constant_mask = scale < 10 * xp.finfo(scale.dtype).eps if copy: # New array to avoid side-effects scale = xp.asarray(scale, copy=True) scale[constant_mask] = 1.0 return scale @validate_params( { "X": ["array-like", "sparse matrix"], "axis": [Options(Integral, {0, 1})], "with_mean": ["boolean"], "with_std": ["boolean"], "copy": ["boolean"], }, prefer_skip_nested_validation=True, ) def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis. Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to center and scale. 
axis : {0, 1}, default=0 Axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : bool, default=True If True, center the data before scaling. with_std : bool, default=True If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. See Also -------- StandardScaler : Performs scaling to unit variance using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. NaNs are treated as missing values: disregarded to compute the statistics, and maintained during the data transformation. We use a biased estimator for the standard deviation, equivalent to `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to affect model performance. For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. 
This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.StandardScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`. Examples -------- >>> from sklearn.preprocessing import scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> scale(X, axis=0) # scaling each column independently array([[-1., 1., 1.], [ 1., -1., -1.]]) >>> scale(X, axis=1) # scaling each row independently array([[-1.37, 0.39, 0.98], [-1.22, 0. , 1.22]]) """ X = check_array( X, accept_sparse="csc", copy=copy, ensure_2d=False, estimator="the scale function", dtype=FLOAT_DTYPES, ensure_all_finite="allow-nan", input_name="X", ) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives." ) if axis != 0: raise ValueError( "Can only scale sparse matrix on axis=0, got axis=%d" % axis ) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.nanmean(X, axis) if with_std: scale_ = np.nanstd(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = np.nanmean(Xr, axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn( "Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features." 
) Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = np.nanmean(Xr, axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # subtract the mean again: if not np.allclose(mean_2, 0): warnings.warn( "Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. " ) Xr -= mean_2 return X class MinMaxScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): """Transform features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. `MinMaxScaler` doesn't reduce the effect of outliers, but it linearly scales them down into a fixed range, where the largest occurring data point corresponds to the maximum value and the smallest one corresponds to the minimum value. For an example visualization, refer to :ref:`Compare MinMaxScaler with other scalers <plot_all_scaling_minmax_scaler_section>`. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. copy : bool, default=True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). clip : bool, default=False Set to True to clip transformed values of held-out data to provided `feature_range`. 
Since this parameter will clip values, `inverse_transform` may not be able to restore the original data. .. note:: Setting `clip=True` does not prevent feature drift (a distribution shift between training and test data). The transformed values are clipped to the `feature_range`, which helps avoid unintended behavior in models sensitive to out-of-range inputs (e.g. linear models). Use with care, as clipping can distort the distribution of test data. .. versionadded:: 0.24 Attributes ---------- min_ : ndarray of shape (n_features,) Per feature adjustment for minimum. Equivalent to ``min - X.min(axis=0) * self.scale_`` scale_ : ndarray of shape (n_features,) Per feature relative scaling of the data. Equivalent to ``(max - min) / (X.max(axis=0) - X.min(axis=0))`` .. versionadded:: 0.17 *scale_* attribute. data_min_ : ndarray of shape (n_features,) Per feature minimum seen in the data .. versionadded:: 0.17 *data_min_* data_max_ : ndarray of shape (n_features,) Per feature maximum seen in the data .. versionadded:: 0.17 *data_max_* data_range_ : ndarray of shape (n_features,) Per feature range ``(data_max_ - data_min_)`` seen in the data .. versionadded:: 0.17 *data_range_* n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 n_samples_seen_ : int The number of samples processed by the estimator. It will be reset on new calls to fit, but increments across ``partial_fit`` calls. feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- minmax_scale : Equivalent function without the estimator API. Notes ----- NaNs are treated as missing values: disregarded in fit, and maintained in transform. 
Examples -------- >>> from sklearn.preprocessing import MinMaxScaler >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]] >>> scaler = MinMaxScaler() >>> print(scaler.fit(data)) MinMaxScaler() >>> print(scaler.data_max_) [ 1. 18.] >>> print(scaler.transform(data)) [[0. 0. ] [0.25 0.25] [0.5 0.5 ] [1. 1. ]] >>> print(scaler.transform([[2, 2]])) [[1.5 0. ]] """ _parameter_constraints: dict = { "feature_range": [tuple], "copy": ["boolean"], "clip": ["boolean"], } def __init__(self, feature_range=(0, 1), *, copy=True, clip=False): self.feature_range = feature_range self.copy = copy self.clip = clip def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, because they are all set together # in partial_fit if hasattr(self, "scale_"): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) @_fit_context(prefer_skip_nested_validation=True) def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. 
""" feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError( "Minimum of desired feature range must be smaller than maximum. Got %s." % str(feature_range) ) if sparse.issparse(X): raise TypeError( "MinMaxScaler does not support sparse input. " "Consider using MaxAbsScaler instead." ) xp, _ = get_namespace(X) first_pass = not hasattr(self, "n_samples_seen_") X = validate_data( self, X, reset=first_pass, dtype=_array_api.supported_float_dtypes(xp), ensure_all_finite="allow-nan", ) device_ = device(X) feature_range = ( xp.asarray(feature_range[0], dtype=X.dtype, device=device_), xp.asarray(feature_range[1], dtype=X.dtype, device=device_), ) data_min = _array_api._nanmin(X, axis=0, xp=xp) data_max = _array_api._nanmax(X, axis=0, xp=xp) if first_pass: self.n_samples_seen_ = X.shape[0] else: data_min = xp.minimum(self.data_min_, data_min) data_max = xp.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale( data_range, copy=True ) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self def transform(self, X): """Scale features of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. Returns ------- Xt : ndarray of shape (n_samples, n_features) Transformed data. 
""" check_is_fitted(self) xp, _ = get_namespace(X) X = validate_data( self, X, copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite="allow-nan", reset=False, ) X *= self.scale_ X += self.min_ if self.clip: device_ = device(X) X = _modify_in_place_if_numpy( xp, xp.clip, X, xp.asarray(self.feature_range[0], dtype=X.dtype, device=device_), xp.asarray(self.feature_range[1], dtype=X.dtype, device=device_), out=X, ) return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. It cannot be sparse. Returns ------- X_original : ndarray of shape (n_samples, n_features) Transformed data. """ check_is_fitted(self) xp, _ = get_namespace(X) X = check_array( X, copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite="allow-nan", ) X -= self.min_ X /= self.scale_ return X def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = True tags.array_api_support = True return tags @validate_params( { "X": ["array-like"], "axis": [Options(Integral, {0, 1})], }, prefer_skip_nested_validation=False, ) def minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True): """Transform features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by (when ``axis=0``):: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. The transformation is calculated as (when ``axis=0``):: X_scaled = scale * X + min - X.min(axis=0) * scale where scale = (max - min) / (X.max(axis=0) - X.min(axis=0)) This transformation is often used as an alternative to zero mean, unit variance scaling. 
Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`~sklearn.preprocessing.MinMaxScaler`. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. axis : {0, 1}, default=0 Axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : ndarray of shape (n_samples, n_features) The transformed data. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.MinMaxScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`. See Also -------- MinMaxScaler : Performs scaling to a given range using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import minmax_scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> minmax_scale(X, axis=0) # scale each column independently array([[0., 1., 1.], [1., 0., 0.]]) >>> minmax_scale(X, axis=1) # scale each row independently array([[0. , 0.75, 1. ], [0. , 0.5 , 1. 
]]) """ # Unlike the scaler object, this function allows 1d input. # If copy is required, it will be done inside the scaler object. X = check_array( X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, ensure_all_finite="allow-nan", ) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class StandardScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): """Standardize features by removing the mean and scaling to unit variance. The standard score of a sample `x` is calculated as: .. code-block:: text z = (x - u) / s where `u` is the mean of the training samples or zero if `with_mean=False`, and `s` is the standard deviation of the training samples or one if `with_std=False`. Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using :meth:`transform`. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual features do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger than others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. `StandardScaler` is sensitive to outliers, and the features may scale differently from each other in the presence of outliers. 
For an example visualization, refer to :ref:`Compare StandardScaler with other scalers <plot_all_scaling_standard_scaler_section>`. This scaler can also be applied to sparse CSR or CSC matrices by passing `with_mean=False` to avoid breaking the sparsity structure of the data. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- copy : bool, default=True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. with_mean : bool, default=True If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : bool, default=True If True, scale the data to unit variance (or equivalently, unit standard deviation). Attributes ---------- scale_ : ndarray of shape (n_features,) or None Per feature relative scaling of the data to achieve zero mean and unit variance. Generally this is calculated using `np.sqrt(var_)`. If a variance is zero, we can't achieve unit variance, and the data is left as-is, giving a scaling factor of 1. `scale_` is equal to `None` when `with_std=False`. .. versionadded:: 0.17 *scale_* mean_ : ndarray of shape (n_features,) or None The mean value for each feature in the training set. Equal to ``None`` when ``with_mean=False`` and ``with_std=False``. var_ : ndarray of shape (n_features,) or None The variance for each feature in the training set. Used to compute `scale_`. Equal to ``None`` when ``with_mean=False`` and ``with_std=False``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. 
versionadded:: 1.0 n_samples_seen_ : int or ndarray of shape (n_features,) The number of samples processed by the estimator for each feature. If there are no missing samples, the ``n_samples_seen`` will be an integer, otherwise it will be an array of dtype int. If `sample_weights` are used it will be a float (if no missing data) or an array of dtype float that sums the weights seen so far. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See Also -------- scale : Equivalent function without the estimator API. :class:`~sklearn.decomposition.PCA` : Further removes the linear correlation across features with 'whiten=True'. Notes ----- NaNs are treated as missing values: disregarded in fit, and maintained in transform. We use a biased estimator for the standard deviation, equivalent to `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to affect model performance. Examples -------- >>> from sklearn.preprocessing import StandardScaler >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]] >>> scaler = StandardScaler() >>> print(scaler.fit(data)) StandardScaler() >>> print(scaler.mean_) [0.5 0.5] >>> print(scaler.transform(data)) [[-1. -1.] [-1. -1.] [ 1. 1.] [ 1. 1.]] >>> print(scaler.transform([[2, 2]])) [[3. 3.]] """ _parameter_constraints: dict = { "copy": ["boolean"], "with_mean": ["boolean"], "with_std": ["boolean"], } def __init__(self, *, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, because they are all set together # in partial_fit if hasattr(self, "scale_"): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_ def fit(self, X, y=None, sample_weight=None): """Compute the mean and std to be used for later scaling. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.24 parameter *sample_weight* support to StandardScaler. Returns ------- self : object Fitted scaler. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y, sample_weight) @_fit_context(prefer_skip_nested_validation=True) def partial_fit(self, X, y=None, sample_weight=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.24 parameter *sample_weight* support to StandardScaler. Returns ------- self : object Fitted scaler. """ xp, _, X_device = get_namespace_and_device(X) first_call = not hasattr(self, "n_samples_seen_") X = validate_data( self, X, accept_sparse=("csr", "csc"), dtype=supported_float_dtypes(xp, X_device), ensure_all_finite="allow-nan", reset=first_call, ) n_features = X.shape[1] if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_discretization.py
sklearn/preprocessing/tests/test_discretization.py
import warnings import numpy as np import pytest import scipy.sparse as sp from sklearn import clone from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder from sklearn.utils._testing import ( assert_allclose, assert_allclose_dense_sparse, assert_array_almost_equal, assert_array_equal, ignore_warnings, ) X = [[-2, 1.5, -4, -1], [-1, 2.5, -3, -0.5], [0, 3.5, -2, 0.5], [1, 4.5, -1, 2]] @pytest.mark.parametrize( "strategy, quantile_method, expected, sample_weight", [ ( "uniform", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]], None, ), ( "kmeans", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], None, ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], None, ), ( "uniform", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]], [1, 1, 2, 1], ), ( "uniform", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]], [1, 1, 1, 1], ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], [1, 1, 2, 1], ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], [1, 1, 1, 1], ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], [0, 1, 1, 1], ), ( "kmeans", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [1, 1, 1, 0], [1, 1, 1, 1], [2, 2, 2, 2]], [1, 0, 3, 1], ), ( "kmeans", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], [1, 1, 1, 1], ), ], ) def test_fit_transform(strategy, quantile_method, expected, sample_weight): est = KBinsDiscretizer( n_bins=3, encode="ordinal", strategy=strategy, quantile_method=quantile_method ) with 
ignore_warnings(category=UserWarning): # Ignore the warning on removed small bins. est.fit(X, sample_weight=sample_weight) assert_array_equal(est.transform(X), expected) def test_valid_n_bins(): KBinsDiscretizer(n_bins=2, quantile_method="averaged_inverted_cdf").fit_transform(X) KBinsDiscretizer( n_bins=np.array([2])[0], quantile_method="averaged_inverted_cdf" ).fit_transform(X) assert KBinsDiscretizer(n_bins=2, quantile_method="averaged_inverted_cdf").fit( X ).n_bins_.dtype == np.dtype(int) def test_invalid_n_bins_array(): # Bad shape n_bins = np.full((2, 4), 2.0) est = KBinsDiscretizer(n_bins=n_bins, quantile_method="averaged_inverted_cdf") err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)." with pytest.raises(ValueError, match=err_msg): est.fit_transform(X) # Incorrect number of features n_bins = [1, 2, 2] est = KBinsDiscretizer(n_bins=n_bins, quantile_method="averaged_inverted_cdf") err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)." with pytest.raises(ValueError, match=err_msg): est.fit_transform(X) # Bad bin values n_bins = [1, 2, 2, 1] est = KBinsDiscretizer(n_bins=n_bins, quantile_method="averaged_inverted_cdf") err_msg = ( "KBinsDiscretizer received an invalid number of bins " "at indices 0, 3. Number of bins must be at least 2, " "and must be an int." ) with pytest.raises(ValueError, match=err_msg): est.fit_transform(X) # Float bin values n_bins = [2.1, 2, 2.1, 2] est = KBinsDiscretizer(n_bins=n_bins, quantile_method="averaged_inverted_cdf") err_msg = ( "KBinsDiscretizer received an invalid number of bins " "at indices 0, 2. Number of bins must be at least 2, " "and must be an int." 
) with pytest.raises(ValueError, match=err_msg): est.fit_transform(X) @pytest.mark.parametrize( "strategy, quantile_method, expected, sample_weight", [ ( "uniform", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]], None, ), ( "kmeans", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]], None, ), ( "quantile", "linear", [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], None, ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], None, ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], [1, 1, 1, 1], ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]], [0, 1, 3, 1], ), ( "quantile", "averaged_inverted_cdf", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 2, 2, 2], [1, 2, 2, 2]], [1, 1, 3, 1], ), ( "kmeans", "warn", # default, will not warn when strategy != "quantile" [[0, 0, 0, 0], [0, 1, 1, 0], [1, 1, 1, 1], [1, 2, 2, 2]], [1, 0, 3, 1], ), ], ) def test_fit_transform_n_bins_array(strategy, quantile_method, expected, sample_weight): est = KBinsDiscretizer( n_bins=[2, 3, 3, 3], encode="ordinal", strategy=strategy, quantile_method=quantile_method, ).fit(X, sample_weight=sample_weight) assert_array_equal(est.transform(X), expected) # test the shape of bin_edges_ n_features = np.array(X).shape[1] assert est.bin_edges_.shape == (n_features,) for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_): assert bin_edges.shape == (n_bins + 1,) @pytest.mark.filterwarnings("ignore: Bins whose width are too small") def test_kbinsdiscretizer_effect_sample_weight(): """Check the impact of `sample_weight` one computed quantiles.""" X = np.array([[-2], [-1], [1], [3], [500], [1000]]) # add a large number of bins such that each sample with a non-null weight # will be used as bin edge est = KBinsDiscretizer( n_bins=10, 
encode="ordinal", strategy="quantile", quantile_method="averaged_inverted_cdf", ) est.fit(X, sample_weight=[1, 1, 1, 1, 0, 0]) assert_allclose(est.bin_edges_[0], [-2, -1, 0, 1, 3]) assert_allclose(est.transform(X), [[0.0], [1.0], [3.0], [3.0], [3.0], [3.0]]) @pytest.mark.parametrize("strategy", ["kmeans", "quantile"]) def test_kbinsdiscretizer_no_mutating_sample_weight(strategy): """Make sure that `sample_weight` is not changed in place.""" if strategy == "quantile": est = KBinsDiscretizer( n_bins=3, encode="ordinal", strategy=strategy, quantile_method="averaged_inverted_cdf", ) else: est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy) sample_weight = np.array([1, 3, 1, 2], dtype=np.float64) sample_weight_copy = np.copy(sample_weight) est.fit(X, sample_weight=sample_weight) assert_allclose(sample_weight, sample_weight_copy) @pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) def test_same_min_max(strategy): warnings.simplefilter("always") X = np.array([[1, -2], [1, -1], [1, 0], [1, 1]]) if strategy == "quantile": est = KBinsDiscretizer( strategy=strategy, n_bins=3, encode="ordinal", quantile_method="averaged_inverted_cdf", ) else: est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode="ordinal") warning_message = "Feature 0 is constant and will be replaced with 0." 
with pytest.warns(UserWarning, match=warning_message): est.fit(X) assert est.n_bins_[0] == 1 # replace the feature with zeros Xt = est.transform(X) assert_array_equal(Xt[:, 0], np.zeros(X.shape[0])) def test_transform_1d_behavior(): X = np.arange(4) est = KBinsDiscretizer(n_bins=2, quantile_method="averaged_inverted_cdf") with pytest.raises(ValueError): est.fit(X) est = KBinsDiscretizer(n_bins=2, quantile_method="averaged_inverted_cdf") est.fit(X.reshape(-1, 1)) with pytest.raises(ValueError): est.transform(X) @pytest.mark.parametrize("i", range(1, 9)) def test_numeric_stability(i): X_init = np.array([2.0, 4.0, 6.0, 8.0, 10.0]).reshape(-1, 1) Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1) # Test up to discretizing nano units X = X_init / 10**i Xt = KBinsDiscretizer( n_bins=2, encode="ordinal", quantile_method="averaged_inverted_cdf" ).fit_transform(X) assert_array_equal(Xt_expected, Xt) def test_encode_options(): est = KBinsDiscretizer( n_bins=[2, 3, 3, 3], encode="ordinal", quantile_method="averaged_inverted_cdf" ).fit(X) Xt_1 = est.transform(X) est = KBinsDiscretizer( n_bins=[2, 3, 3, 3], encode="onehot-dense", quantile_method="averaged_inverted_cdf", ).fit(X) Xt_2 = est.transform(X) assert not sp.issparse(Xt_2) assert_array_equal( OneHotEncoder( categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=False ).fit_transform(Xt_1), Xt_2, ) est = KBinsDiscretizer( n_bins=[2, 3, 3, 3], encode="onehot", quantile_method="averaged_inverted_cdf" ).fit(X) Xt_3 = est.transform(X) assert sp.issparse(Xt_3) assert_array_equal( OneHotEncoder( categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=True ) .fit_transform(Xt_1) .toarray(), Xt_3.toarray(), ) @pytest.mark.parametrize( "strategy, quantile_method, expected_2bins, expected_3bins, expected_5bins", [ ("uniform", "warn", [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]), ("kmeans", "warn", [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]), ( "quantile", "averaged_inverted_cdf", 
[0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4], ), ], ) def test_nonuniform_strategies( strategy, quantile_method, expected_2bins, expected_3bins, expected_5bins ): X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1) # with 2 bins est = KBinsDiscretizer( n_bins=2, strategy=strategy, quantile_method=quantile_method, encode="ordinal" ) Xt = est.fit_transform(X) assert_array_equal(expected_2bins, Xt.ravel()) # with 3 bins est = KBinsDiscretizer( n_bins=3, strategy=strategy, quantile_method=quantile_method, encode="ordinal" ) Xt = est.fit_transform(X) assert_array_equal(expected_3bins, Xt.ravel()) # with 5 bins est = KBinsDiscretizer( n_bins=5, strategy=strategy, quantile_method=quantile_method, encode="ordinal" ) Xt = est.fit_transform(X) assert_array_equal(expected_5bins, Xt.ravel()) @pytest.mark.parametrize( "strategy, expected_inv,quantile_method", [ ( "uniform", [ [-1.5, 2.0, -3.5, -0.5], [-0.5, 3.0, -2.5, -0.5], [0.5, 4.0, -1.5, 0.5], [0.5, 4.0, -1.5, 1.5], ], "warn", # default, will not warn when strategy != "quantile" ), ( "kmeans", [ [-1.375, 2.125, -3.375, -0.5625], [-1.375, 2.125, -3.375, -0.5625], [-0.125, 3.375, -2.125, 0.5625], [0.75, 4.25, -1.25, 1.625], ], "warn", # default, will not warn when strategy != "quantile" ), ( "quantile", [ [-1.5, 2.0, -3.5, -0.75], [-0.5, 3.0, -2.5, 0.0], [0.5, 4.0, -1.5, 1.25], [0.5, 4.0, -1.5, 1.25], ], "averaged_inverted_cdf", ), ], ) @pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) def test_inverse_transform(strategy, encode, expected_inv, quantile_method): kbd = KBinsDiscretizer( n_bins=3, strategy=strategy, quantile_method=quantile_method, encode=encode ) Xt = kbd.fit_transform(X) Xinv = kbd.inverse_transform(Xt) assert_array_almost_equal(expected_inv, Xinv) @pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) def test_transform_outside_fit_range(strategy): X = np.array([0, 1, 2, 3])[:, None] if strategy == "quantile": kbd = KBinsDiscretizer( n_bins=4, 
strategy=strategy, encode="ordinal", quantile_method="averaged_inverted_cdf", ) else: kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode="ordinal") kbd.fit(X) X2 = np.array([-2, 5])[:, None] X2t = kbd.transform(X2) assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_) assert_array_equal(X2t.min(axis=0), [0]) def test_overwrite(): X = np.array([0, 1, 2, 3])[:, None] X_before = X.copy() est = KBinsDiscretizer( n_bins=3, quantile_method="averaged_inverted_cdf", encode="ordinal" ) Xt = est.fit_transform(X) assert_array_equal(X, X_before) Xt_before = Xt.copy() Xinv = est.inverse_transform(Xt) assert_array_equal(Xt, Xt_before) assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]])) @pytest.mark.parametrize( "strategy, expected_bin_edges, quantile_method", [ ("quantile", [0, 1.5, 3], "averaged_inverted_cdf"), ("kmeans", [0, 1.5, 3], "warn"), ], ) def test_redundant_bins(strategy, expected_bin_edges, quantile_method): X = [[0], [0], [0], [0], [3], [3]] kbd = KBinsDiscretizer( n_bins=3, strategy=strategy, quantile_method=quantile_method, subsample=None ) warning_message = "Consider decreasing the number of bins." with pytest.warns(UserWarning, match=warning_message): kbd.fit(X) assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges) def test_percentile_numeric_stability(): X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1) bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95]) Xt = np.array([0, 0, 4]).reshape(-1, 1) kbd = KBinsDiscretizer( n_bins=10, encode="ordinal", strategy="quantile", quantile_method="linear", ) ## TODO: change to averaged inverted cdf, but that means we only get bin ## edges of 0.05 and 0.95 and nothing in between warning_message = "Consider decreasing the number of bins." 
with pytest.warns(UserWarning, match=warning_message): kbd.fit(X) assert_array_almost_equal(kbd.bin_edges_[0], bin_edges) assert_array_almost_equal(kbd.transform(X), Xt) @pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64]) @pytest.mark.parametrize("out_dtype", [None, np.float32, np.float64]) @pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) def test_consistent_dtype(in_dtype, out_dtype, encode): X_input = np.array(X, dtype=in_dtype) kbd = KBinsDiscretizer( n_bins=3, encode=encode, quantile_method="averaged_inverted_cdf", dtype=out_dtype, ) kbd.fit(X_input) # test output dtype if out_dtype is not None: expected_dtype = out_dtype elif out_dtype is None and X_input.dtype == np.float16: # wrong numeric input dtype are cast in np.float64 expected_dtype = np.float64 else: expected_dtype = X_input.dtype Xt = kbd.transform(X_input) assert Xt.dtype == expected_dtype @pytest.mark.parametrize("input_dtype", [np.float16, np.float32, np.float64]) @pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"]) def test_32_equal_64(input_dtype, encode): # TODO this check is redundant with common checks and can be removed # once #16290 is merged X_input = np.array(X, dtype=input_dtype) # 32 bit output kbd_32 = KBinsDiscretizer( n_bins=3, encode=encode, quantile_method="averaged_inverted_cdf", dtype=np.float32, ) kbd_32.fit(X_input) Xt_32 = kbd_32.transform(X_input) # 64 bit output kbd_64 = KBinsDiscretizer( n_bins=3, encode=encode, quantile_method="averaged_inverted_cdf", dtype=np.float64, ) kbd_64.fit(X_input) Xt_64 = kbd_64.transform(X_input) assert_allclose_dense_sparse(Xt_32, Xt_64) def test_kbinsdiscretizer_subsample_default(): # Since the size of X is small (< 2e5), subsampling will not take place. 
X = np.array([-2, 1.5, -4, -1]).reshape(-1, 1) kbd_default = KBinsDiscretizer( n_bins=10, encode="ordinal", strategy="quantile", quantile_method="averaged_inverted_cdf", ) kbd_default.fit(X) kbd_without_subsampling = clone(kbd_default) kbd_without_subsampling.set_params(subsample=None) kbd_without_subsampling.fit(X) for bin_kbd_default, bin_kbd_with_subsampling in zip( kbd_default.bin_edges_[0], kbd_without_subsampling.bin_edges_[0] ): np.testing.assert_allclose(bin_kbd_default, bin_kbd_with_subsampling) assert kbd_default.bin_edges_.shape == kbd_without_subsampling.bin_edges_.shape @pytest.mark.parametrize( "encode, expected_names", [ ( "onehot", [ f"feat{col_id}_{float(bin_id)}" for col_id in range(3) for bin_id in range(4) ], ), ( "onehot-dense", [ f"feat{col_id}_{float(bin_id)}" for col_id in range(3) for bin_id in range(4) ], ), ("ordinal", [f"feat{col_id}" for col_id in range(3)]), ], ) def test_kbinsdiscrtizer_get_feature_names_out(encode, expected_names): """Check get_feature_names_out for different settings. Non-regression test for #22731 """ X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]] kbd = KBinsDiscretizer( n_bins=4, encode=encode, quantile_method="averaged_inverted_cdf" ).fit(X) Xt = kbd.transform(X) input_features = [f"feat{i}" for i in range(3)] output_names = kbd.get_feature_names_out(input_features) assert Xt.shape[1] == output_names.shape[0] assert_array_equal(output_names, expected_names) @pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"]) def test_kbinsdiscretizer_subsample(strategy, global_random_seed): # Check that the bin edges are almost the same when subsampling is used. 
X = np.random.RandomState(global_random_seed).random_sample((100000, 1)) + 1 if strategy == "quantile": kbd_subsampling = KBinsDiscretizer( strategy=strategy, subsample=50000, random_state=global_random_seed, quantile_method="averaged_inverted_cdf", ) else: kbd_subsampling = KBinsDiscretizer( strategy=strategy, subsample=50000, random_state=global_random_seed ) kbd_subsampling.fit(X) kbd_no_subsampling = clone(kbd_subsampling) kbd_no_subsampling.set_params(subsample=None) kbd_no_subsampling.fit(X) # We use a large tolerance because we can't expect the bin edges to be exactly the # same when subsampling is used. assert_allclose( kbd_subsampling.bin_edges_[0], kbd_no_subsampling.bin_edges_[0], rtol=1e-2 ) def test_quantile_method_future_warnings(): X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]] with pytest.warns( FutureWarning, match="The current default behavior, quantile_method='linear', will be " "changed to quantile_method='averaged_inverted_cdf' in " "scikit-learn version 1.9 to naturally support sample weight " "equivalence properties by default. Pass " "quantile_method='averaged_inverted_cdf' explicitly to silence this " "warning.", ): KBinsDiscretizer(strategy="quantile").fit(X) def test_invalid_quantile_method_with_sample_weight(): X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]] expected_msg = ( "When fitting with strategy='quantile' and sample weights, " "quantile_method should either be set to 'averaged_inverted_cdf' or " "'inverted_cdf', got quantile_method='linear' instead." ) with pytest.raises( ValueError, match=expected_msg, ): KBinsDiscretizer(strategy="quantile", quantile_method="linear").fit( X, sample_weight=[1, 1, 2, 2], )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_data.py
sklearn/preprocessing/tests/test_data.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import re import warnings import numpy as np import numpy.linalg as la import pytest from scipy import sparse, stats from sklearn import config_context, datasets from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.externals._packaging.version import parse as parse_version from sklearn.metrics.pairwise import linear_kernel from sklearn.model_selection import cross_val_predict from sklearn.pipeline import Pipeline from sklearn.preprocessing import ( Binarizer, KernelCenterer, MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, RobustScaler, StandardScaler, add_dummy_feature, maxabs_scale, minmax_scale, normalize, power_transform, quantile_transform, robust_scale, scale, ) from sklearn.preprocessing._data import BOUNDS_THRESHOLD, _handle_zeros_in_scale from sklearn.svm import SVR from sklearn.utils import gen_batches, shuffle from sklearn.utils._array_api import ( _convert_to_numpy, _get_namespace_device_dtype_ids, yield_namespace_device_dtype_combinations, ) from sklearn.utils._testing import ( _array_api_for_tests, _convert_container, assert_allclose, assert_allclose_dense_sparse, assert_almost_equal, assert_array_almost_equal, assert_array_equal, assert_array_less, skip_if_32bit, ) from sklearn.utils.estimator_checks import ( _get_check_estimator_ids, check_array_api_input_and_values, ) from sklearn.utils.fixes import ( _IS_WASM, COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS, sp_version, ) from sklearn.utils.sparsefuncs import mean_variance_axis iris = datasets.load_iris() # Make some data to be used many times rng = np.random.RandomState(0) n_features = 30 n_samples = 1000 offsets = rng.uniform(-1, 1, size=n_features) scales = rng.uniform(1, 10, size=n_features) X_2d = rng.randn(n_samples, n_features) * scales + offsets X_1row = X_2d[0, :].reshape(1, n_features) X_1col = X_2d[:, 0].reshape(n_samples, 1) 
X_list_1row = X_1row.tolist() X_list_1col = X_1col.tolist() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def _check_dim_1axis(a): return np.asarray(a).shape[0] def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size, n_samples_seen): if batch_stop != n: assert (i + 1) * chunk_size == n_samples_seen else: assert i * chunk_size + (batch_stop - batch_start) == n_samples_seen def test_raises_value_error_if_sample_weights_greater_than_1d(): # Sample weights must be either scalar or 1D n_sampless = [2, 3] n_featuress = [3, 2] for n_samples, n_features in zip(n_sampless, n_featuress): X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) scaler = StandardScaler() # make sure Error is raised the sample weights greater than 1d sample_weight_notOK = rng.randn(n_samples, 1) ** 2 with pytest.raises(ValueError): scaler.fit(X, y, sample_weight=sample_weight_notOK) def _yield_xw_x_sampleweight(): yield from ( ( [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [1, 2, 3], [4, 5, 6]], [2.0, 1.0], ), ( [[1, 0, 1], [0, 0, 1]], [[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]], np.array([1, 3]), ), ( [[1, np.nan, 1], [np.nan, np.nan, 1]], [ [1, np.nan, 1], [np.nan, np.nan, 1], [np.nan, np.nan, 1], [np.nan, np.nan, 1], ], np.array([1, 3]), ), ) @pytest.mark.parametrize(["Xw", "X", "sample_weight"], _yield_xw_x_sampleweight()) @pytest.mark.parametrize("array_constructor", ["array", "sparse_csr", "sparse_csc"]) def test_standard_scaler_sample_weight(Xw, X, sample_weight, array_constructor): with_mean = not array_constructor.startswith("sparse") X = _convert_container(X, array_constructor) Xw = _convert_container(Xw, array_constructor) # weighted StandardScaler yw = np.ones(Xw.shape[0]) scaler_w = StandardScaler(with_mean=with_mean) scaler_w.fit(Xw, yw, sample_weight=sample_weight) # unweighted, but with repeated samples y = np.ones(X.shape[0]) scaler = StandardScaler(with_mean=with_mean) scaler.fit(X, y) X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] 
assert_almost_equal(scaler.mean_, scaler_w.mean_) assert_almost_equal(scaler.var_, scaler_w.var_) assert_almost_equal(scaler.transform(X_test), scaler_w.transform(X_test)) @pytest.mark.parametrize(["Xw", "X", "sample_weight"], _yield_xw_x_sampleweight()) @pytest.mark.parametrize( "namespace, dev, dtype", yield_namespace_device_dtype_combinations(), ids=_get_namespace_device_dtype_ids, ) def test_standard_scaler_sample_weight_array_api( Xw, X, sample_weight, namespace, dev, dtype ): # N.B. The sample statistics for Xw w/ sample_weight should match # the statistics of X w/ uniform sample_weight. xp = _array_api_for_tests(namespace, dev) X = np.array(X).astype(dtype, copy=False) y = np.ones(X.shape[0]).astype(dtype, copy=False) Xw = np.array(Xw).astype(dtype, copy=False) yw = np.ones(Xw.shape[0]).astype(dtype, copy=False) X_test = np.array([[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]).astype(dtype, copy=False) scaler = StandardScaler() scaler.fit(X, y) scaler_w = StandardScaler() scaler_w.fit(Xw, yw, sample_weight=sample_weight) # Test array-api support and correctness. X_xp = xp.asarray(X, device=dev) y_xp = xp.asarray(y, device=dev) Xw_xp = xp.asarray(Xw, device=dev) yw_xp = xp.asarray(yw, device=dev) X_test_xp = xp.asarray(X_test, device=dev) sample_weight_xp = xp.asarray(sample_weight, device=dev) scaler_w_xp = StandardScaler() with config_context(array_api_dispatch=True): scaler_w_xp.fit(Xw_xp, yw_xp, sample_weight=sample_weight_xp) w_mean = _convert_to_numpy(scaler_w_xp.mean_, xp=xp) w_var = _convert_to_numpy(scaler_w_xp.var_, xp=xp) assert_allclose(scaler_w.mean_, w_mean) assert_allclose(scaler_w.var_, w_var) # unweighted, but with repeated samples scaler_xp = StandardScaler() with config_context(array_api_dispatch=True): scaler_xp.fit(X_xp, y_xp) uw_mean = _convert_to_numpy(scaler_xp.mean_, xp=xp) uw_var = _convert_to_numpy(scaler_xp.var_, xp=xp) assert_allclose(scaler.mean_, uw_mean) assert_allclose(scaler.var_, uw_var) # Check that both array-api outputs match. 
assert_allclose(uw_mean, w_mean) assert_allclose(uw_var, w_var) with config_context(array_api_dispatch=True): assert_allclose( _convert_to_numpy(scaler_xp.transform(X_test_xp), xp=xp), _convert_to_numpy(scaler_w_xp.transform(X_test_xp), xp=xp), ) def test_standard_scaler_1d(): # Test scaling of dataset along single axis for X in [X_1row, X_1col, X_list_1row, X_list_1row]: scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) if isinstance(X, list): X = np.array(X) # cast only after scaling done if _check_dim_1axis(X) == 1: assert_almost_equal(scaler.mean_, X.ravel()) assert_almost_equal(scaler.scale_, np.ones(n_features)) assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) assert_array_almost_equal(X_scaled.std(axis=0), np.zeros_like(n_features)) else: assert_almost_equal(scaler.mean_, X.mean()) assert_almost_equal(scaler.scale_, X.std()) assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) assert scaler.n_samples_seen_ == X.shape[0] # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X) # Constant feature X = np.ones((5, 1)) scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_almost_equal(scaler.mean_, 1.0) assert_almost_equal(scaler.scale_, 1.0) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 0.0) assert scaler.n_samples_seen_ == X.shape[0] @pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) @pytest.mark.parametrize("add_sample_weight", [False, True]) def test_standard_scaler_dtype(add_sample_weight, sparse_container): # Ensure scaling does not affect dtype rng = np.random.RandomState(0) n_samples = 10 n_features = 3 if add_sample_weight: sample_weight = np.ones(n_samples) else: sample_weight = None with_mean 
= True if sparse_container is not None: # scipy sparse containers do not support float16, see # https://github.com/scipy/scipy/issues/7408 for more details. supported_dtype = [np.float64, np.float32] else: supported_dtype = [np.float64, np.float32, np.float16] for dtype in supported_dtype: X = rng.randn(n_samples, n_features).astype(dtype) if sparse_container is not None: X = sparse_container(X) with_mean = False scaler = StandardScaler(with_mean=with_mean) X_scaled = scaler.fit(X, sample_weight=sample_weight).transform(X) assert X.dtype == X_scaled.dtype assert scaler.mean_.dtype == np.float64 assert scaler.scale_.dtype == np.float64 @pytest.mark.parametrize( "scaler", [ StandardScaler(with_mean=False), RobustScaler(with_centering=False), ], ) @pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) @pytest.mark.parametrize("add_sample_weight", [False, True]) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) @pytest.mark.parametrize("constant", [0, 1.0, 100.0]) def test_standard_scaler_constant_features( scaler, add_sample_weight, sparse_container, dtype, constant ): scaler = clone(scaler) # Avoid side effects from previous tests. if isinstance(scaler, RobustScaler) and add_sample_weight: pytest.skip(f"{scaler.__class__.__name__} does not yet support sample_weight") rng = np.random.RandomState(0) n_samples = 100 n_features = 1 if add_sample_weight: fit_params = dict(sample_weight=rng.uniform(size=n_samples) * 2) else: fit_params = {} X_array = np.full(shape=(n_samples, n_features), fill_value=constant, dtype=dtype) X = X_array if sparse_container is None else sparse_container(X_array) X_scaled = scaler.fit(X, **fit_params).transform(X) if isinstance(scaler, StandardScaler): # The variance info should be close to zero for constant features. 
assert_allclose(scaler.var_, np.zeros(X.shape[1]), atol=1e-7) # Constant features should not be scaled (scale of 1.): assert_allclose(scaler.scale_, np.ones(X.shape[1])) assert X_scaled is not X # make sure we make a copy assert_allclose_dense_sparse(X_scaled, X) if isinstance(scaler, StandardScaler) and not add_sample_weight: # Also check consistency with the standard scale function. X_scaled_2 = scale(X, with_mean=scaler.with_mean) assert X_scaled_2 is not X # make sure we did a copy assert_allclose_dense_sparse(X_scaled_2, X) @pytest.mark.parametrize("n_samples", [10, 100, 10_000]) @pytest.mark.parametrize("average", [1e-10, 1, 1e10]) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) @pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) def test_standard_scaler_near_constant_features( n_samples, sparse_container, average, dtype ): # Check that when the variance is too small (var << mean**2) the feature # is considered constant and not scaled. scale_min, scale_max = -30, 19 scales = np.array([10**i for i in range(scale_min, scale_max + 1)], dtype=dtype) n_features = scales.shape[0] X = np.empty((n_samples, n_features), dtype=dtype) # Make a dataset of known var = scales**2 and mean = average X[: n_samples // 2, :] = average + scales X[n_samples // 2 :, :] = average - scales X_array = X if sparse_container is None else sparse_container(X) scaler = StandardScaler(with_mean=False).fit(X_array) # StandardScaler uses float64 accumulators even if the data has a float32 # dtype. eps = np.finfo(np.float64).eps # if var < bound = N.eps.var + N².eps².mean², the feature is considered # constant and the scale_ attribute is set to 1. 
bounds = n_samples * eps * scales**2 + n_samples**2 * eps**2 * average**2 within_bounds = scales**2 <= bounds # Check that scale_min is small enough to have some scales below the # bound and therefore detected as constant: assert np.any(within_bounds) # Check that such features are actually treated as constant by the scaler: assert all(scaler.var_[within_bounds] <= bounds[within_bounds]) assert_allclose(scaler.scale_[within_bounds], 1.0) # Depending the on the dtype of X, some features might not actually be # representable as non constant for small scales (even if above the # precision bound of the float64 variance estimate). Such feature should # be correctly detected as constants with 0 variance by StandardScaler. representable_diff = X[0, :] - X[-1, :] != 0 assert_allclose(scaler.var_[np.logical_not(representable_diff)], 0) assert_allclose(scaler.scale_[np.logical_not(representable_diff)], 1) # The other features are scaled and scale_ is equal to sqrt(var_) assuming # that scales are large enough for average + scale and average - scale to # be distinct in X (depending on X's dtype). common_mask = np.logical_and(scales**2 > bounds, representable_diff) assert_allclose(scaler.scale_[common_mask], np.sqrt(scaler.var_)[common_mask]) def test_scale_1d(): # 1-d inputs X_list = [1.0, 3.0, 5.0, 0.0] X_arr = np.array(X_list) for X in [X_list, X_arr]: X_scaled = scale(X) assert_array_almost_equal(X_scaled.mean(), 0.0) assert_array_almost_equal(X_scaled.std(), 1.0) assert_array_equal(scale(X, with_mean=False, with_std=False), X) @skip_if_32bit def test_standard_scaler_numerical_stability(): # Test numerical stability of scaling # np.log(1e-5) is taken because of its floating point representation # was empirically found to cause numerical problems with np.mean & np.std. 
x = np.full(8, np.log(1e-5), dtype=np.float64) # This does not raise a warning as the number of samples is too low # to trigger the problem in recent numpy with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) scale(x) assert_array_almost_equal(scale(x), np.zeros(8)) # with 2 more samples, the std computation run into numerical issues: x = np.full(10, np.log(1e-5), dtype=np.float64) warning_message = "standard deviation of the data is probably very close to 0" with pytest.warns(UserWarning, match=warning_message): x_scaled = scale(x) assert_array_almost_equal(x_scaled, np.zeros(10)) x = np.full(10, 1e-100, dtype=np.float64) with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) x_small_scaled = scale(x) assert_array_almost_equal(x_small_scaled, np.zeros(10)) # Large values can cause (often recoverable) numerical stability issues: x_big = np.full(10, 1e100, dtype=np.float64) warning_message = "Dataset may contain too large values" with pytest.warns(UserWarning, match=warning_message): x_big_scaled = scale(x_big) assert_array_almost_equal(x_big_scaled, np.zeros(10)) assert_array_almost_equal(x_big_scaled, x_small_scaled) with pytest.warns(UserWarning, match=warning_message): x_big_centered = scale(x_big, with_std=False) assert_array_almost_equal(x_big_centered, np.zeros(10)) assert_array_almost_equal(x_big_centered, x_small_scaled) def test_scaler_2d_arrays(): # Test scaling of 2d array along first axis rng = np.random.RandomState(0) n_features = 5 n_samples = 4 X = rng.randn(n_samples, n_features) X[:, 0] = 0.0 # first feature is always of zero scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert not np.any(np.isnan(X_scaled)) assert scaler.n_samples_seen_ == n_samples assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) # Check that X has been copied assert X_scaled is not X # check inverse transform 
X_scaled_back = scaler.inverse_transform(X_scaled) assert X_scaled_back is not X assert X_scaled_back is not X_scaled assert_array_almost_equal(X_scaled_back, X) X_scaled = scale(X, axis=1, with_std=False) assert not np.any(np.isnan(X_scaled)) assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) X_scaled = scale(X, axis=1, with_std=True) assert not np.any(np.isnan(X_scaled)) assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0]) # Check that the data hasn't been modified assert X_scaled is not X X_scaled = scaler.fit(X).transform(X, copy=False) assert not np.any(np.isnan(X_scaled)) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) # Check that X has not been copied assert X_scaled is X X = rng.randn(4, 5) X[:, 0] = 1.0 # first feature is a constant, non zero feature scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert not np.any(np.isnan(X_scaled)) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0]) # Check that X has not been copied assert X_scaled is not X def test_scaler_float16_overflow(): # Test if the scaler will not overflow on float16 numpy arrays rng = np.random.RandomState(0) # float16 has a maximum of 65500.0. On the worst case 5 * 200000 is 100000 # which is enough to overflow the data type X = rng.uniform(5, 10, [200000, 1]).astype(np.float16) with np.errstate(over="raise"): scaler = StandardScaler().fit(X) X_scaled = scaler.transform(X) # Calculate the float64 equivalent to verify result X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64)) # Overflow calculations may cause -inf, inf, or nan. Since there is no nan # input, all of the outputs should be finite. 
This may be redundant since a # FloatingPointError exception will be thrown on overflow above. assert np.all(np.isfinite(X_scaled)) # The normal distribution is very unlikely to go above 4. At 4.0-8.0 the # float16 precision is 2^-8 which is around 0.004. Thus only 2 decimals are # checked to account for precision differences. assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2) def test_handle_zeros_in_scale(): s1 = np.array([0, 1e-16, 1, 2, 3]) s2 = _handle_zeros_in_scale(s1, copy=True) assert_allclose(s1, np.array([0, 1e-16, 1, 2, 3])) assert_allclose(s2, np.array([1, 1, 1, 2, 3])) def test_minmax_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = MinMaxScaler().fit(X) scaler_incr = MinMaxScaler() for batch in gen_batches(n_samples, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) # Test std after 1 step batch0 = slice(0, chunk_size) scaler_batch = MinMaxScaler().fit(X[batch0]) scaler_incr = MinMaxScaler().partial_fit(X[batch0]) assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) # Test std 
until the end of partial fits, and scaler_batch = MinMaxScaler().fit(X) scaler_incr = MinMaxScaler() # Clean estimator for i, batch in enumerate(gen_batches(n_samples, chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr( i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_, ) def test_standard_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = StandardScaler(with_std=False).fit(X) scaler_incr = StandardScaler(with_std=False) for batch in gen_batches(n_samples, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_) assert scaler_batch.var_ == scaler_incr.var_ # Nones assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ # Test std after 1 step batch0 = slice(0, chunk_size) scaler_incr = StandardScaler().partial_fit(X[batch0]) if chunk_size == 1: assert_array_almost_equal( np.zeros(n_features, dtype=np.float64), scaler_incr.var_ ) assert_array_almost_equal( np.ones(n_features, dtype=np.float64), scaler_incr.scale_ ) else: assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_) assert_array_almost_equal( np.std(X[batch0], axis=0), scaler_incr.scale_ ) # no constants # Test std until the end of partial fits, and scaler_batch = StandardScaler().fit(X) scaler_incr = StandardScaler() # Clean estimator for i, batch in enumerate(gen_batches(n_samples, chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr( i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_, ) assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_) assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_ 
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) def test_standard_scaler_partial_fit_numerical_stability(sparse_container): # Test if the incremental computation introduces significative errors # for large datasets with values of large magniture rng = np.random.RandomState(0) n_features = 2 n_samples = 100 offsets = rng.uniform(-1e15, 1e15, size=n_features) scales = rng.uniform(1e3, 1e6, size=n_features) X = rng.randn(n_samples, n_features) * scales + offsets scaler_batch = StandardScaler().fit(X) scaler_incr = StandardScaler() for chunk in X: scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features)) # Regardless of abs values, they must not be more diff 6 significant digits tol = 10 ** (-6) assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol) assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol) assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol) # NOTE Be aware that for much larger offsets std is very unstable (last # assert) while mean is OK. # Sparse input size = (100, 3) scale = 1e20 X = sparse_container(rng.randint(0, 2, size).astype(np.float64) * scale) # with_mean=False is required with sparse input scaler = StandardScaler(with_mean=False).fit(X) scaler_incr = StandardScaler(with_mean=False) for chunk in X: if chunk.ndim == 1: # Sparse arrays can be 1D (in scipy 1.14 and later) while old # sparse matrix instances are always 2D. 
chunk = chunk.reshape(1, -1) scaler_incr = scaler_incr.partial_fit(chunk) # Regardless of magnitude, they must not differ more than of 6 digits tol = 10 ** (-6) assert scaler.mean_ is not None assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol) assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol) @pytest.mark.parametrize("sample_weight", [True, None]) @pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) def test_partial_fit_sparse_input(sample_weight, sparse_container): # Check that sparsity is not destroyed X = sparse_container(np.array([[1.0], [0.0], [0.0], [5.0]])) if sample_weight: sample_weight = rng.rand(X.shape[0]) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) X_null = null_transform.partial_fit(X, sample_weight=sample_weight).transform(X) assert_array_equal(X_null.toarray(), X.toarray()) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.toarray(), X_null.toarray()) assert_array_equal(X_orig.toarray(), X.toarray()) @pytest.mark.parametrize("sample_weight", [True, None]) def test_standard_scaler_trasform_with_partial_fit(sample_weight): # Check some postconditions after applying partial_fit and transform X = X_2d[:100, :] if sample_weight: sample_weight = rng.rand(X.shape[0]) scaler_incr = StandardScaler() for i, batch in enumerate(gen_batches(X.shape[0], 1)): X_sofar = X[: (i + 1), :] chunks_copy = X_sofar.copy() if sample_weight is None: scaled_batch = StandardScaler().fit_transform(X_sofar) scaler_incr = scaler_incr.partial_fit(X[batch]) else: scaled_batch = StandardScaler().fit_transform( X_sofar, sample_weight=sample_weight[: i + 1] ) scaler_incr = scaler_incr.partial_fit( X[batch], sample_weight=sample_weight[batch] ) scaled_incr = scaler_incr.transform(X_sofar) assert_array_almost_equal(scaled_batch, scaled_incr) assert_array_almost_equal(X_sofar, chunks_copy) # No change right_input = scaler_incr.inverse_transform(scaled_incr) 
assert_array_almost_equal(X_sofar, right_input) zero = np.zeros(X.shape[1]) epsilon = np.finfo(float).eps assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal assert_array_less(zero, scaler_incr.scale_ + epsilon) if sample_weight is None: # (i+1) because the Scaler has been already fitted assert (i + 1) == scaler_incr.n_samples_seen_ else: assert np.sum(sample_weight[: i + 1]) == pytest.approx( scaler_incr.n_samples_seen_ ) def test_standard_check_array_of_inverse_transform(): # Check if StandardScaler inverse_transform is # converting the integer array to float x = np.array( [ [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 0], [0, 8, 0, 1, 0, 0], [1, 4, 1, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 4, 0, 1, 0, 1], ], dtype=np.int32, ) scaler = StandardScaler() scaler.fit(x) # The of inverse_transform should be converted # to a float array. # If not X *= self.scale_ will fail. scaler.inverse_transform(x) @pytest.mark.parametrize( "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations(), ids=_get_namespace_device_dtype_ids, ) @pytest.mark.parametrize( "check", [check_array_api_input_and_values], ids=_get_check_estimator_ids, ) @pytest.mark.parametrize( "estimator", [ MaxAbsScaler(), MaxAbsScaler(clip=True), MinMaxScaler(), MinMaxScaler(clip=True), KernelCenterer(), Normalizer(norm="l1"), Normalizer(norm="l2"), Normalizer(norm="max"), Binarizer(), ], ids=_get_check_estimator_ids, ) def test_preprocessing_array_api_compliance( estimator, check, array_namespace, device, dtype_name ): name = estimator.__class__.__name__ check(name, estimator, array_namespace, device=device, dtype_name=dtype_name) @pytest.mark.parametrize( "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations(), ids=_get_namespace_device_dtype_ids, ) @pytest.mark.parametrize( "check", [check_array_api_input_and_values], ids=_get_check_estimator_ids, ) @pytest.mark.parametrize("sample_weight", [True, None]) def test_standard_scaler_array_api_compliance( 
check, sample_weight, array_namespace, device, dtype_name ): estimator = StandardScaler() name = estimator.__class__.__name__ check( name, estimator, array_namespace, device=device, dtype_name=dtype_name, check_sample_weight=sample_weight, ) def test_min_max_scaler_iris(): X = iris.data scaler = MinMaxScaler() # default params X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.max(axis=0), 1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # not default params: min=1, max=2 scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 1) assert_array_almost_equal(X_trans.max(axis=0), 2) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # min=-.5, max=.6 scaler = MinMaxScaler(feature_range=(-0.5, 0.6)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), -0.5) assert_array_almost_equal(X_trans.max(axis=0), 0.6) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # raises on invalid range scaler = MinMaxScaler(feature_range=(2, 1)) with pytest.raises(ValueError): scaler.fit(X) def test_min_max_scaler_zero_variance_features(): # Check min max scaler on toy data with zero variance features X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.1], [0.0, 1.0, +1.1]] X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]] # default params scaler = MinMaxScaler() X_trans = scaler.fit_transform(X) X_expected_0_1 = [[0.0, 0.0, 0.5], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]] assert_array_almost_equal(X_trans, X_expected_0_1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) X_trans_new = scaler.transform(X_new) X_expected_0_1_new = [[+0.0, 1.0, 0.500], [-1.0, 0.0, 0.083], [+0.0, 0.0, 1.333]] assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) # not default params 
scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) X_expected_1_2 = [[1.0, 1.0, 1.5], [1.0, 1.0, 1.0], [1.0, 1.0, 2.0]]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_polynomial.py
sklearn/preprocessing/tests/test_polynomial.py
import re import sys import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal from scipy import sparse from scipy.interpolate import BSpline from scipy.sparse import random as sparse_random from sklearn._config import config_context from sklearn.linear_model import LinearRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import ( KBinsDiscretizer, PolynomialFeatures, SplineTransformer, ) from sklearn.preprocessing._csr_polynomial_expansion import ( _get_sizeof_LARGEST_INT_t, ) from sklearn.utils._array_api import ( _convert_to_numpy, _get_namespace_device_dtype_ids, _is_numpy_namespace, device, get_namespace, yield_namespace_device_dtype_combinations, ) from sklearn.utils._mask import _get_mask from sklearn.utils._testing import ( _array_api_for_tests, assert_allclose_dense_sparse, assert_array_almost_equal, ) from sklearn.utils.fixes import ( CSC_CONTAINERS, CSR_CONTAINERS, ) @pytest.mark.parametrize("est", (PolynomialFeatures, SplineTransformer)) def test_polynomial_and_spline_array_order(est): """Test that output array has the given order.""" X = np.arange(10).reshape(5, 2) def is_c_contiguous(a): return np.isfortran(a.T) assert is_c_contiguous(est().fit_transform(X)) assert is_c_contiguous(est(order="C").fit_transform(X)) assert np.isfortran(est(order="F").fit_transform(X)) @pytest.mark.parametrize( "params, err_msg", [ ({"knots": [[1]]}, r"Number of knots, knots.shape\[0\], must be >= 2."), ({"knots": [[1, 1], [2, 2]]}, r"knots.shape\[1\] == n_features is violated"), ({"knots": [[1], [0]]}, "knots must be sorted without duplicates."), ], ) def test_spline_transformer_input_validation(params, err_msg): """Test that we raise errors for invalid input in SplineTransformer.""" X = [[1], [2]] with pytest.raises(ValueError, match=err_msg): SplineTransformer(**params).fit(X) @pytest.mark.parametrize("extrapolation", ["continue", "periodic"]) def test_spline_transformer_integer_knots(extrapolation): """Test 
that SplineTransformer accepts integer value knot positions.""" X = np.arange(20).reshape(10, 2) knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]] _ = SplineTransformer( degree=3, knots=knots, extrapolation=extrapolation ).fit_transform(X) def test_spline_transformer_feature_names(): """Test that SplineTransformer generates correct feature names.""" X = np.arange(20).reshape(10, 2) splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X) feature_names = splt.get_feature_names_out() assert_array_equal( feature_names, [ "x0_sp_0", "x0_sp_1", "x0_sp_2", "x0_sp_3", "x0_sp_4", "x1_sp_0", "x1_sp_1", "x1_sp_2", "x1_sp_3", "x1_sp_4", ], ) splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X) feature_names = splt.get_feature_names_out(["a", "b"]) assert_array_equal( feature_names, [ "a_sp_0", "a_sp_1", "a_sp_2", "a_sp_3", "b_sp_0", "b_sp_1", "b_sp_2", "b_sp_3", ], ) @pytest.mark.parametrize( "extrapolation", ["constant", "linear", "continue", "periodic"], ) @pytest.mark.parametrize("degree", [2, 3]) def test_split_transform_feature_names_extrapolation_degree(extrapolation, degree): """Test feature names are correct for different extrapolations and degree. Non-regression test for gh-25292. """ X = np.arange(20).reshape(10, 2) splt = SplineTransformer(degree=degree, extrapolation=extrapolation).fit(X) feature_names = splt.get_feature_names_out(["a", "b"]) assert len(feature_names) == splt.n_features_out_ X_trans = splt.transform(X) assert X_trans.shape[1] == len(feature_names) @pytest.mark.parametrize("degree", range(1, 5)) @pytest.mark.parametrize("n_knots", range(3, 5)) @pytest.mark.parametrize("knots", ["uniform", "quantile"]) @pytest.mark.parametrize("extrapolation", ["constant", "periodic"]) def test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation): """Test that B-splines are indeed a decomposition of unity. Splines basis functions must sum up to 1 per row, if we stay in between boundaries. 
""" X = np.linspace(0, 1, 100)[:, None] # make the boundaries 0 and 1 part of X_train, for sure. X_train = np.r_[[[0]], X[::2, :], [[1]]] X_test = X[1::2, :] if extrapolation == "periodic": n_knots = n_knots + degree # periodic splines require degree < n_knots splt = SplineTransformer( n_knots=n_knots, degree=degree, knots=knots, include_bias=True, extrapolation=extrapolation, ) splt.fit(X_train) for X in [X_train, X_test]: assert_allclose(np.sum(splt.transform(X), axis=1), 1) @pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) def test_spline_transformer_linear_regression(bias, intercept): """Test that B-splines fit a sinusodial curve pretty well.""" X = np.linspace(0, 10, 100)[:, None] y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose pipe = Pipeline( steps=[ ( "spline", SplineTransformer( n_knots=15, degree=3, include_bias=bias, extrapolation="constant", ), ), ("ols", LinearRegression(fit_intercept=intercept)), ] ) pipe.fit(X, y) assert_allclose(pipe.predict(X), y, rtol=1e-3) @pytest.mark.parametrize( ["knots", "n_knots", "sample_weight", "expected_knots"], [ ("uniform", 3, None, np.array([[0, 2], [3, 8], [6, 14]])), ( "uniform", 3, np.array([0, 0, 1, 1, 0, 3, 1]), np.array([[2, 2], [4, 8], [6, 14]]), ), ("uniform", 4, None, np.array([[0, 2], [2, 6], [4, 10], [6, 14]])), ("quantile", 3, None, np.array([[0, 2], [3, 3], [6, 14]])), ( "quantile", 3, np.array([0, 0, 1, 1, 0, 3, 1]), np.array([[2, 2], [5, 8], [6, 14]]), ), ], ) def test_spline_transformer_get_base_knot_positions( knots, n_knots, sample_weight, expected_knots ): """Check the behaviour to find knot positions with and without sample_weight.""" X = np.array([[0, 2], [0, 2], [2, 2], [3, 3], [4, 6], [5, 8], [6, 14]]) base_knots = SplineTransformer._get_base_knot_positions( X=X, knots=knots, n_knots=n_knots, sample_weight=sample_weight ) assert_allclose(base_knots, expected_knots) @pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) 
def test_spline_transformer_periodic_linear_regression(bias, intercept): """Test that B-splines fit a periodic curve pretty well.""" # "+ 3" to avoid the value 0 in assert_allclose def f(x): return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3 X = np.linspace(0, 1, 101)[:, None] pipe = Pipeline( steps=[ ( "spline", SplineTransformer( n_knots=20, degree=3, include_bias=bias, extrapolation="periodic", ), ), ("ols", LinearRegression(fit_intercept=intercept)), ] ) pipe.fit(X, f(X[:, 0])) # Generate larger array to check periodic extrapolation X_ = np.linspace(-1, 2, 301)[:, None] predictions = pipe.predict(X_) assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01) assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3) def test_spline_transformer_periodic_spline_backport(): """Test that the backport of extrapolate="periodic" works correctly""" X = np.linspace(-2, 3.5, 10)[:, None] degree = 2 # Use periodic extrapolation backport in SplineTransformer transformer = SplineTransformer( degree=degree, extrapolation="periodic", knots=[[-1.0], [0.0], [1.0]] ) Xt = transformer.fit_transform(X) # Use periodic extrapolation in BSpline coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) spl = BSpline(np.arange(-3, 4), coef, degree, "periodic") Xspl = spl(X[:, 0]) assert_allclose(Xt, Xspl) def test_spline_transformer_periodic_splines_periodicity(): """Test if shifted knots result in the same transformation up to permutation.""" X = np.linspace(0, 10, 101)[:, None] transformer_1 = SplineTransformer( degree=3, extrapolation="periodic", knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]], ) transformer_2 = SplineTransformer( degree=3, extrapolation="periodic", knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]], ) Xt_1 = transformer_1.fit_transform(X) Xt_2 = transformer_2.fit_transform(X) assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]]) @pytest.mark.parametrize("degree", [3, 5]) def test_spline_transformer_periodic_splines_smoothness(degree): """Test 
that spline transformation is smooth at first / last knot.""" X = np.linspace(-2, 10, 10_000)[:, None] transformer = SplineTransformer( degree=degree, extrapolation="periodic", knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]], ) Xt = transformer.fit_transform(X) delta = (X.max() - X.min()) / len(X) tol = 10 * delta dXt = Xt # We expect splines of degree `degree` to be (`degree`-1) times # continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th # derivative should be continuous. This is the case if the (d+1)-th # numerical derivative is reasonably small (smaller than `tol` in absolute # value). We thus compute d-th numeric derivatives for d = 1, ..., `degree` # and compare them to `tol`. # # Note that the 0-th derivative is the function itself, such that we are # also checking its continuity. for d in range(1, degree + 1): # Check continuity of the (d-1)-th derivative diff = np.diff(dXt, axis=0) assert np.abs(diff).max() < tol # Compute d-th numeric derivative dXt = diff / delta # As degree `degree` splines are not `degree` times continuously # differentiable at the knots, the `degree + 1`-th numeric derivative # should have spikes at the knots. 
diff = np.diff(dXt, axis=0) assert np.abs(diff).max() > 1 @pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)]) @pytest.mark.parametrize("degree", [1, 2, 3, 4, 5]) def test_spline_transformer_extrapolation(bias, intercept, degree): """Test that B-spline extrapolation works correctly.""" # we use a straight line for that X = np.linspace(-1, 1, 100)[:, None] y = X.squeeze() # 'constant' pipe = Pipeline( [ [ "spline", SplineTransformer( n_knots=4, degree=degree, include_bias=bias, extrapolation="constant", ), ], ["ols", LinearRegression(fit_intercept=intercept)], ] ) pipe.fit(X, y) assert_allclose(pipe.predict([[-10], [5]]), [-1, 1]) # 'linear' pipe = Pipeline( [ [ "spline", SplineTransformer( n_knots=4, degree=degree, include_bias=bias, extrapolation="linear", ), ], ["ols", LinearRegression(fit_intercept=intercept)], ] ) pipe.fit(X, y) assert_allclose(pipe.predict([[-10], [5]]), [-10, 5]) # 'error' splt = SplineTransformer( n_knots=4, degree=degree, include_bias=bias, extrapolation="error" ) splt.fit(X) msg = "`X` contains values beyond the limits of the knots" with pytest.raises(ValueError, match=msg): splt.transform([[-10]]) with pytest.raises(ValueError, match=msg): splt.transform([[5]]) def test_spline_transformer_kbindiscretizer(global_random_seed): """Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer.""" rng = np.random.RandomState(global_random_seed) X = rng.randn(200).reshape(200, 1) n_bins = 5 n_knots = n_bins + 1 splt = SplineTransformer( n_knots=n_knots, degree=0, knots="quantile", include_bias=True ) splines = splt.fit_transform(X) kbd = KBinsDiscretizer( n_bins=n_bins, encode="onehot-dense", strategy="quantile", quantile_method="averaged_inverted_cdf", ) kbins = kbd.fit_transform(X) # Though they should be exactly equal, we test approximately with high # accuracy. 
assert_allclose(splines, kbins, rtol=1e-13) @pytest.mark.parametrize("degree", range(1, 3)) @pytest.mark.parametrize("knots", ["uniform", "quantile"]) @pytest.mark.parametrize( "extrapolation", ["error", "constant", "linear", "continue", "periodic"] ) @pytest.mark.parametrize("include_bias", [False, True]) def test_spline_transformer_sparse_output( degree, knots, extrapolation, include_bias, global_random_seed ): rng = np.random.RandomState(global_random_seed) X = rng.randn(200).reshape(40, 5) splt_dense = SplineTransformer( degree=degree, knots=knots, extrapolation=extrapolation, include_bias=include_bias, sparse_output=False, ) splt_sparse = SplineTransformer( degree=degree, knots=knots, extrapolation=extrapolation, include_bias=include_bias, sparse_output=True, ) splt_dense.fit(X) splt_sparse.fit(X) X_trans_sparse = splt_sparse.transform(X) X_trans_dense = splt_dense.transform(X) assert sparse.issparse(X_trans_sparse) and X_trans_sparse.format == "csr" assert_allclose(X_trans_dense, X_trans_sparse.toarray()) # extrapolation regime X_min = np.amin(X, axis=0) X_max = np.amax(X, axis=0) X_extra = np.r_[ np.linspace(X_min - 5, X_min, 10), np.linspace(X_max, X_max + 5, 10) ] if extrapolation == "error": msg = "`X` contains values beyond the limits of the knots" with pytest.raises(ValueError, match=msg): splt_dense.transform(X_extra) msg = "Out of bounds" with pytest.raises(ValueError, match=msg): splt_sparse.transform(X_extra) else: assert_allclose( splt_dense.transform(X_extra), splt_sparse.transform(X_extra).toarray() ) @pytest.mark.parametrize("n_knots", [5, 10]) @pytest.mark.parametrize("include_bias", [True, False]) @pytest.mark.parametrize("degree", [3, 4]) @pytest.mark.parametrize( "extrapolation", ["error", "constant", "linear", "continue", "periodic"] ) @pytest.mark.parametrize("sparse_output", [False, True]) def test_spline_transformer_n_features_out( n_knots, include_bias, degree, extrapolation, sparse_output ): """Test that transform results in 
n_features_out_ features.""" splt = SplineTransformer( n_knots=n_knots, degree=degree, include_bias=include_bias, extrapolation=extrapolation, sparse_output=sparse_output, ) X = np.linspace(0, 1, 10)[:, None] splt.fit(X) assert splt.transform(X).shape[1] == splt.n_features_out_ @pytest.mark.parametrize( "extrapolation", ["error", "constant", "linear", "continue", "periodic"] ) @pytest.mark.parametrize("sparse_output", [False, True]) def test_spline_transformer_handles_missing_values(extrapolation, sparse_output): """Test that SplineTransformer handles missing values correctly. We only test for knots="uniform", since for "quantile" the metrics are calculated differently with nans present and a different result is thus expected. """ X = np.array([[1, 1], [2, 2], [3, 3], [4, 5], [4, 4]], dtype=np.float64) X_nan = X.copy() X_nan[3, 0] = np.nan # Check correct error message for handle_missing="error": msg = "Input X contains NaN values and `SplineTransformer` is configured to error" with pytest.raises(ValueError, match=re.escape(msg)): spline = SplineTransformer( degree=2, n_knots=3, handle_missing="error", extrapolation=extrapolation, ) spline.fit_transform(X_nan) # Test correct results for handle_missing="zeros" spline = SplineTransformer( degree=2, n_knots=3, handle_missing="zeros", extrapolation=extrapolation, sparse_output=sparse_output, ) # Check `fit_transform` does the same as `fit` and then `transform`: X_nan_transform = spline.fit_transform(X_nan) X_nan_fit_then_transform = spline.fit(X_nan).transform(X_nan) assert_allclose_dense_sparse(X_nan_transform, X_nan_fit_then_transform) # Check that missing values are handled the same when sample_weight is passed: X_nan_transform_with_sample_weight = spline.fit_transform( X_nan, sample_weight=[1, 1, 1, 1, 1] ) assert_allclose_dense_sparse(X_nan_transform, X_nan_transform_with_sample_weight) # Check that `transform` works as expected when the passed data has a different # shape than the training data passed to `fit`: 
X_nan_transform_same_shape = spline.fit_transform(X_nan)[::2] X_nan_transform_different_shapes = spline.fit(X_nan).transform(X_nan[::2]) assert_allclose_dense_sparse( X_nan_transform_same_shape, X_nan_transform_different_shapes ) # Check that the masked nan-values are 0s: nan_mask = _get_mask(X_nan, np.nan) encoded_nan_mask = np.repeat(nan_mask, spline.bsplines_[0].c.shape[1], axis=1) assert (X_nan_transform[encoded_nan_mask] == 0).all() # Check the nan handling doesn't change that B-Splines basis functions are always in # the interval [0, 1]: X_nan_transform = spline.fit_transform(X_nan) if sparse.issparse(X_nan_transform): X_nan_transform = X_nan_transform.toarray() assert (X_nan_transform >= 0).all() assert (X_nan_transform <= 1).all() # Check that additional nan values don't change the calculation of the other # splines. Note: this assertion only holds as long as no np.nan value constructs the # min or max value of the data space (in this case, SplineTransformer's stats would # be calculated based on the other values and thus differ from another # SplineTransformer fit on the whole range). 
X_transform = spline.fit_transform(X) X_nan_transform = spline.fit_transform(X_nan) assert_allclose_dense_sparse( X_transform[~encoded_nan_mask], X_nan_transform[~encoded_nan_mask] ) @pytest.mark.parametrize( "extrapolation", ["error", "constant", "linear", "continue", "periodic"] ) @pytest.mark.parametrize("sparse_output", [False, True]) def test_spline_transformer_handles_all_nans(extrapolation, sparse_output): """Test that SplineTransformer encodes missing values to zeros even for all-nan-features.""" X = np.array([[1, 1], [2, 2], [3, 3], [4, 5], [4, 4]]) X_nan_full_column = np.array([[np.nan, np.nan], [np.nan, 1]]) spline = SplineTransformer( degree=2, n_knots=3, handle_missing="zeros", extrapolation=extrapolation, sparse_output=sparse_output, ) spline.fit(X_nan_full_column) all_missing_column_encoded = spline.transform(X_nan_full_column) nan_mask = _get_mask(X_nan_full_column, np.nan) encoded_nan_mask = np.repeat(nan_mask, spline.bsplines_[0].c.shape[1], axis=1) assert (all_missing_column_encoded[encoded_nan_mask] == 0).all() @pytest.mark.parametrize( "params, err_msg", [ ({"degree": (-1, 2)}, r"degree=\(min_degree, max_degree\) must"), ({"degree": (0, 1.5)}, r"degree=\(min_degree, max_degree\) must"), ({"degree": (3, 2)}, r"degree=\(min_degree, max_degree\) must"), ({"degree": (1, 2, 3)}, r"int or tuple \(min_degree, max_degree\)"), ], ) def test_polynomial_features_input_validation(params, err_msg): """Test that we raise errors for invalid input in PolynomialFeatures.""" X = [[1], [2]] with pytest.raises(ValueError, match=err_msg): PolynomialFeatures(**params).fit(X) @pytest.fixture() def single_feature_degree3(): X = np.arange(6)[:, np.newaxis] P = np.hstack([np.ones_like(X), X, X**2, X**3]) return X, P @pytest.mark.parametrize( "degree, include_bias, interaction_only, indices", [ (3, True, False, slice(None, None)), (3, False, False, slice(1, None)), (3, True, True, [0, 1]), (3, False, True, [1]), ((2, 3), True, False, [0, 2, 3]), ((2, 3), False, False, 
[2, 3]), ((2, 3), True, True, [0]), ((2, 3), False, True, []), ], ) @pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS) def test_polynomial_features_one_feature( single_feature_degree3, degree, include_bias, interaction_only, indices, X_container, ): """Test PolynomialFeatures on single feature up to degree 3.""" X, P = single_feature_degree3 if X_container is not None: X = X_container(X) tf = PolynomialFeatures( degree=degree, include_bias=include_bias, interaction_only=interaction_only ).fit(X) out = tf.transform(X) if X_container is not None: out = out.toarray() assert_allclose(out, P[:, indices]) if tf.n_output_features_ > 0: assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_) @pytest.fixture() def two_features_degree3(): X = np.arange(6).reshape((3, 2)) x1 = X[:, :1] x2 = X[:, 1:] P = np.hstack( [ x1**0 * x2**0, # 0 x1**1 * x2**0, # 1 x1**0 * x2**1, # 2 x1**2 * x2**0, # 3 x1**1 * x2**1, # 4 x1**0 * x2**2, # 5 x1**3 * x2**0, # 6 x1**2 * x2**1, # 7 x1**1 * x2**2, # 8 x1**0 * x2**3, # 9 ] ) return X, P @pytest.mark.parametrize( "degree, include_bias, interaction_only, indices", [ (2, True, False, slice(0, 6)), (2, False, False, slice(1, 6)), (2, True, True, [0, 1, 2, 4]), (2, False, True, [1, 2, 4]), ((2, 2), True, False, [0, 3, 4, 5]), ((2, 2), False, False, [3, 4, 5]), ((2, 2), True, True, [0, 4]), ((2, 2), False, True, [4]), (3, True, False, slice(None, None)), (3, False, False, slice(1, None)), (3, True, True, [0, 1, 2, 4]), (3, False, True, [1, 2, 4]), ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]), ((2, 3), False, False, slice(3, None)), ((2, 3), True, True, [0, 4]), ((2, 3), False, True, [4]), ((3, 3), True, False, [0, 6, 7, 8, 9]), ((3, 3), False, False, [6, 7, 8, 9]), ((3, 3), True, True, [0]), ((3, 3), False, True, []), # would need 3 input features ], ) @pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS) def test_polynomial_features_two_features( two_features_degree3, 
degree, include_bias, interaction_only, indices, X_container, ): """Test PolynomialFeatures on 2 features up to degree 3.""" X, P = two_features_degree3 if X_container is not None: X = X_container(X) tf = PolynomialFeatures( degree=degree, include_bias=include_bias, interaction_only=interaction_only ).fit(X) out = tf.transform(X) if X_container is not None: out = out.toarray() assert_allclose(out, P[:, indices]) if tf.n_output_features_ > 0: assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_) def test_polynomial_feature_names(): X = np.arange(30).reshape(10, 3) poly = PolynomialFeatures(degree=2, include_bias=True).fit(X) feature_names = poly.get_feature_names_out() assert_array_equal( ["1", "x0", "x1", "x2", "x0^2", "x0 x1", "x0 x2", "x1^2", "x1 x2", "x2^2"], feature_names, ) assert len(feature_names) == poly.transform(X).shape[1] poly = PolynomialFeatures(degree=3, include_bias=False).fit(X) feature_names = poly.get_feature_names_out(["a", "b", "c"]) assert_array_equal( [ "a", "b", "c", "a^2", "a b", "a c", "b^2", "b c", "c^2", "a^3", "a^2 b", "a^2 c", "a b^2", "a b c", "a c^2", "b^3", "b^2 c", "b c^2", "c^3", ], feature_names, ) assert len(feature_names) == poly.transform(X).shape[1] poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X) feature_names = poly.get_feature_names_out(["a", "b", "c"]) assert_array_equal( [ "a^2", "a b", "a c", "b^2", "b c", "c^2", "a^3", "a^2 b", "a^2 c", "a b^2", "a b c", "a c^2", "b^3", "b^2 c", "b c^2", "c^3", ], feature_names, ) assert len(feature_names) == poly.transform(X).shape[1] poly = PolynomialFeatures( degree=(3, 3), include_bias=True, interaction_only=True ).fit(X) feature_names = poly.get_feature_names_out(["a", "b", "c"]) assert_array_equal(["1", "a b c"], feature_names) assert len(feature_names) == poly.transform(X).shape[1] # test some unicode poly = PolynomialFeatures(degree=1, include_bias=True).fit(X) feature_names = poly.get_feature_names_out(["\u0001F40D", "\u262e", "\u05d0"]) 
assert_array_equal(["1", "\u0001F40D", "\u262e", "\u05d0"], feature_names) @pytest.mark.parametrize( ["deg", "include_bias", "interaction_only", "dtype"], [ (1, True, False, int), (2, True, False, int), (2, True, False, np.float32), (2, True, False, np.float64), (3, False, False, np.float64), (3, False, True, np.float64), (4, False, False, np.float64), (4, False, True, np.float64), ], ) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_polynomial_features_csc_X( deg, include_bias, interaction_only, dtype, csc_container, global_random_seed ): rng = np.random.RandomState(global_random_seed) X = rng.randint(0, 2, (100, 2)) X_csc = csc_container(X) est = PolynomialFeatures( deg, include_bias=include_bias, interaction_only=interaction_only ) Xt_csc = est.fit_transform(X_csc.astype(dtype)) Xt_dense = est.fit_transform(X.astype(dtype)) assert sparse.issparse(Xt_csc) and Xt_csc.format == "csc" assert Xt_csc.dtype == Xt_dense.dtype assert_array_almost_equal(Xt_csc.toarray(), Xt_dense) @pytest.mark.parametrize( ["deg", "include_bias", "interaction_only", "dtype"], [ (1, True, False, int), (2, True, False, int), (2, True, False, np.float32), (2, True, False, np.float64), (3, False, False, np.float64), (3, False, True, np.float64), ], ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_polynomial_features_csr_X( deg, include_bias, interaction_only, dtype, csr_container, global_random_seed ): rng = np.random.RandomState(global_random_seed) X = rng.randint(0, 2, (100, 2)) X_csr = csr_container(X) est = PolynomialFeatures( deg, include_bias=include_bias, interaction_only=interaction_only ) Xt_csr = est.fit_transform(X_csr.astype(dtype)) Xt_dense = est.fit_transform(X.astype(dtype, copy=False)) assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" assert Xt_csr.dtype == Xt_dense.dtype assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) @pytest.mark.parametrize("n_features", [1, 4, 5]) @pytest.mark.parametrize( "min_degree, max_degree", [(0, 
1), (0, 2), (1, 3), (0, 4), (3, 4)] ) @pytest.mark.parametrize("interaction_only", [True, False]) @pytest.mark.parametrize("include_bias", [True, False]) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_num_combinations( n_features, min_degree, max_degree, interaction_only, include_bias, csr_container ): """ Test that n_output_features_ is calculated correctly. """ x = csr_container(([1], ([0], [n_features - 1]))) est = PolynomialFeatures( degree=max_degree, interaction_only=interaction_only, include_bias=include_bias, ) est.fit(x) num_combos = est.n_output_features_ combos = PolynomialFeatures._combinations( n_features=n_features, min_degree=0, max_degree=max_degree, interaction_only=interaction_only, include_bias=include_bias, ) assert num_combos == sum([1 for _ in combos]) @pytest.mark.parametrize( ["deg", "include_bias", "interaction_only", "dtype"], [ (2, True, False, np.float32), (2, True, False, np.float64), (3, False, False, np.float64), (3, False, True, np.float64), ], ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_polynomial_features_csr_X_floats( deg, include_bias, interaction_only, dtype, csr_container, global_random_seed ): X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=global_random_seed)) X = X_csr.toarray() est = PolynomialFeatures( deg, include_bias=include_bias, interaction_only=interaction_only ) Xt_csr = est.fit_transform(X_csr.astype(dtype)) Xt_dense = est.fit_transform(X.astype(dtype)) assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" assert Xt_csr.dtype == Xt_dense.dtype assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) @pytest.mark.parametrize( ["zero_row_index", "deg", "interaction_only"], [ (0, 2, True), (1, 2, True), (2, 2, True), (0, 3, True), (1, 3, True), (2, 3, True), (0, 2, False), (1, 2, False), (2, 2, False), (0, 3, False), (1, 3, False), (2, 3, False), ], ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_polynomial_features_csr_X_zero_row( 
zero_row_index, deg, interaction_only, csr_container, global_random_seed ): X_csr = csr_container(sparse_random(3, 10, 1.0, random_state=global_random_seed)) X_csr[zero_row_index, :] = 0.0 X = X_csr.toarray() est = PolynomialFeatures(deg, include_bias=False, interaction_only=interaction_only) Xt_csr = est.fit_transform(X_csr) Xt_dense = est.fit_transform(X) assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" assert Xt_csr.dtype == Xt_dense.dtype assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) # This degree should always be one more than the highest degree supported by # _csr_expansion. @pytest.mark.parametrize( ["include_bias", "interaction_only"], [(True, True), (True, False), (False, True), (False, False)], ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_polynomial_features_csr_X_degree_4( include_bias, interaction_only, csr_container, global_random_seed ): X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=global_random_seed)) X = X_csr.toarray() est = PolynomialFeatures( 4, include_bias=include_bias, interaction_only=interaction_only ) Xt_csr = est.fit_transform(X_csr) Xt_dense = est.fit_transform(X) assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr" assert Xt_csr.dtype == Xt_dense.dtype assert_array_almost_equal(Xt_csr.toarray(), Xt_dense) @pytest.mark.parametrize( ["deg", "dim", "interaction_only"], [ (2, 1, True), (2, 2, True), (3, 1, True), (3, 2, True), (3, 3, True), (2, 1, False), (2, 2, False), (3, 1, False), (3, 2, False), (3, 3, False), ], ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_polynomial_features_csr_X_dim_edges(
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_encoders.py
sklearn/preprocessing/tests/test_encoders.py
import re import warnings import numpy as np import pytest from scipy import sparse from sklearn.exceptions import NotFittedError from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder from sklearn.utils._missing import is_scalar_nan from sklearn.utils._testing import ( _convert_container, assert_allclose, assert_array_equal, ) from sklearn.utils.fixes import CSR_CONTAINERS def test_one_hot_encoder_sparse_dense(): # check that sparse and dense will give the same results X = np.array([[3, 2, 1], [0, 1, 1]]) enc_sparse = OneHotEncoder() enc_dense = OneHotEncoder(sparse_output=False) X_trans_sparse = enc_sparse.fit_transform(X) X_trans_dense = enc_dense.fit_transform(X) assert X_trans_sparse.shape == (2, 5) assert X_trans_dense.shape == (2, 5) assert sparse.issparse(X_trans_sparse) assert not sparse.issparse(X_trans_dense) # check outcome assert_array_equal( X_trans_sparse.toarray(), [[0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0, 1.0]] ) assert_array_equal(X_trans_sparse.toarray(), X_trans_dense) @pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist", "warn"]) def test_one_hot_encoder_handle_unknown(handle_unknown): X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) X2 = np.array([[4, 1, 1]]) # Test that one hot encoder raises error for unknown features # present during transform. 
oh = OneHotEncoder(handle_unknown="error") oh.fit(X) with pytest.raises(ValueError, match="Found unknown categories"): oh.transform(X2) # Test the ignore option, ignores unknown features (giving all 0's) oh = OneHotEncoder(handle_unknown=handle_unknown) oh.fit(X) X2_passed = X2.copy() assert_array_equal( oh.transform(X2_passed).toarray(), np.array([[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]), ) # ensure transformed data was not modified in place assert_allclose(X2, X2_passed) @pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist", "warn"]) def test_one_hot_encoder_handle_unknown_strings(handle_unknown): X = np.array(["11111111", "22", "333", "4444"]).reshape((-1, 1)) X2 = np.array(["55555", "22"]).reshape((-1, 1)) # Non Regression test for the issue #12470 # Test the ignore option, when categories are numpy string dtype # particularly when the known category strings are larger # than the unknown category strings oh = OneHotEncoder(handle_unknown=handle_unknown) oh.fit(X) X2_passed = X2.copy() assert_array_equal( oh.transform(X2_passed).toarray(), np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]), ) # ensure transformed data was not modified in place assert_array_equal(X2, X2_passed) @pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) @pytest.mark.parametrize("input_dtype", [np.int32, np.float32, np.float64]) def test_one_hot_encoder_dtype(input_dtype, output_dtype): X = np.asarray([[0, 1]], dtype=input_dtype).T X_expected = np.asarray([[1, 0], [0, 1]], dtype=output_dtype) oh = OneHotEncoder(categories="auto", dtype=output_dtype) assert_array_equal(oh.fit_transform(X).toarray(), X_expected) assert_array_equal(oh.fit(X).transform(X).toarray(), X_expected) oh = OneHotEncoder(categories="auto", dtype=output_dtype, sparse_output=False) assert_array_equal(oh.fit_transform(X), X_expected) assert_array_equal(oh.fit(X).transform(X), X_expected) @pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) def 
test_one_hot_encoder_dtype_pandas(output_dtype): pd = pytest.importorskip("pandas") X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) X_expected = np.array([[1, 0, 1, 0], [0, 1, 0, 1]], dtype=output_dtype) oh = OneHotEncoder(dtype=output_dtype) assert_array_equal(oh.fit_transform(X_df).toarray(), X_expected) assert_array_equal(oh.fit(X_df).transform(X_df).toarray(), X_expected) oh = OneHotEncoder(dtype=output_dtype, sparse_output=False) assert_array_equal(oh.fit_transform(X_df), X_expected) assert_array_equal(oh.fit(X_df).transform(X_df), X_expected) def test_one_hot_encoder_feature_names(): enc = OneHotEncoder() X = [ ["Male", 1, "girl", 2, 3], ["Female", 41, "girl", 1, 10], ["Male", 51, "boy", 12, 3], ["Male", 91, "girl", 21, 30], ] enc.fit(X) feature_names = enc.get_feature_names_out() assert_array_equal( [ "x0_Female", "x0_Male", "x1_1", "x1_41", "x1_51", "x1_91", "x2_boy", "x2_girl", "x3_1", "x3_2", "x3_12", "x3_21", "x4_3", "x4_10", "x4_30", ], feature_names, ) feature_names2 = enc.get_feature_names_out(["one", "two", "three", "four", "five"]) assert_array_equal( [ "one_Female", "one_Male", "two_1", "two_41", "two_51", "two_91", "three_boy", "three_girl", "four_1", "four_2", "four_12", "four_21", "five_3", "five_10", "five_30", ], feature_names2, ) with pytest.raises(ValueError, match="input_features should have length"): enc.get_feature_names_out(["one", "two"]) def test_one_hot_encoder_feature_names_unicode(): enc = OneHotEncoder() X = np.array([["c❤t1", "dat2"]], dtype=object).T enc.fit(X) feature_names = enc.get_feature_names_out() assert_array_equal(["x0_c❤t1", "x0_dat2"], feature_names) feature_names = enc.get_feature_names_out(input_features=["n👍me"]) assert_array_equal(["n👍me_c❤t1", "n👍me_dat2"], feature_names) def test_one_hot_encoder_custom_feature_name_combiner(): """Check the behaviour of `feature_name_combiner` as a callable.""" def name_combiner(feature, category): return feature + "_" + repr(category) enc = 
OneHotEncoder(feature_name_combiner=name_combiner) X = np.array([["None", None]], dtype=object).T enc.fit(X) feature_names = enc.get_feature_names_out() assert_array_equal(["x0_'None'", "x0_None"], feature_names) feature_names = enc.get_feature_names_out(input_features=["a"]) assert_array_equal(["a_'None'", "a_None"], feature_names) def wrong_combiner(feature, category): # we should be returning a Python string return 0 enc = OneHotEncoder(feature_name_combiner=wrong_combiner).fit(X) err_msg = ( "When `feature_name_combiner` is a callable, it should return a Python string." ) with pytest.raises(TypeError, match=err_msg): enc.get_feature_names_out() def test_one_hot_encoder_set_params(): X = np.array([[1, 2]]).T oh = OneHotEncoder() # set params on not yet fitted object oh.set_params(categories=[[0, 1, 2, 3]]) assert oh.get_params()["categories"] == [[0, 1, 2, 3]] assert oh.fit_transform(X).toarray().shape == (2, 4) # set params on already fitted object oh.set_params(categories=[[0, 1, 2, 3, 4]]) assert oh.fit_transform(X).toarray().shape == (2, 5) def check_categorical_onehot(X): enc = OneHotEncoder(categories="auto") Xtr1 = enc.fit_transform(X) enc = OneHotEncoder(categories="auto", sparse_output=False) Xtr2 = enc.fit_transform(X) assert_allclose(Xtr1.toarray(), Xtr2) assert sparse.issparse(Xtr1) and Xtr1.format == "csr" return Xtr1.toarray() @pytest.mark.parametrize( "X", [ [["def", 1, 55], ["abc", 2, 55]], np.array([[10, 1, 55], [5, 2, 55]]), np.array([["b", "A", "cat"], ["a", "B", "cat"]], dtype=object), np.array([["b", 1, "cat"], ["a", np.nan, "cat"]], dtype=object), np.array([["b", 1, "cat"], ["a", float("nan"), "cat"]], dtype=object), np.array([[None, 1, "cat"], ["a", 2, "cat"]], dtype=object), np.array([[None, 1, None], ["a", np.nan, None]], dtype=object), np.array([[None, 1, None], ["a", float("nan"), None]], dtype=object), ], ids=[ "mixed", "numeric", "object", "mixed-nan", "mixed-float-nan", "mixed-None", "mixed-None-nan", "mixed-None-float-nan", ], ) 
def test_one_hot_encoder(X): Xtr = check_categorical_onehot(np.array(X)[:, [0]]) assert_allclose(Xtr, [[0, 1], [1, 0]]) Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]]) assert_allclose(Xtr, [[0, 1, 1, 0], [1, 0, 0, 1]]) Xtr = OneHotEncoder(categories="auto").fit_transform(X) assert_allclose(Xtr.toarray(), [[0, 1, 1, 0, 1], [1, 0, 0, 1, 1]]) @pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist", "warn"]) @pytest.mark.parametrize("sparse_", [False, True]) @pytest.mark.parametrize("drop", [None, "first"]) def test_one_hot_encoder_inverse(handle_unknown, sparse_, drop): X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] enc = OneHotEncoder(sparse_output=sparse_, drop=drop) X_tr = enc.fit_transform(X) exp = np.array(X, dtype=object) assert_array_equal(enc.inverse_transform(X_tr), exp) X = [[2, 55], [1, 55], [3, 55]] enc = OneHotEncoder(sparse_output=sparse_, categories="auto", drop=drop) X_tr = enc.fit_transform(X) exp = np.array(X) assert_array_equal(enc.inverse_transform(X_tr), exp) if drop is None: # with unknown categories # drop is incompatible with handle_unknown=ignore X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] enc = OneHotEncoder( sparse_output=sparse_, handle_unknown=handle_unknown, categories=[["abc", "def"], [1, 2], [54, 55, 56]], ) X_tr = enc.fit_transform(X) exp = np.array(X, dtype=object) exp[2, 1] = None assert_array_equal(enc.inverse_transform(X_tr), exp) # with an otherwise numerical output, still object if unknown X = [[2, 55], [1, 55], [3, 55]] enc = OneHotEncoder( sparse_output=sparse_, categories=[[1, 2], [54, 56]], handle_unknown=handle_unknown, ) X_tr = enc.fit_transform(X) exp = np.array(X, dtype=object) exp[2, 0] = None exp[:, 1] = None assert_array_equal(enc.inverse_transform(X_tr), exp) # incorrect shape raises X_tr = np.array([[0, 1, 1], [1, 0, 1]]) msg = re.escape("Shape of the passed X data is not correct") with pytest.raises(ValueError, match=msg): enc.inverse_transform(X_tr) 
@pytest.mark.parametrize("sparse_", [False, True]) @pytest.mark.parametrize( "X, X_trans", [ ([[2, 55], [1, 55], [2, 55]], [[0, 1, 1], [0, 0, 0], [0, 1, 1]]), ( [["one", "a"], ["two", "a"], ["three", "b"], ["two", "a"]], [[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0]], ), ], ) def test_one_hot_encoder_inverse_transform_raise_error_with_unknown( X, X_trans, sparse_ ): """Check that `inverse_transform` raise an error with unknown samples, no dropped feature, and `handle_unknow="error`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/14934 """ enc = OneHotEncoder(sparse_output=sparse_).fit(X) msg = ( r"Samples \[(\d )*\d\] can not be inverted when drop=None and " r"handle_unknown='error' because they contain all zeros" ) if sparse_: # emulate sparse data transform by a one-hot encoder sparse. X_trans = _convert_container(X_trans, "sparse") with pytest.raises(ValueError, match=msg): enc.inverse_transform(X_trans) def test_one_hot_encoder_inverse_if_binary(): X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) ohe = OneHotEncoder(drop="if_binary", sparse_output=False) X_tr = ohe.fit_transform(X) assert_array_equal(ohe.inverse_transform(X_tr), X) @pytest.mark.parametrize("drop", ["if_binary", "first", None]) @pytest.mark.parametrize("reset_drop", ["if_binary", "first", None]) def test_one_hot_encoder_drop_reset(drop, reset_drop): # check that resetting drop option without refitting does not throw an error X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) ohe = OneHotEncoder(drop=drop, sparse_output=False) ohe.fit(X) X_tr = ohe.transform(X) feature_names = ohe.get_feature_names_out() ohe.set_params(drop=reset_drop) assert_array_equal(ohe.inverse_transform(X_tr), X) assert_allclose(ohe.transform(X), X_tr) assert_array_equal(ohe.get_feature_names_out(), feature_names) @pytest.mark.parametrize("method", ["fit", "fit_transform"]) @pytest.mark.parametrize("X", [[1, 2], np.array([3.0, 4.0])]) def 
test_X_is_not_1D(X, method): oh = OneHotEncoder() msg = "Expected 2D array, got 1D array instead" with pytest.raises(ValueError, match=msg): getattr(oh, method)(X) @pytest.mark.parametrize("method", ["fit", "fit_transform"]) def test_X_is_not_1D_pandas(method): pd = pytest.importorskip("pandas") X = pd.Series([6, 3, 4, 6]) oh = OneHotEncoder() msg = f"Expected a 2-dimensional container but got {type(X)} instead." with pytest.raises(ValueError, match=msg): getattr(oh, method)(X) @pytest.mark.parametrize( "X, cat_exp, cat_dtype", [ ([["abc", 55], ["def", 55]], [["abc", "def"], [55]], np.object_), (np.array([[1, 2], [3, 2]]), [[1, 3], [2]], np.integer), ( np.array([["A", "cat"], ["B", "cat"]], dtype=object), [["A", "B"], ["cat"]], np.object_, ), (np.array([["A", "cat"], ["B", "cat"]]), [["A", "B"], ["cat"]], np.str_), (np.array([[1, 2], [np.nan, 2]]), [[1, np.nan], [2]], np.float64), ( np.array([["A", np.nan], [None, np.nan]], dtype=object), [["A", None], [np.nan]], np.object_, ), ( np.array([["A", float("nan")], [None, float("nan")]], dtype=object), [["A", None], [float("nan")]], np.object_, ), ], ids=[ "mixed", "numeric", "object", "string", "missing-float", "missing-np.nan-object", "missing-float-nan-object", ], ) def test_one_hot_encoder_categories(X, cat_exp, cat_dtype): # order of categories should not depend on order of samples for Xi in [X, X[::-1]]: enc = OneHotEncoder(categories="auto") enc.fit(Xi) # assert enc.categories == 'auto' assert isinstance(enc.categories_, list) for res, exp in zip(enc.categories_, cat_exp): res_list = res.tolist() if is_scalar_nan(exp[-1]): assert is_scalar_nan(res_list[-1]) assert res_list[:-1] == exp[:-1] else: assert res.tolist() == exp assert np.issubdtype(res.dtype, cat_dtype) @pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist", "warn"]) @pytest.mark.parametrize( "X, X2, cats, cat_dtype", [ ( np.array([["a", "b"]], dtype=object).T, np.array([["a", "d"]], dtype=object).T, [["a", "b", "c"]], np.object_, 
), ( np.array([[1, 2]], dtype="int64").T, np.array([[1, 4]], dtype="int64").T, [[1, 2, 3]], np.int64, ), ( np.array([["a", "b"]], dtype=object).T, np.array([["a", "d"]], dtype=object).T, [np.array(["a", "b", "c"])], np.object_, ), ( np.array([[None, "a"]], dtype=object).T, np.array([[None, "b"]], dtype=object).T, [[None, "a", "z"]], object, ), ( np.array([["a", "b"]], dtype=object).T, np.array([["a", np.nan]], dtype=object).T, [["a", "b", "z"]], object, ), ( np.array([["a", None]], dtype=object).T, np.array([["a", np.nan]], dtype=object).T, [["a", None, "z"]], object, ), ], ids=[ "object", "numeric", "object-string", "object-string-none", "object-string-nan", "object-None-and-nan", ], ) def test_one_hot_encoder_specified_categories(X, X2, cats, cat_dtype, handle_unknown): enc = OneHotEncoder(categories=cats) exp = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]) assert_array_equal(enc.fit_transform(X).toarray(), exp) assert list(enc.categories[0]) == list(cats[0]) assert enc.categories_[0].tolist() == list(cats[0]) # manually specified categories should have same dtype as # the data when coerced from lists assert enc.categories_[0].dtype == cat_dtype # when specifying categories manually, unknown categories should already # raise when fitting enc = OneHotEncoder(categories=cats) with pytest.raises(ValueError, match="Found unknown categories"): enc.fit(X2) enc = OneHotEncoder(categories=cats, handle_unknown=handle_unknown) exp = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) assert_array_equal(enc.fit(X2).transform(X2).toarray(), exp) def test_one_hot_encoder_unsorted_categories(): X = np.array([["a", "b"]], dtype=object).T enc = OneHotEncoder(categories=[["b", "a", "c"]]) exp = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) assert_array_equal(enc.fit(X).transform(X).toarray(), exp) assert_array_equal(enc.fit_transform(X).toarray(), exp) assert enc.categories_[0].tolist() == ["b", "a", "c"] assert np.issubdtype(enc.categories_[0].dtype, np.object_) # unsorted passed 
categories still raise for numerical values X = np.array([[1, 2]]).T enc = OneHotEncoder(categories=[[2, 1, 3]]) msg = "Unsorted categories are not supported" with pytest.raises(ValueError, match=msg): enc.fit_transform(X) @pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) def test_encoder_nan_ending_specified_categories(Encoder): """Test encoder for specified categories that nan is at the end. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27088 """ cats = [np.array([0, np.nan, 1])] enc = Encoder(categories=cats) X = np.array([[0, 1]], dtype=object).T with pytest.raises(ValueError, match="Nan should be the last element"): enc.fit(X) def test_one_hot_encoder_specified_categories_mixed_columns(): # multiple columns X = np.array([["a", "b"], [0, 2]], dtype=object).T enc = OneHotEncoder(categories=[["a", "b", "c"], [0, 1, 2]]) exp = np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]]) assert_array_equal(enc.fit_transform(X).toarray(), exp) assert enc.categories_[0].tolist() == ["a", "b", "c"] assert np.issubdtype(enc.categories_[0].dtype, np.object_) assert enc.categories_[1].tolist() == [0, 1, 2] # integer categories but from object dtype data assert np.issubdtype(enc.categories_[1].dtype, np.object_) def test_one_hot_encoder_pandas(): pd = pytest.importorskip("pandas") X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) Xtr = check_categorical_onehot(X_df) assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]]) @pytest.mark.parametrize( "drop, expected_names", [ ("first", ["x0_c", "x2_b"]), ("if_binary", ["x0_c", "x1_2", "x2_b"]), (["c", 2, "b"], ["x0_b", "x2_a"]), ], ids=["first", "binary", "manual"], ) def test_one_hot_encoder_feature_names_drop(drop, expected_names): X = [["c", 2, "a"], ["b", 2, "b"]] ohe = OneHotEncoder(drop=drop) ohe.fit(X) feature_names = ohe.get_feature_names_out() assert_array_equal(expected_names, feature_names) def test_one_hot_encoder_drop_equals_if_binary(): # Canonical 
case X = [[10, "yes"], [20, "no"], [30, "yes"]] expected = np.array( [[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]] ) expected_drop_idx = np.array([None, 0]) ohe = OneHotEncoder(drop="if_binary", sparse_output=False) result = ohe.fit_transform(X) assert_array_equal(ohe.drop_idx_, expected_drop_idx) assert_allclose(result, expected) # with only one cat, the behaviour is equivalent to drop=None X = [["true", "a"], ["false", "a"], ["false", "a"]] expected = np.array([[1.0, 1.0], [0.0, 1.0], [0.0, 1.0]]) expected_drop_idx = np.array([0, None]) ohe = OneHotEncoder(drop="if_binary", sparse_output=False) result = ohe.fit_transform(X) assert_array_equal(ohe.drop_idx_, expected_drop_idx) assert_allclose(result, expected) @pytest.mark.parametrize( "X", [ [["abc", 2, 55], ["def", 1, 55]], np.array([[10, 2, 55], [20, 1, 55]]), np.array([["a", "B", "cat"], ["b", "A", "cat"]], dtype=object), ], ids=["mixed", "numeric", "object"], ) def test_ordinal_encoder(X): enc = OrdinalEncoder() exp = np.array([[0, 1, 0], [1, 0, 0]], dtype="int64") assert_array_equal(enc.fit_transform(X), exp.astype("float64")) enc = OrdinalEncoder(dtype="int64") assert_array_equal(enc.fit_transform(X), exp) @pytest.mark.parametrize( "X, X2, cats, cat_dtype", [ ( np.array([["a", "b"]], dtype=object).T, np.array([["a", "d"]], dtype=object).T, [["a", "b", "c"]], np.object_, ), ( np.array([[1, 2]], dtype="int64").T, np.array([[1, 4]], dtype="int64").T, [[1, 2, 3]], np.int64, ), ( np.array([["a", "b"]], dtype=object).T, np.array([["a", "d"]], dtype=object).T, [np.array(["a", "b", "c"])], np.object_, ), ], ids=["object", "numeric", "object-string-cat"], ) def test_ordinal_encoder_specified_categories(X, X2, cats, cat_dtype): enc = OrdinalEncoder(categories=cats) exp = np.array([[0.0], [1.0]]) assert_array_equal(enc.fit_transform(X), exp) assert list(enc.categories[0]) == list(cats[0]) assert enc.categories_[0].tolist() == list(cats[0]) # manually specified categories should have same dtype as 
# the data when coerced from lists assert enc.categories_[0].dtype == cat_dtype # when specifying categories manually, unknown categories should already # raise when fitting enc = OrdinalEncoder(categories=cats) with pytest.raises(ValueError, match="Found unknown categories"): enc.fit(X2) def test_ordinal_encoder_inverse(): X = [["abc", 2, 55], ["def", 1, 55]] enc = OrdinalEncoder() X_tr = enc.fit_transform(X) exp = np.array(X, dtype=object) assert_array_equal(enc.inverse_transform(X_tr), exp) # incorrect shape raises X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]]) msg = re.escape("Shape of the passed X data is not correct") with pytest.raises(ValueError, match=msg): enc.inverse_transform(X_tr) def test_ordinal_encoder_handle_unknowns_string(): enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-2) X_fit = np.array([["a", "x"], ["b", "y"], ["c", "z"]], dtype=object) X_trans = np.array([["c", "xy"], ["bla", "y"], ["a", "x"]], dtype=object) enc.fit(X_fit) X_trans_enc = enc.transform(X_trans) exp = np.array([[2, -2], [-2, 1], [0, 0]], dtype="int64") assert_array_equal(X_trans_enc, exp) X_trans_inv = enc.inverse_transform(X_trans_enc) inv_exp = np.array([["c", None], [None, "y"], ["a", "x"]], dtype=object) assert_array_equal(X_trans_inv, inv_exp) @pytest.mark.parametrize("dtype", [float, int]) def test_ordinal_encoder_handle_unknowns_numeric(dtype): enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-999) X_fit = np.array([[1, 7], [2, 8], [3, 9]], dtype=dtype) X_trans = np.array([[3, 12], [23, 8], [1, 7]], dtype=dtype) enc.fit(X_fit) X_trans_enc = enc.transform(X_trans) exp = np.array([[2, -999], [-999, 1], [0, 0]], dtype="int64") assert_array_equal(X_trans_enc, exp) X_trans_inv = enc.inverse_transform(X_trans_enc) inv_exp = np.array([[3, None], [None, 8], [1, 7]], dtype=object) assert_array_equal(X_trans_inv, inv_exp) def test_ordinal_encoder_handle_unknowns_nan(): # Make sure unknown_value=np.nan properly works enc = 
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan) X_fit = np.array([[1], [2], [3]]) enc.fit(X_fit) X_trans = enc.transform([[1], [2], [4]]) assert_array_equal(X_trans, [[0], [1], [np.nan]]) def test_ordinal_encoder_handle_unknowns_nan_non_float_dtype(): # Make sure an error is raised when unknown_value=np.nan and the dtype # isn't a float dtype enc = OrdinalEncoder( handle_unknown="use_encoded_value", unknown_value=np.nan, dtype=int ) X_fit = np.array([[1], [2], [3]]) with pytest.raises(ValueError, match="dtype parameter should be a float dtype"): enc.fit(X_fit) def test_ordinal_encoder_raise_categories_shape(): X = np.array([["Low", "Medium", "High", "Medium", "Low"]], dtype=object).T cats = ["Low", "Medium", "High"] enc = OrdinalEncoder(categories=cats) msg = "Shape mismatch: if categories is an array," with pytest.raises(ValueError, match=msg): enc.fit(X) def test_encoder_dtypes(): # check that dtypes are preserved when determining categories enc = OneHotEncoder(categories="auto") exp = np.array([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]], dtype="float64") for X in [ np.array([[1, 2], [3, 4]], dtype="int64"), np.array([[1, 2], [3, 4]], dtype="float64"), np.array([["a", "b"], ["c", "d"]]), # str dtype np.array([[b"a", b"b"], [b"c", b"d"]]), # bytes dtype np.array([[1, "a"], [3, "b"]], dtype="object"), ]: enc.fit(X) assert all([enc.categories_[i].dtype == X.dtype for i in range(2)]) assert_array_equal(enc.transform(X).toarray(), exp) X = [[1, 2], [3, 4]] enc.fit(X) assert all([np.issubdtype(enc.categories_[i].dtype, np.integer) for i in range(2)]) assert_array_equal(enc.transform(X).toarray(), exp) X = [[1, "a"], [3, "b"]] enc.fit(X) assert all([enc.categories_[i].dtype == "object" for i in range(2)]) assert_array_equal(enc.transform(X).toarray(), exp) def test_encoder_dtypes_pandas(): # check dtype (similar to test_categorical_encoder_dtypes for dataframes) pd = pytest.importorskip("pandas") enc = OneHotEncoder(categories="auto") exp = 
np.array( [[1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]], dtype="float64", ) X = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}, dtype="int64") enc.fit(X) assert all([enc.categories_[i].dtype == "int64" for i in range(2)]) assert_array_equal(enc.transform(X).toarray(), exp) X = pd.DataFrame({"A": [1, 2], "B": ["a", "b"], "C": [3.0, 4.0]}) expected_cat_type = ["int64", "object", "float64"] enc.fit(X) assert all([enc.categories_[i].dtype == expected_cat_type[i] for i in range(3)]) assert_array_equal(enc.transform(X).toarray(), exp) def test_one_hot_encoder_warning(): enc = OneHotEncoder() X = [["Male", 1], ["Female", 3]] with warnings.catch_warnings(): warnings.simplefilter("error") enc.fit_transform(X) @pytest.mark.parametrize("drop", ["if_binary", "first"]) def test_ohe_handle_unknown_warn(drop): """Check handle_unknown='warn' works correctly.""" X = [["a", 0], ["b", 2], ["b", 1]] ohe = OneHotEncoder( drop=drop, sparse_output=False, handle_unknown="warn", categories=[["b", "a"], [1, 2]], ) ohe.fit(X) X_test = [["c", 1]] X_expected = np.array([[0, 0]]) warn_msg = ( r"Found unknown categories in columns \[0\] during transform. " r"These unknown categories will be encoded as the " r"infrequent category." 
) with pytest.warns(UserWarning, match=warn_msg): X_trans = ohe.transform(X_test) assert_allclose(X_trans, X_expected) @pytest.mark.parametrize("missing_value", [np.nan, None, float("nan")]) def test_one_hot_encoder_drop_manual(missing_value): cats_to_drop = ["def", 12, 3, 56, missing_value] enc = OneHotEncoder(drop=cats_to_drop) X = [ ["abc", 12, 2, 55, "a"], ["def", 12, 1, 55, "a"], ["def", 12, 3, 56, missing_value], ] trans = enc.fit_transform(X).toarray() exp = [[1, 0, 1, 1, 1], [0, 1, 0, 1, 1], [0, 0, 0, 0, 0]] assert_array_equal(trans, exp) assert enc.drop is cats_to_drop dropped_cats = [ cat[feature] for cat, feature in zip(enc.categories_, enc.drop_idx_) ] X_inv_trans = enc.inverse_transform(trans) X_array = np.array(X, dtype=object) # last value is np.nan if is_scalar_nan(cats_to_drop[-1]): assert_array_equal(dropped_cats[:-1], cats_to_drop[:-1]) assert is_scalar_nan(dropped_cats[-1]) assert is_scalar_nan(cats_to_drop[-1]) # do not include the last column which includes missing values assert_array_equal(X_array[:, :-1], X_inv_trans[:, :-1]) # check last column is the missing value assert_array_equal(X_array[-1, :-1], X_inv_trans[-1, :-1]) assert is_scalar_nan(X_array[-1, -1]) assert is_scalar_nan(X_inv_trans[-1, -1]) else: assert_array_equal(dropped_cats, cats_to_drop) assert_array_equal(X_array, X_inv_trans) @pytest.mark.parametrize("drop", [["abc", 3], ["abc", 3, 41, "a"]]) def test_invalid_drop_length(drop): enc = OneHotEncoder(drop=drop) err_msg = "`drop` should have length equal to the number" with pytest.raises(ValueError, match=err_msg): enc.fit([["abc", 2, 55], ["def", 1, 55], ["def", 3, 59]]) @pytest.mark.parametrize("density", [True, False], ids=["sparse", "dense"]) @pytest.mark.parametrize("drop", ["first", ["a", 2, "b"]], ids=["first", "manual"]) def test_categories(density, drop): ohe_base = OneHotEncoder(sparse_output=density) ohe_test = OneHotEncoder(sparse_output=density, drop=drop) X = [["c", 1, "a"], ["a", 2, "b"]] ohe_base.fit(X) 
ohe_test.fit(X) assert_array_equal(ohe_base.categories_, ohe_test.categories_) if drop == "first": assert_array_equal(ohe_test.drop_idx_, 0) else: for drop_cat, drop_idx, cat_list in zip( drop, ohe_test.drop_idx_, ohe_test.categories_ ): assert cat_list[int(drop_idx)] == drop_cat assert isinstance(ohe_test.drop_idx_, np.ndarray) assert ohe_test.drop_idx_.dtype == object @pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) def test_encoders_has_categorical_tags(Encoder): assert Encoder().__sklearn_tags__().input_tags.categorical @pytest.mark.parametrize( "kwargs", [ {"max_categories": 2}, {"min_frequency": 11}, {"min_frequency": 0.29}, {"max_categories": 2, "min_frequency": 6}, {"max_categories": 4, "min_frequency": 12}, ], ) @pytest.mark.parametrize("categories", ["auto", [["a", "b", "c", "d"]]]) def test_ohe_infrequent_two_levels(kwargs, categories): """Test that different parameters for combine 'a', 'c', and 'd' into the infrequent category works as expected.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( categories=categories, handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs, ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [["a", "c", "d"]]) X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv) feature_names = ohe.get_feature_names_out() assert_array_equal(["x0_b", "x0_infrequent_sklearn"], feature_names) @pytest.mark.parametrize("drop", ["if_binary", "first", ["b"]]) def test_ohe_infrequent_two_levels_drop_frequent(drop): """Test two levels and dropping the frequent category."""
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_common.py
sklearn/preprocessing/tests/test_common.py
import warnings import numpy as np import pytest from sklearn.base import clone from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.preprocessing import ( MaxAbsScaler, MinMaxScaler, PowerTransformer, QuantileTransformer, RobustScaler, StandardScaler, maxabs_scale, minmax_scale, power_transform, quantile_transform, robust_scale, scale, ) from sklearn.utils._testing import assert_allclose, assert_array_equal from sklearn.utils.fixes import ( BSR_CONTAINERS, COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS, DIA_CONTAINERS, DOK_CONTAINERS, LIL_CONTAINERS, ) iris = load_iris() def _get_valid_samples_by_column(X, col): """Get non NaN samples in column of X""" return X[:, [col]][~np.isnan(X[:, col])] @pytest.mark.parametrize( "est, func, support_sparse, strictly_positive, omit_kwargs", [ (MaxAbsScaler(), maxabs_scale, True, False, ["clip"]), (MinMaxScaler(), minmax_scale, False, False, ["clip"]), (StandardScaler(), scale, False, False, []), (StandardScaler(with_mean=False), scale, True, False, []), (PowerTransformer("yeo-johnson"), power_transform, False, False, []), (PowerTransformer("box-cox"), power_transform, False, True, []), (QuantileTransformer(n_quantiles=10), quantile_transform, True, False, []), (RobustScaler(), robust_scale, False, False, []), (RobustScaler(with_centering=False), robust_scale, True, False, []), ], ) def test_missing_value_handling( est, func, support_sparse, strictly_positive, omit_kwargs ): # check that the preprocessing method let pass nan rng = np.random.RandomState(42) X = iris.data.copy() n_missing = 50 X[ rng.randint(X.shape[0], size=n_missing), rng.randint(X.shape[1], size=n_missing) ] = np.nan if strictly_positive: X += np.nanmin(X) + 0.1 X_train, X_test = train_test_split(X, random_state=1) # sanity check assert not np.all(np.isnan(X_train), axis=0).any() assert np.any(np.isnan(X_train), axis=0).all() assert np.any(np.isnan(X_test), axis=0).all() X_test[:, 0] = np.nan # make sure this 
boundary case is tested est = clone(est) with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) Xt = est.fit(X_train).transform(X_test) # ensure no warnings are raised # missing values should still be missing, and only them assert_array_equal(np.isnan(Xt), np.isnan(X_test)) # check that the function leads to the same results as the class with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) Xt_class = est.transform(X_train) kwargs = est.get_params() # remove the parameters which should be omitted because they # are not defined in the counterpart function of the preprocessing class for kwarg in omit_kwargs: _ = kwargs.pop(kwarg) Xt_func = func(X_train, **kwargs) assert_array_equal(np.isnan(Xt_func), np.isnan(Xt_class)) assert_allclose(Xt_func[~np.isnan(Xt_func)], Xt_class[~np.isnan(Xt_class)]) # check that the inverse transform keep NaN Xt_inv = est.inverse_transform(Xt) assert_array_equal(np.isnan(Xt_inv), np.isnan(X_test)) # FIXME: we can introduce equal_nan=True in recent version of numpy. # For the moment which just check that non-NaN values are almost equal. 
assert_allclose(Xt_inv[~np.isnan(Xt_inv)], X_test[~np.isnan(X_test)]) for i in range(X.shape[1]): # train only on non-NaN est.fit(_get_valid_samples_by_column(X_train, i)) # check transforming with NaN works even when training without NaN with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) Xt_col = est.transform(X_test[:, [i]]) assert_allclose(Xt_col, Xt[:, [i]]) # check non-NaN is handled as before - the 1st column is all nan if not np.isnan(X_test[:, i]).all(): Xt_col_nonan = est.transform(_get_valid_samples_by_column(X_test, i)) assert_array_equal(Xt_col_nonan, Xt_col[~np.isnan(Xt_col.squeeze())]) if support_sparse: est_dense = clone(est) est_sparse = clone(est) with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) Xt_dense = est_dense.fit(X_train).transform(X_test) Xt_inv_dense = est_dense.inverse_transform(Xt_dense) for sparse_container in ( BSR_CONTAINERS + COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS + DIA_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS ): # check that the dense and sparse inputs lead to the same results # precompute the matrix to avoid catching side warnings X_train_sp = sparse_container(X_train) X_test_sp = sparse_container(X_test) with warnings.catch_warnings(): warnings.simplefilter("ignore", PendingDeprecationWarning) warnings.simplefilter("error", RuntimeWarning) Xt_sp = est_sparse.fit(X_train_sp).transform(X_test_sp) assert_allclose(Xt_sp.toarray(), Xt_dense) with warnings.catch_warnings(): warnings.simplefilter("ignore", PendingDeprecationWarning) warnings.simplefilter("error", RuntimeWarning) Xt_inv_sp = est_sparse.inverse_transform(Xt_sp) assert_allclose(Xt_inv_sp.toarray(), Xt_inv_dense) @pytest.mark.parametrize( "est, func", [ (MaxAbsScaler(), maxabs_scale), (MinMaxScaler(), minmax_scale), (StandardScaler(), scale), (StandardScaler(with_mean=False), scale), (PowerTransformer("yeo-johnson"), power_transform), ( PowerTransformer("box-cox"), power_transform, ), 
(QuantileTransformer(n_quantiles=3), quantile_transform), (RobustScaler(), robust_scale), (RobustScaler(with_centering=False), robust_scale), ], ) def test_missing_value_pandas_na_support(est, func): # Test pandas IntegerArray with pd.NA pd = pytest.importorskip("pandas") X = np.array( [ [1, 2, 3, np.nan, np.nan, 4, 5, 1], [np.nan, np.nan, 8, 4, 6, np.nan, np.nan, 8], [1, 2, 3, 4, 5, 6, 7, 8], ] ).T # Creates dataframe with IntegerArrays with pd.NA X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c"]) X_df["c"] = X_df["c"].astype("int") X_trans = est.fit_transform(X) X_df_trans = est.fit_transform(X_df) assert_allclose(X_trans, X_df_trans)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_function_transformer.py
sklearn/preprocessing/tests/test_function_transformer.py
import warnings import numpy as np import pytest from sklearn.pipeline import make_pipeline from sklearn.preprocessing import FunctionTransformer, StandardScaler from sklearn.utils._testing import ( _convert_container, assert_allclose_dense_sparse, assert_array_equal, ) from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X): def _func(X, *args, **kwargs): args_store.append(X) args_store.extend(args) kwargs_store.update(kwargs) return func(X) return _func def test_delegate_to_func(): # (args|kwargs)_store will hold the positional and keyword arguments # passed to the function inside the FunctionTransformer. args_store = [] kwargs_store = {} X = np.arange(10).reshape((5, 2)) assert_array_equal( FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X), X, "transform should have returned X unchanged", ) # The function should only have received X. assert args_store == [X], ( "Incorrect positional arguments passed to func: {args}".format(args=args_store) ) assert not kwargs_store, ( "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store) ) # reset the argument stores. args_store[:] = [] kwargs_store.clear() transformed = FunctionTransformer( _make_func(args_store, kwargs_store), ).transform(X) assert_array_equal( transformed, X, err_msg="transform should have returned X unchanged" ) # The function should have received X assert args_store == [X], ( "Incorrect positional arguments passed to func: {args}".format(args=args_store) ) assert not kwargs_store, ( "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store) ) def test_np_log(): X = np.arange(10).reshape((5, 2)) # Test that the numpy.log example still works. 
assert_array_equal( FunctionTransformer(np.log1p).transform(X), np.log1p(X), ) def test_kw_arg(): X = np.linspace(0, 1, num=10).reshape((5, 2)) F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) # Test that rounding is correct assert_array_equal(F.transform(X), np.around(X, decimals=3)) def test_kw_arg_update(): X = np.linspace(0, 1, num=10).reshape((5, 2)) F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) F.kw_args["decimals"] = 1 # Test that rounding is correct assert_array_equal(F.transform(X), np.around(X, decimals=1)) def test_kw_arg_reset(): X = np.linspace(0, 1, num=10).reshape((5, 2)) F = FunctionTransformer(np.around, kw_args=dict(decimals=3)) F.kw_args = dict(decimals=1) # Test that rounding is correct assert_array_equal(F.transform(X), np.around(X, decimals=1)) def test_inverse_transform(): X = np.array([1, 4, 9, 16]).reshape((2, 2)) # Test that inverse_transform works correctly F = FunctionTransformer( func=np.sqrt, inverse_func=np.around, inv_kw_args=dict(decimals=3), ) assert_array_equal( F.inverse_transform(F.transform(X)), np.around(np.sqrt(X), decimals=3), ) @pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS) def test_check_inverse(sparse_container): X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2)) if sparse_container is not None: X = sparse_container(X) trans = FunctionTransformer( func=np.sqrt, inverse_func=np.around, accept_sparse=sparse_container is not None, check_inverse=True, validate=True, ) warning_message = ( "The provided functions are not strictly" " inverse of each other. If you are sure you" " want to proceed regardless, set" " 'check_inverse=False'." 
) with pytest.warns(UserWarning, match=warning_message): trans.fit(X) trans = FunctionTransformer( func=np.expm1, inverse_func=np.log1p, accept_sparse=sparse_container is not None, check_inverse=True, validate=True, ) with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) Xt = trans.fit_transform(X) assert_allclose_dense_sparse(X, trans.inverse_transform(Xt)) def test_check_inverse_func_or_inverse_not_provided(): # check that we don't check inverse when one of the func or inverse is not # provided. X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2)) trans = FunctionTransformer( func=np.expm1, inverse_func=None, check_inverse=True, validate=True ) with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) trans.fit(X) trans = FunctionTransformer( func=None, inverse_func=np.expm1, check_inverse=True, validate=True ) with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) trans.fit(X) def test_function_transformer_frame(): pd = pytest.importorskip("pandas") X_df = pd.DataFrame(np.random.randn(100, 10)) transformer = FunctionTransformer() X_df_trans = transformer.fit_transform(X_df) assert hasattr(X_df_trans, "loc") @pytest.mark.parametrize("X_type", ["array", "series"]) def test_function_transformer_raise_error_with_mixed_dtype(X_type): """Check that `FunctionTransformer.check_inverse` raises error on mixed dtype.""" mapping = {"one": 1, "two": 2, "three": 3, 5: "five", 6: "six"} inverse_mapping = {value: key for key, value in mapping.items()} dtype = "object" data = ["one", "two", "three", "one", "one", 5, 6] data = _convert_container(data, X_type, columns_name=["value"], dtype=dtype) def func(X): return np.array([mapping[X[i]] for i in range(X.size)], dtype=object) def inverse_func(X): return _convert_container( [inverse_mapping[x] for x in X], X_type, columns_name=["value"], dtype=dtype, ) transformer = FunctionTransformer( func=func, inverse_func=inverse_func, validate=False, check_inverse=True ) 
msg = "'check_inverse' is only supported when all the elements in `X` is numerical." with pytest.raises(ValueError, match=msg): transformer.fit(data) def test_function_transformer_support_all_nummerical_dataframes_check_inverse_True(): """Check support for dataframes with only numerical values.""" pd = pytest.importorskip("pandas") df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) transformer = FunctionTransformer( func=lambda x: x + 2, inverse_func=lambda x: x - 2, check_inverse=True ) # Does not raise an error df_out = transformer.fit_transform(df) assert_allclose_dense_sparse(df_out, df + 2) def test_function_transformer_with_dataframe_and_check_inverse_True(): """Check error is raised when check_inverse=True. Non-regresion test for gh-25261. """ pd = pytest.importorskip("pandas") transformer = FunctionTransformer( func=lambda x: x, inverse_func=lambda x: x, check_inverse=True ) df_mixed = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) msg = "'check_inverse' is only supported when all the elements in `X` is numerical." 
with pytest.raises(ValueError, match=msg): transformer.fit(df_mixed) @pytest.mark.parametrize( "X, feature_names_out, input_features, expected", [ ( # NumPy inputs, default behavior: generate names np.random.rand(100, 3), "one-to-one", None, ("x0", "x1", "x2"), ), ( # Pandas input, default behavior: use input feature names {"a": np.random.rand(100), "b": np.random.rand(100)}, "one-to-one", None, ("a", "b"), ), ( # NumPy input, feature_names_out=callable np.random.rand(100, 3), lambda transformer, input_features: ("a", "b"), None, ("a", "b"), ), ( # Pandas input, feature_names_out=callable {"a": np.random.rand(100), "b": np.random.rand(100)}, lambda transformer, input_features: ("c", "d", "e"), None, ("c", "d", "e"), ), ( # NumPy input, feature_names_out=callable – default input_features np.random.rand(100, 3), lambda transformer, input_features: tuple(input_features) + ("a",), None, ("x0", "x1", "x2", "a"), ), ( # Pandas input, feature_names_out=callable – default input_features {"a": np.random.rand(100), "b": np.random.rand(100)}, lambda transformer, input_features: tuple(input_features) + ("c",), None, ("a", "b", "c"), ), ( # NumPy input, input_features=list of names np.random.rand(100, 3), "one-to-one", ("a", "b", "c"), ("a", "b", "c"), ), ( # Pandas input, input_features=list of names {"a": np.random.rand(100), "b": np.random.rand(100)}, "one-to-one", ("a", "b"), # must match feature_names_in_ ("a", "b"), ), ( # NumPy input, feature_names_out=callable, input_features=list np.random.rand(100, 3), lambda transformer, input_features: tuple(input_features) + ("d",), ("a", "b", "c"), ("a", "b", "c", "d"), ), ( # Pandas input, feature_names_out=callable, input_features=list {"a": np.random.rand(100), "b": np.random.rand(100)}, lambda transformer, input_features: tuple(input_features) + ("c",), ("a", "b"), # must match feature_names_in_ ("a", "b", "c"), ), ], ) @pytest.mark.parametrize("validate", [True, False]) def test_function_transformer_get_feature_names_out( X, 
feature_names_out, input_features, expected, validate ): if isinstance(X, dict): pd = pytest.importorskip("pandas") X = pd.DataFrame(X) transformer = FunctionTransformer( feature_names_out=feature_names_out, validate=validate ) transformer.fit(X) names = transformer.get_feature_names_out(input_features) assert isinstance(names, np.ndarray) assert names.dtype == object assert_array_equal(names, expected) def test_function_transformer_get_feature_names_out_without_validation(): transformer = FunctionTransformer(feature_names_out="one-to-one", validate=False) X = np.random.rand(100, 2) transformer.fit_transform(X) names = transformer.get_feature_names_out(("a", "b")) assert isinstance(names, np.ndarray) assert names.dtype == object assert_array_equal(names, ("a", "b")) def test_function_transformer_feature_names_out_is_None(): transformer = FunctionTransformer() X = np.random.rand(100, 2) transformer.fit_transform(X) msg = "This 'FunctionTransformer' has no attribute 'get_feature_names_out'" with pytest.raises(AttributeError, match=msg): transformer.get_feature_names_out() def test_function_transformer_feature_names_out_uses_estimator(): def add_n_random_features(X, n): return np.concatenate([X, np.random.rand(len(X), n)], axis=1) def feature_names_out(transformer, input_features): n = transformer.kw_args["n"] return list(input_features) + [f"rnd{i}" for i in range(n)] transformer = FunctionTransformer( func=add_n_random_features, feature_names_out=feature_names_out, kw_args=dict(n=3), validate=True, ) pd = pytest.importorskip("pandas") df = pd.DataFrame({"a": np.random.rand(100), "b": np.random.rand(100)}) transformer.fit_transform(df) names = transformer.get_feature_names_out() assert isinstance(names, np.ndarray) assert names.dtype == object assert_array_equal(names, ("a", "b", "rnd0", "rnd1", "rnd2")) def test_function_transformer_validate_inverse(): """Test that function transformer does not reset estimator in `inverse_transform`.""" def add_constant_feature(X): 
X_one = np.ones((X.shape[0], 1)) return np.concatenate((X, X_one), axis=1) def inverse_add_constant(X): return X[:, :-1] X = np.array([[1, 2], [3, 4], [3, 4]]) trans = FunctionTransformer( func=add_constant_feature, inverse_func=inverse_add_constant, validate=True, ) X_trans = trans.fit_transform(X) assert trans.n_features_in_ == X.shape[1] trans.inverse_transform(X_trans) assert trans.n_features_in_ == X.shape[1] @pytest.mark.parametrize( "feature_names_out, expected", [ ("one-to-one", ["pet", "color"]), [lambda est, names: [f"{n}_out" for n in names], ["pet_out", "color_out"]], ], ) @pytest.mark.parametrize("in_pipeline", [True, False]) def test_get_feature_names_out_dataframe_with_string_data( feature_names_out, expected, in_pipeline ): """Check that get_feature_names_out works with DataFrames with string data.""" pd = pytest.importorskip("pandas") X = pd.DataFrame({"pet": ["dog", "cat"], "color": ["red", "green"]}) def func(X): if feature_names_out == "one-to-one": return X else: name = feature_names_out(None, X.columns) return X.rename(columns=dict(zip(X.columns, name))) transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out) if in_pipeline: transformer = make_pipeline(transformer) X_trans = transformer.fit_transform(X) assert isinstance(X_trans, pd.DataFrame) names = transformer.get_feature_names_out() assert isinstance(names, np.ndarray) assert names.dtype == object assert_array_equal(names, expected) def test_set_output_func(): """Check behavior of set_output with different settings.""" pd = pytest.importorskip("pandas") X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) ft = FunctionTransformer(np.log, feature_names_out="one-to-one") # no warning is raised when feature_names_out is defined with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) ft.set_output(transform="pandas") X_trans = ft.fit_transform(X) assert isinstance(X_trans, pd.DataFrame) assert_array_equal(X_trans.columns, ["a", "b"]) ft = 
FunctionTransformer(lambda x: 2 * x) ft.set_output(transform="pandas") # no warning is raised when func returns a panda dataframe with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) X_trans = ft.fit_transform(X) assert isinstance(X_trans, pd.DataFrame) assert_array_equal(X_trans.columns, ["a", "b"]) # Warning is raised when func returns an ndarray ft_np = FunctionTransformer(lambda x: np.asarray(x)) for transform in ("pandas", "polars"): ft_np.set_output(transform=transform) msg = ( f"When `set_output` is configured to be '{transform}'.*{transform} " "DataFrame.*" ) with pytest.warns(UserWarning, match=msg): ft_np.fit_transform(X) # default transform does not warn ft_np.set_output(transform="default") with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) ft_np.fit_transform(X) def test_consistence_column_name_between_steps(): """Check that we have a consistence between the feature names out of `FunctionTransformer` and the feature names in of the next step in the pipeline. 
Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27695 """ pd = pytest.importorskip("pandas") def with_suffix(_, names): return [name + "__log" for name in names] pipeline = make_pipeline( FunctionTransformer(np.log1p, feature_names_out=with_suffix), StandardScaler() ) df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "b"]) X_trans = pipeline.fit_transform(df) assert pipeline.get_feature_names_out().tolist() == ["a__log", "b__log"] # StandardScaler will convert to a numpy array assert isinstance(X_trans, np.ndarray) @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) @pytest.mark.parametrize("transform_output", ["default", "pandas", "polars"]) def test_function_transformer_overwrite_column_names(dataframe_lib, transform_output): """Check that we overwrite the column names when we should.""" lib = pytest.importorskip(dataframe_lib) if transform_output != "numpy": pytest.importorskip(transform_output) df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) def with_suffix(_, names): return [name + "__log" for name in names] transformer = FunctionTransformer(feature_names_out=with_suffix).set_output( transform=transform_output ) X_trans = transformer.fit_transform(df) assert_array_equal(np.asarray(X_trans), np.asarray(df)) feature_names = transformer.get_feature_names_out() assert list(X_trans.columns) == with_suffix(None, df.columns) assert feature_names.tolist() == with_suffix(None, df.columns) @pytest.mark.parametrize( "feature_names_out", ["one-to-one", lambda _, names: [f"{name}_log" for name in names]], ) def test_function_transformer_overwrite_column_names_numerical(feature_names_out): """Check the same as `test_function_transformer_overwrite_column_names` but for the specific case of pandas where column names can be numerical.""" pd = pytest.importorskip("pandas") df = pd.DataFrame({0: [1, 2, 3], 1: [10, 20, 100]}) transformer = FunctionTransformer(feature_names_out=feature_names_out) X_trans = 
transformer.fit_transform(df) assert_array_equal(np.asarray(X_trans), np.asarray(df)) feature_names = transformer.get_feature_names_out() assert list(X_trans.columns) == list(feature_names) @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) @pytest.mark.parametrize( "feature_names_out", ["one-to-one", lambda _, names: [f"{name}_log" for name in names]], ) def test_function_transformer_error_column_inconsistent( dataframe_lib, feature_names_out ): """Check that we raise an error when `func` returns a dataframe with new column names that become inconsistent with `get_feature_names_out`.""" lib = pytest.importorskip(dataframe_lib) df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]}) def func(df): if dataframe_lib == "pandas": return df.rename(columns={"a": "c"}) else: return df.rename({"a": "c"}) transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out) err_msg = "The output generated by `func` have different column names" with pytest.raises(ValueError, match=err_msg): transformer.fit_transform(df).columns
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_target_encoder.py
sklearn/preprocessing/tests/test_target_encoder.py
import re import warnings import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import Ridge from sklearn.model_selection import ( KFold, ShuffleSplit, StratifiedKFold, cross_val_score, train_test_split, ) from sklearn.pipeline import make_pipeline from sklearn.preprocessing import ( KBinsDiscretizer, LabelBinarizer, LabelEncoder, TargetEncoder, ) from sklearn.utils.fixes import parse_version def _encode_target(X_ordinal, y_numeric, n_categories, smooth): """Simple Python implementation of target encoding.""" cur_encodings = np.zeros(n_categories, dtype=np.float64) y_mean = np.mean(y_numeric) if smooth == "auto": y_variance = np.var(y_numeric) for c in range(n_categories): y_subset = y_numeric[X_ordinal == c] n_i = y_subset.shape[0] if n_i == 0: cur_encodings[c] = y_mean continue y_subset_variance = np.var(y_subset) m = y_subset_variance / y_variance lambda_ = n_i / (n_i + m) cur_encodings[c] = lambda_ * np.mean(y_subset) + (1 - lambda_) * y_mean return cur_encodings else: # float for c in range(n_categories): y_subset = y_numeric[X_ordinal == c] current_sum = np.sum(y_subset) + y_mean * smooth current_cnt = y_subset.shape[0] + smooth cur_encodings[c] = current_sum / current_cnt return cur_encodings @pytest.mark.parametrize( "categories, unknown_value", [ ([np.array([0, 1, 2], dtype=np.int64)], 4), ([np.array([1.0, 3.0, np.nan], dtype=np.float64)], 6.0), ([np.array(["cat", "dog", "snake"], dtype=object)], "bear"), ("auto", 3), ], ) @pytest.mark.parametrize("smooth", [5.0, "auto"]) @pytest.mark.parametrize("target_type", ["binary", "continuous"]) def test_encoding(categories, unknown_value, global_random_seed, smooth, target_type): """Check encoding for binary and continuous targets. Compare the values returned by `TargetEncoder.fit_transform` against the expected encodings for cv splits from a naive reference Python implementation in _encode_target. 
""" n_categories = 3 X_train_int_array = np.array([[0] * 20 + [1] * 30 + [2] * 40], dtype=np.int64).T X_test_int_array = np.array([[0, 1, 2]], dtype=np.int64).T n_samples = X_train_int_array.shape[0] if categories == "auto": X_train = X_train_int_array X_test = X_test_int_array else: X_train = categories[0][X_train_int_array] X_test = categories[0][X_test_int_array] X_test = np.concatenate((X_test, [[unknown_value]])) data_rng = np.random.RandomState(global_random_seed) n_splits = 3 if target_type == "binary": y_numeric = data_rng.randint(low=0, high=2, size=n_samples) target_names = np.array(["cat", "dog"], dtype=object) y_train = target_names[y_numeric] else: assert target_type == "continuous" y_numeric = data_rng.uniform(low=-10, high=20, size=n_samples) y_train = y_numeric shuffled_idx = data_rng.permutation(n_samples) X_train_int_array = X_train_int_array[shuffled_idx] X_train = X_train[shuffled_idx] y_train = y_train[shuffled_idx] y_numeric = y_numeric[shuffled_idx] # Define our CV splitting strategy if target_type == "binary": cv = StratifiedKFold( n_splits=n_splits, random_state=global_random_seed, shuffle=True ) else: cv = KFold(n_splits=n_splits, random_state=global_random_seed, shuffle=True) # Compute the expected values using our reference Python implementation of # target encoding: expected_X_fit_transform = np.empty_like(X_train_int_array, dtype=np.float64) for train_idx, test_idx in cv.split(X_train_int_array, y_train): X_, y_ = X_train_int_array[train_idx, 0], y_numeric[train_idx] cur_encodings = _encode_target(X_, y_, n_categories, smooth) expected_X_fit_transform[test_idx, 0] = cur_encodings[ X_train_int_array[test_idx, 0] ] # Check that we can obtain the same encodings by calling `fit_transform` on # the estimator with the same CV parameters: target_encoder = TargetEncoder( smooth=smooth, categories=categories, cv=n_splits, random_state=global_random_seed, ) X_fit_transform = target_encoder.fit_transform(X_train, y_train) assert 
target_encoder.target_type_ == target_type assert_allclose(X_fit_transform, expected_X_fit_transform) assert len(target_encoder.encodings_) == 1 if target_type == "binary": assert_array_equal(target_encoder.classes_, target_names) else: assert target_encoder.classes_ is None # compute encodings for all data to validate `transform` y_mean = np.mean(y_numeric) expected_encodings = _encode_target( X_train_int_array[:, 0], y_numeric, n_categories, smooth ) assert_allclose(target_encoder.encodings_[0], expected_encodings) assert target_encoder.target_mean_ == pytest.approx(y_mean) # Transform on test data, the last value is unknown so it is encoded as the target # mean expected_X_test_transform = np.concatenate( (expected_encodings, np.array([y_mean])) ).reshape(-1, 1) X_test_transform = target_encoder.transform(X_test) assert_allclose(X_test_transform, expected_X_test_transform) @pytest.mark.parametrize( "categories, unknown_values", [ ([np.array([0, 1, 2], dtype=np.int64)], "auto"), ([np.array(["cat", "dog", "snake"], dtype=object)], ["bear", "rabbit"]), ], ) @pytest.mark.parametrize( "target_labels", [np.array([1, 2, 3]), np.array(["a", "b", "c"])] ) @pytest.mark.parametrize("smooth", [5.0, "auto"]) def test_encoding_multiclass( global_random_seed, categories, unknown_values, target_labels, smooth ): """Check encoding for multiclass targets.""" rng = np.random.RandomState(global_random_seed) n_samples = 80 n_features = 2 feat_1_int = np.array(rng.randint(low=0, high=2, size=n_samples)) feat_2_int = np.array(rng.randint(low=0, high=3, size=n_samples)) feat_1 = categories[0][feat_1_int] feat_2 = categories[0][feat_2_int] X_train = np.column_stack((feat_1, feat_2)) X_train_int = np.column_stack((feat_1_int, feat_2_int)) categories_ = [[0, 1], [0, 1, 2]] n_classes = 3 y_train_int = np.array(rng.randint(low=0, high=n_classes, size=n_samples)) y_train = target_labels[y_train_int] y_train_enc = LabelBinarizer().fit_transform(y_train) n_splits = 3 cv = StratifiedKFold( 
n_splits=n_splits, random_state=global_random_seed, shuffle=True ) # Manually compute encodings for cv splits to validate `fit_transform` expected_X_fit_transform = np.empty( (X_train_int.shape[0], X_train_int.shape[1] * n_classes), dtype=np.float64, ) for f_idx, cats in enumerate(categories_): for c_idx in range(n_classes): for train_idx, test_idx in cv.split(X_train, y_train): y_class = y_train_enc[:, c_idx] X_, y_ = X_train_int[train_idx, f_idx], y_class[train_idx] current_encoding = _encode_target(X_, y_, len(cats), smooth) # f_idx: 0, 0, 0, 1, 1, 1 # c_idx: 0, 1, 2, 0, 1, 2 # exp_idx: 0, 1, 2, 3, 4, 5 exp_idx = c_idx + (f_idx * n_classes) expected_X_fit_transform[test_idx, exp_idx] = current_encoding[ X_train_int[test_idx, f_idx] ] target_encoder = TargetEncoder( smooth=smooth, cv=n_splits, random_state=global_random_seed, ) X_fit_transform = target_encoder.fit_transform(X_train, y_train) assert target_encoder.target_type_ == "multiclass" assert_allclose(X_fit_transform, expected_X_fit_transform) # Manually compute encoding to validate `transform` expected_encodings = [] for f_idx, cats in enumerate(categories_): for c_idx in range(n_classes): y_class = y_train_enc[:, c_idx] current_encoding = _encode_target( X_train_int[:, f_idx], y_class, len(cats), smooth ) expected_encodings.append(current_encoding) assert len(target_encoder.encodings_) == n_features * n_classes for i in range(n_features * n_classes): assert_allclose(target_encoder.encodings_[i], expected_encodings[i]) assert_array_equal(target_encoder.classes_, target_labels) # Include unknown values at the end X_test_int = np.array([[0, 1], [1, 2], [4, 5]]) if unknown_values == "auto": X_test = X_test_int else: X_test = np.empty_like(X_test_int[:-1, :], dtype=object) for column_idx in range(X_test_int.shape[1]): X_test[:, column_idx] = categories[0][X_test_int[:-1, column_idx]] # Add unknown values at end X_test = np.vstack((X_test, unknown_values)) y_mean = np.mean(y_train_enc, axis=0) 
expected_X_test_transform = np.empty( (X_test_int.shape[0], X_test_int.shape[1] * n_classes), dtype=np.float64, ) n_rows = X_test_int.shape[0] f_idx = [0, 0, 0, 1, 1, 1] # Last row are unknowns, dealt with later for row_idx in range(n_rows - 1): for i, enc in enumerate(expected_encodings): expected_X_test_transform[row_idx, i] = enc[X_test_int[row_idx, f_idx[i]]] # Unknowns encoded as target mean for each class # `y_mean` contains target mean for each class, thus cycle through mean of # each class, `n_features` times mean_idx = [0, 1, 2, 0, 1, 2] for i in range(n_classes * n_features): expected_X_test_transform[n_rows - 1, i] = y_mean[mean_idx[i]] X_test_transform = target_encoder.transform(X_test) assert_allclose(X_test_transform, expected_X_test_transform) @pytest.mark.parametrize( "X, categories", [ ( np.array([[0] * 10 + [1] * 10 + [3]], dtype=np.int64).T, # 3 is unknown [[0, 1, 2]], ), ( np.array( [["cat"] * 10 + ["dog"] * 10 + ["snake"]], dtype=object ).T, # snake is unknown [["dog", "cat", "cow"]], ), ], ) @pytest.mark.parametrize("smooth", [4.0, "auto"]) def test_custom_categories(X, categories, smooth): """Custom categories with unknown categories that are not in training data.""" rng = np.random.RandomState(0) y = rng.uniform(low=-10, high=20, size=X.shape[0]) enc = TargetEncoder(categories=categories, smooth=smooth, random_state=0).fit(X, y) # The last element is unknown and encoded as the mean y_mean = y.mean() X_trans = enc.transform(X[-1:]) assert X_trans[0, 0] == pytest.approx(y_mean) assert len(enc.encodings_) == 1 # custom category that is not in training data assert enc.encodings_[0][-1] == pytest.approx(y_mean) @pytest.mark.parametrize( "y, msg", [ ([1, 2, 0, 1], "Found input variables with inconsistent"), ( np.array([[1, 2, 0], [1, 2, 3]]).T, "Target type was inferred to be 'multiclass-multioutput'", ), ], ) def test_errors(y, msg): """Check invalidate input.""" X = np.array([[1, 0, 1]]).T enc = TargetEncoder() with pytest.raises(ValueError, 
match=msg): enc.fit_transform(X, y) def test_use_regression_target(): """Check inferred and specified `target_type` on regression target.""" X = np.array([[0, 1, 0, 1, 0, 1]]).T y = np.array([1.0, 2.0, 3.0, 2.0, 3.0, 4.0]) enc = TargetEncoder(cv=2) with pytest.warns( UserWarning, match=re.escape( "The least populated class in y has only 1 members, which is less than" " n_splits=2." ), ): enc.fit_transform(X, y) assert enc.target_type_ == "multiclass" enc = TargetEncoder(cv=2, target_type="continuous") enc.fit_transform(X, y) assert enc.target_type_ == "continuous" @pytest.mark.parametrize( "y, feature_names", [ ([1, 2] * 10, ["A", "B"]), ([1, 2, 3] * 6 + [1, 2], ["A_1", "A_2", "A_3", "B_1", "B_2", "B_3"]), ( ["y1", "y2", "y3"] * 6 + ["y1", "y2"], ["A_y1", "A_y2", "A_y3", "B_y1", "B_y2", "B_y3"], ), ], ) def test_feature_names_out_set_output(y, feature_names): """Check TargetEncoder works with set_output.""" pd = pytest.importorskip("pandas") X_df = pd.DataFrame({"A": ["a", "b"] * 10, "B": [1, 2] * 10}) enc_default = TargetEncoder(cv=2, smooth=3.0, random_state=0) enc_default.set_output(transform="default") enc_pandas = TargetEncoder(cv=2, smooth=3.0, random_state=0) enc_pandas.set_output(transform="pandas") X_default = enc_default.fit_transform(X_df, y) X_pandas = enc_pandas.fit_transform(X_df, y) assert_allclose(X_pandas.to_numpy(), X_default) assert_array_equal(enc_pandas.get_feature_names_out(), feature_names) assert_array_equal(enc_pandas.get_feature_names_out(), X_pandas.columns) @pytest.mark.parametrize("to_pandas", [True, False]) @pytest.mark.parametrize("smooth", [1.0, "auto"]) @pytest.mark.parametrize("target_type", ["binary-ints", "binary-str", "continuous"]) def test_multiple_features_quick(to_pandas, smooth, target_type): """Check target encoder with multiple features.""" X_ordinal = np.array( [[1, 1], [0, 1], [1, 1], [2, 1], [1, 0], [0, 1], [1, 0], [0, 0]], dtype=np.int64 ) if target_type == "binary-str": y_train = np.array(["a", "b", "a", "a", "b", 
"b", "a", "b"]) y_integer = LabelEncoder().fit_transform(y_train) cv = StratifiedKFold(2, random_state=0, shuffle=True) elif target_type == "binary-ints": y_train = np.array([3, 4, 3, 3, 3, 4, 4, 4]) y_integer = LabelEncoder().fit_transform(y_train) cv = StratifiedKFold(2, random_state=0, shuffle=True) else: y_train = np.array([3.0, 5.1, 2.4, 3.5, 4.1, 5.5, 10.3, 7.3], dtype=np.float32) y_integer = y_train cv = KFold(2, random_state=0, shuffle=True) y_mean = np.mean(y_integer) categories = [[0, 1, 2], [0, 1]] X_test = np.array( [ [0, 1], [3, 0], # 3 is unknown [1, 10], # 10 is unknown ], dtype=np.int64, ) if to_pandas: pd = pytest.importorskip("pandas") # convert second feature to an object X_train = pd.DataFrame( { "feat0": X_ordinal[:, 0], "feat1": np.array(["cat", "dog"], dtype=object)[X_ordinal[:, 1]], } ) # "snake" is unknown X_test = pd.DataFrame({"feat0": X_test[:, 0], "feat1": ["dog", "cat", "snake"]}) else: X_train = X_ordinal # manually compute encoding for fit_transform expected_X_fit_transform = np.empty_like(X_ordinal, dtype=np.float64) for f_idx, cats in enumerate(categories): for train_idx, test_idx in cv.split(X_ordinal, y_integer): X_, y_ = X_ordinal[train_idx, f_idx], y_integer[train_idx] current_encoding = _encode_target(X_, y_, len(cats), smooth) expected_X_fit_transform[test_idx, f_idx] = current_encoding[ X_ordinal[test_idx, f_idx] ] # manually compute encoding for transform expected_encodings = [] for f_idx, cats in enumerate(categories): current_encoding = _encode_target( X_ordinal[:, f_idx], y_integer, len(cats), smooth ) expected_encodings.append(current_encoding) expected_X_test_transform = np.array( [ [expected_encodings[0][0], expected_encodings[1][1]], [y_mean, expected_encodings[1][0]], [expected_encodings[0][1], y_mean], ], dtype=np.float64, ) enc = TargetEncoder(smooth=smooth, cv=2, random_state=0) X_fit_transform = enc.fit_transform(X_train, y_train) assert_allclose(X_fit_transform, expected_X_fit_transform) assert 
len(enc.encodings_) == 2 for i in range(2): assert_allclose(enc.encodings_[i], expected_encodings[i]) X_test_transform = enc.transform(X_test) assert_allclose(X_test_transform, expected_X_test_transform) @pytest.mark.parametrize( "y, y_mean", [ (np.array([3.4] * 20), 3.4), (np.array([0] * 20), 0), (np.array(["a"] * 20, dtype=object), 0), ], ids=["continuous", "binary", "binary-string"], ) @pytest.mark.parametrize("smooth", ["auto", 4.0, 0.0]) def test_constant_target_and_feature(y, y_mean, smooth): """Check edge case where feature and target is constant.""" X = np.array([[1] * 20]).T n_samples = X.shape[0] enc = TargetEncoder(cv=2, smooth=smooth, random_state=0) X_trans = enc.fit_transform(X, y) assert_allclose(X_trans, np.repeat([[y_mean]], n_samples, axis=0)) assert enc.encodings_[0][0] == pytest.approx(y_mean) assert enc.target_mean_ == pytest.approx(y_mean) X_test = np.array([[1], [0]]) X_test_trans = enc.transform(X_test) assert_allclose(X_test_trans, np.repeat([[y_mean]], 2, axis=0)) def test_fit_transform_not_associated_with_y_if_ordinal_categorical_is_not( global_random_seed, ): cardinality = 30 # not too large, otherwise we need a very large n_samples n_samples = 3000 rng = np.random.RandomState(global_random_seed) y_train = rng.normal(size=n_samples) X_train = rng.randint(0, cardinality, size=n_samples).reshape(-1, 1) # Sort by y_train to attempt to cause a leak y_sorted_indices = y_train.argsort() y_train = y_train[y_sorted_indices] X_train = X_train[y_sorted_indices] target_encoder = TargetEncoder(shuffle=True, random_state=global_random_seed) X_encoded_train_shuffled = target_encoder.fit_transform(X_train, y_train) target_encoder = TargetEncoder(shuffle=False) X_encoded_train_no_shuffled = target_encoder.fit_transform(X_train, y_train) # Check that no information about y_train has leaked into X_train: regressor = RandomForestRegressor( n_estimators=10, min_samples_leaf=20, random_state=global_random_seed ) # It's impossible to learn a good predictive 
model on the training set when # using the original representation X_train or the target encoded # representation with shuffled inner CV. For the latter, no information # about y_train has inadvertently leaked into the prior used to generate # `X_encoded_train_shuffled`: cv = ShuffleSplit(n_splits=50, random_state=global_random_seed) assert cross_val_score(regressor, X_train, y_train, cv=cv).mean() < 0.1 assert ( cross_val_score(regressor, X_encoded_train_shuffled, y_train, cv=cv).mean() < 0.1 ) # Without the inner CV shuffling, a lot of information about y_train goes into the # the per-fold y_train.mean() priors: shrinkage is no longer effective in this # case and would no longer be able to prevent downstream over-fitting. assert ( cross_val_score(regressor, X_encoded_train_no_shuffled, y_train, cv=cv).mean() > 0.5 ) def test_smooth_zero(): """Check edge case with zero smoothing and cv does not contain category.""" X = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]).T y = np.array([2.1, 4.3, 1.2, 3.1, 1.0, 9.0, 10.3, 14.2, 13.3, 15.0]) enc = TargetEncoder(smooth=0.0, shuffle=False, cv=2) X_trans = enc.fit_transform(X, y) # With cv = 2, category 0 does not exist in the second half, thus # it will be encoded as the mean of the second half assert_allclose(X_trans[0], np.mean(y[5:])) # category 1 does not exist in the first half, thus it will be encoded as # the mean of the first half assert_allclose(X_trans[-1], np.mean(y[:5])) @pytest.mark.parametrize("smooth", [0.0, 1e3, "auto"]) def test_invariance_of_encoding_under_label_permutation(smooth, global_random_seed): # Check that the encoding does not depend on the integer of the value of # the integer labels. This is quite a trivial property but it is helpful # to understand the following test. rng = np.random.RandomState(global_random_seed) # Random y and informative categorical X to make the test non-trivial when # using smoothing. 
y = rng.normal(size=1000) n_categories = 30 X = KBinsDiscretizer( n_bins=n_categories, quantile_method="averaged_inverted_cdf", encode="ordinal" ).fit_transform(y.reshape(-1, 1)) X_train, X_test, y_train, y_test = train_test_split( X, y, random_state=global_random_seed ) # Shuffle the labels to make sure that the encoding is invariant to the # permutation of the labels permutated_labels = rng.permutation(n_categories) X_train_permuted = permutated_labels[X_train.astype(np.int32)] X_test_permuted = permutated_labels[X_test.astype(np.int32)] target_encoder = TargetEncoder(smooth=smooth, random_state=global_random_seed) X_train_encoded = target_encoder.fit_transform(X_train, y_train) X_test_encoded = target_encoder.transform(X_test) X_train_permuted_encoded = target_encoder.fit_transform(X_train_permuted, y_train) X_test_permuted_encoded = target_encoder.transform(X_test_permuted) assert_allclose(X_train_encoded, X_train_permuted_encoded) assert_allclose(X_test_encoded, X_test_permuted_encoded) @pytest.mark.parametrize("smooth", [0.0, "auto"]) def test_target_encoding_for_linear_regression(smooth, global_random_seed): # Check some expected statistical properties when fitting a linear # regression model on target encoded features depending on their relation # with that target. # In this test, we use the Ridge class with the "lsqr" solver and a little # bit of regularization to implement a linear regression model that # converges quickly for large `n_samples` and robustly in case of # correlated features. Since we will fit this model on a mean centered # target, we do not need to fit an intercept and this will help simplify # the analysis with respect to the expected coefficients. linear_regression = Ridge(alpha=1e-6, solver="lsqr", fit_intercept=False) # Construct a random target variable. We need a large number of samples for # this test to be stable across all values of the random seed. 
n_samples = 50_000 rng = np.random.RandomState(global_random_seed) y = rng.randn(n_samples) # Generate a single informative ordinal feature with medium cardinality. # Inject some irreducible noise to make it harder for a multivariate model # to identify the informative feature from other pure noise features. noise = 0.8 * rng.randn(n_samples) n_categories = 100 X_informative = KBinsDiscretizer( n_bins=n_categories, encode="ordinal", strategy="uniform", random_state=rng, ).fit_transform((y + noise).reshape(-1, 1)) # Let's permute the labels to hide the fact that this feature is # informative to naive linear regression model trained on the raw ordinal # values. As highlighted in the previous test, the target encoding should be # invariant to such a permutation. permutated_labels = rng.permutation(n_categories) X_informative = permutated_labels[X_informative.astype(np.int32)] # Generate a shuffled copy of the informative feature to destroy the # relationship with the target. X_shuffled = rng.permutation(X_informative) # Also include a very high cardinality categorical feature that is by # itself independent of the target variable: target encoding such a feature # without internal cross-validation should cause catastrophic overfitting # for the downstream regressor, even with shrinkage. This kind of features # typically represents near unique identifiers of samples. In general they # should be removed from a machine learning datasets but here we want to # study the ability of the default behavior of TargetEncoder to mitigate # them automatically. 
X_near_unique_categories = rng.choice( int(0.9 * n_samples), size=n_samples, replace=True ).reshape(-1, 1) # Assemble the dataset and do a train-test split: X = np.concatenate( [X_informative, X_shuffled, X_near_unique_categories], axis=1, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Let's first check that a linear regression model trained on the raw # features underfits because of the meaning-less ordinal encoding of the # labels. raw_model = linear_regression.fit(X_train, y_train) assert raw_model.score(X_train, y_train) < 0.1 assert raw_model.score(X_test, y_test) < 0.1 # Now do the same with target encoding using the internal CV mechanism # implemented when using fit_transform. model_with_cv = make_pipeline( TargetEncoder(smooth=smooth, random_state=rng), linear_regression ).fit(X_train, y_train) # This model should be able to fit the data well and also generalise to the # test data (assuming that the binning is fine-grained enough). The R2 # scores are not perfect because of the noise injected during the # generation of the unique informative feature. coef = model_with_cv[-1].coef_ assert model_with_cv.score(X_train, y_train) > 0.5, coef assert model_with_cv.score(X_test, y_test) > 0.5, coef # The target encoder recovers the linear relationship with slope 1 between # the target encoded unique informative predictor and the target. Since the # target encoding of the 2 other features is not informative thanks to the # use of internal cross-validation, the multivariate linear regressor # assigns a coef of 1 to the first feature and 0 to the other 2. 
assert coef[0] == pytest.approx(1, abs=1e-2) assert (np.abs(coef[1:]) < 0.2).all() # Let's now disable the internal cross-validation by calling fit and then # transform separately on the training set: target_encoder = TargetEncoder(smooth=smooth, random_state=rng).fit( X_train, y_train ) X_enc_no_cv_train = target_encoder.transform(X_train) X_enc_no_cv_test = target_encoder.transform(X_test) model_no_cv = linear_regression.fit(X_enc_no_cv_train, y_train) # The linear regression model should always overfit because it assigns # too much weight to the extremely high cardinality feature relatively to # the informative feature. Note that this is the case even when using # the empirical Bayes smoothing which is not enough to prevent such # overfitting alone. coef = model_no_cv.coef_ assert model_no_cv.score(X_enc_no_cv_train, y_train) > 0.7, coef assert model_no_cv.score(X_enc_no_cv_test, y_test) < 0.5, coef # The model overfits because it assigns too much weight to the high # cardinality yet non-informative feature instead of the lower # cardinality yet informative feature: assert abs(coef[0]) < abs(coef[2]) def test_pandas_copy_on_write(): """ Test target-encoder cython code when y is read-only. The numpy array underlying df["y"] is read-only when copy-on-write is enabled. Non-regression test for gh-27879. """ pd = pytest.importorskip("pandas", minversion="2.0") # Pandas currently warns that setting copy_on_write will be removed in pandas 4 # (and copy-on-write will always be enabled). 
# see https://github.com/scikit-learn/scikit-learn/issues/32829 # TODO: remove this workaround when pandas 4 is our minimum version if parse_version(pd.__version__) >= parse_version("4.0"): df = pd.DataFrame({"x": ["a", "b", "b"], "y": [4.0, 5.0, 6.0]}) TargetEncoder(target_type="continuous").fit(df[["x"]], df["y"]) else: with warnings.catch_warnings(): expected_message = ( ".*Copy-on-Write can no longer be disabled.*This option will" r" be removed in pandas 4\.0" ) warnings.filterwarnings( "ignore", message=expected_message, category=DeprecationWarning, ) with pd.option_context("mode.copy_on_write", True): df = pd.DataFrame({"x": ["a", "b", "b"], "y": [4.0, 5.0, 6.0]}) TargetEncoder(target_type="continuous").fit(df[["x"]], df["y"])
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/test_label.py
sklearn/preprocessing/tests/test_label.py
import numpy as np import pytest from scipy.sparse import issparse from sklearn import config_context, datasets from sklearn.preprocessing._label import ( LabelBinarizer, LabelEncoder, MultiLabelBinarizer, _inverse_binarize_multiclass, _inverse_binarize_thresholding, label_binarize, ) from sklearn.utils._array_api import ( _convert_to_numpy, _get_namespace_device_dtype_ids, _is_numpy_namespace, device, get_namespace, yield_namespace_device_dtype_combinations, ) from sklearn.utils._testing import ( _array_api_for_tests, assert_array_equal, ) from sklearn.utils.fixes import ( COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS, DOK_CONTAINERS, LIL_CONTAINERS, ) from sklearn.utils.multiclass import type_of_target from sklearn.utils.validation import _to_object_array iris = datasets.load_iris() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def test_label_binarizer(): # one-class case defaults to negative label # For dense case: inp = ["pos", "pos", "pos", "pos"] lb = LabelBinarizer(sparse_output=False) expected = np.array([[0, 0, 0, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["pos"]) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) # For sparse case: lb = LabelBinarizer(sparse_output=True) got = lb.fit_transform(inp) assert issparse(got) assert_array_equal(lb.classes_, ["pos"]) assert_array_equal(expected, got.toarray()) assert_array_equal(lb.inverse_transform(got.toarray()), inp) lb = LabelBinarizer(sparse_output=False) # two-class case inp = ["neg", "pos", "pos", "neg"] expected = np.array([[0, 1, 1, 0]]).T got = lb.fit_transform(inp) assert_array_equal(lb.classes_, ["neg", "pos"]) assert_array_equal(expected, got) to_invert = np.array([[1, 0], [0, 1], [0, 1], [1, 0]]) assert_array_equal(lb.inverse_transform(to_invert), inp) # multi-class case inp = ["spam", "ham", "eggs", "ham", "0"] expected = np.array( [[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]] ) got = 
lb.fit_transform(inp) assert_array_equal(lb.classes_, ["0", "eggs", "ham", "spam"]) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) def test_label_binarizer_unseen_labels(): lb = LabelBinarizer() expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) got = lb.fit_transform(["b", "d", "e"]) assert_array_equal(expected, got) expected = np.array( [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]] ) got = lb.transform(["a", "b", "c", "d", "e", "f"]) assert_array_equal(expected, got) def test_label_binarizer_set_label_encoding(): lb = LabelBinarizer(neg_label=-2, pos_label=0) # two-class case with pos_label=0 inp = np.array([0, 1, 1, 0]) expected = np.array([[-2, 0, 0, -2]]).T got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) lb = LabelBinarizer(neg_label=-2, pos_label=2) # multi-class case inp = np.array([3, 2, 1, 2, 0]) expected = np.array( [ [-2, -2, -2, +2], [-2, -2, +2, -2], [-2, +2, -2, -2], [-2, -2, +2, -2], [+2, -2, -2, -2], ] ) got = lb.fit_transform(inp) assert_array_equal(expected, got) assert_array_equal(lb.inverse_transform(got), inp) @pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) @pytest.mark.parametrize("unique_first", [True, False]) def test_label_binarizer_pandas_nullable(dtype, unique_first): """Checks that LabelBinarizer works with pandas nullable dtypes. Non-regression test for gh-25637. """ pd = pytest.importorskip("pandas") y_true = pd.Series([1, 0, 0, 1, 0, 1, 1, 0, 1], dtype=dtype) if unique_first: # Calling unique creates a pandas array which has a different interface # compared to a pandas Series. Specifically, pandas arrays do not have "iloc". 
y_true = y_true.unique() lb = LabelBinarizer().fit(y_true) y_out = lb.transform([1, 0]) assert_array_equal(y_out, [[1], [0]]) def test_label_binarizer_errors(): # Check that invalid arguments yield ValueError one_class = np.array([0, 0, 0, 0]) lb = LabelBinarizer().fit(one_class) multi_label = [(2, 3), (0,), (0, 2)] err_msg = "You appear to be using a legacy multi-label data representation." with pytest.raises(ValueError, match=err_msg): lb.transform(multi_label) lb = LabelBinarizer() err_msg = "This LabelBinarizer instance is not fitted yet" with pytest.raises(ValueError, match=err_msg): lb.transform([]) with pytest.raises(ValueError, match=err_msg): lb.inverse_transform([]) input_labels = [0, 1, 0, 1] err_msg = "neg_label=2 must be strictly less than pos_label=1." lb = LabelBinarizer(neg_label=2, pos_label=1) with pytest.raises(ValueError, match=err_msg): lb.fit(input_labels) err_msg = "neg_label=2 must be strictly less than pos_label=2." lb = LabelBinarizer(neg_label=2, pos_label=2) with pytest.raises(ValueError, match=err_msg): lb.fit(input_labels) err_msg = ( "Sparse binarization is only supported with non zero pos_label and zero " "neg_label, got pos_label=2 and neg_label=1" ) lb = LabelBinarizer(neg_label=1, pos_label=2, sparse_output=True) with pytest.raises(ValueError, match=err_msg): lb.fit(input_labels) # Sequence of seq type should raise ValueError y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] err_msg = "You appear to be using a legacy multi-label data representation" with pytest.raises(ValueError, match=err_msg): LabelBinarizer().fit_transform(y_seq_of_seqs) # Fail on the dimension of 'binary' err_msg = "output_type='binary', but y.shape" with pytest.raises(ValueError, match=err_msg): _inverse_binarize_thresholding( y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary", classes=[1, 2, 3], threshold=0, ) # Fail on multioutput data err_msg = "Multioutput target data is not supported with label binarization" with pytest.raises(ValueError, 
match=err_msg): LabelBinarizer().fit(np.array([[1, 3], [2, 1]])) with pytest.raises(ValueError, match=err_msg): label_binarize(np.array([[1, 3], [2, 1]]), classes=[1, 2, 3]) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_label_binarizer_sparse_errors(csr_container): # Fail on y_type err_msg = "foo format is not supported" with pytest.raises(ValueError, match=err_msg): _inverse_binarize_thresholding( y=csr_container([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2], threshold=0, ) # Fail on the number of classes err_msg = "The number of class is not equal to the number of dimension of y." with pytest.raises(ValueError, match=err_msg): _inverse_binarize_thresholding( y=csr_container([[1, 2], [2, 1]]), output_type="foo", classes=[1, 2, 3], threshold=0, ) @pytest.mark.parametrize( "y, classes, expected", [ [[1, 0, 0, 1], [0, 1], [[1], [0], [0], [1]]], [ [1, 0, 2, 9], [0, 1, 2, 9], [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], ], ], ) @pytest.mark.parametrize( "array_namespace, device_, dtype_name", yield_namespace_device_dtype_combinations() ) def test_label_binarizer_array_api_compliance( y, classes, expected, array_namespace, device_, dtype_name ): """Test that :class:`LabelBinarizer` works correctly with the Array API for binary and multi-class inputs for numerical labels and non-sparse outputs. """ xp = _array_api_for_tests(array_namespace, device_) y_np = np.asarray(y) with config_context(array_api_dispatch=True): y = xp.asarray(y, device=device_) # `sparse_output=True` is not allowed for non-NumPy namespaces. # Similarly, if `LabelBinarizer` is fitted on a sparse matrix, # then inverse-transforming non-NumPy arrays is not allowed. 
if not _is_numpy_namespace(xp): sparse_output_msg = "`sparse_output=True` is not supported for array API" with pytest.raises(ValueError, match=sparse_output_msg): LabelBinarizer(sparse_output=True).fit(y) lb_np = LabelBinarizer(sparse_output=True).fit(y_np) with pytest.raises(ValueError, match=sparse_output_msg): lb_np.transform(y) lb_sparse = LabelBinarizer().fit(y_np) lb_sparse.sparse_input_ = True sparse_input_msg = ( "`LabelBinarizer` was fitted on a sparse matrix, and therefore cannot" ) with pytest.raises(ValueError, match=sparse_input_msg): lb_sparse.inverse_transform(xp.asarray(expected, device=device_)) # Shouldn't raise error in both `fit` and `transform` when `sparse_output=False` lb_xp = LabelBinarizer() binarized = lb_xp.fit_transform(y) assert get_namespace(binarized)[0].__name__ == xp.__name__ assert "int" in str(binarized.dtype) assert device(binarized) == device(y) assert_array_equal(_convert_to_numpy(binarized, xp=xp), np.asarray(expected)) fitted_classes = lb_xp.classes_ assert get_namespace(fitted_classes)[0].__name__ == xp.__name__ assert device(fitted_classes) == device(y) assert "int" in str(fitted_classes.dtype) assert_array_equal( _convert_to_numpy(fitted_classes, xp=xp), np.asarray(classes) ) expected_xp = xp.asarray(expected, device=device_) binarized_inverse = lb_xp.inverse_transform(expected_xp) assert get_namespace(binarized_inverse)[0].__name__ == xp.__name__ assert "int" in str(binarized_inverse.dtype) assert device(binarized_inverse) == device(y) assert_array_equal( _convert_to_numpy(binarized_inverse, xp=xp), _convert_to_numpy(y, xp=xp) ) @pytest.mark.parametrize( "values, classes, unknown", [ ( np.array([2, 1, 3, 1, 3], dtype="int64"), np.array([1, 2, 3], dtype="int64"), np.array([4], dtype="int64"), ), ( np.array(["b", "a", "c", "a", "c"], dtype=object), np.array(["a", "b", "c"], dtype=object), np.array(["d"], dtype=object), ), ( np.array(["b", "a", "c", "a", "c"]), np.array(["a", "b", "c"]), np.array(["d"]), ), ], ids=["int64", 
"object", "str"], ) def test_label_encoder(values, classes, unknown): # Test LabelEncoder's transform, fit_transform and # inverse_transform methods le = LabelEncoder() le.fit(values) assert_array_equal(le.classes_, classes) assert_array_equal(le.transform(values), [1, 0, 2, 0, 2]) assert_array_equal(le.inverse_transform([1, 0, 2, 0, 2]), values) le = LabelEncoder() ret = le.fit_transform(values) assert_array_equal(ret, [1, 0, 2, 0, 2]) with pytest.raises(ValueError, match="unseen labels"): le.transform(unknown) def test_label_encoder_negative_ints(): le = LabelEncoder() le.fit([1, 1, 4, 5, -1, 0]) assert_array_equal(le.classes_, [-1, 0, 1, 4, 5]) assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]), [1, 2, 3, 3, 4, 0, 0]) assert_array_equal( le.inverse_transform([1, 2, 3, 3, 4, 0, 0]), [0, 1, 4, 4, 5, -1, -1] ) with pytest.raises(ValueError): le.transform([0, 6]) @pytest.mark.parametrize("dtype", ["str", "object"]) def test_label_encoder_str_bad_shape(dtype): le = LabelEncoder() le.fit(np.array(["apple", "orange"], dtype=dtype)) msg = "should be a 1d array" with pytest.raises(ValueError, match=msg): le.transform("apple") def test_label_encoder_errors(): # Check that invalid arguments yield ValueError le = LabelEncoder() with pytest.raises(ValueError): le.transform([]) with pytest.raises(ValueError): le.inverse_transform([]) # Fail on unseen labels le = LabelEncoder() le.fit([1, 2, 3, -1, 1]) msg = "contains previously unseen labels" with pytest.raises(ValueError, match=msg): le.inverse_transform([-2]) with pytest.raises(ValueError, match=msg): le.inverse_transform([-2, -3, -4]) # Fail on inverse_transform("") msg = r"should be a 1d array.+shape \(\)" with pytest.raises(ValueError, match=msg): le.inverse_transform("") @pytest.mark.parametrize( "values", [ np.array([2, 1, 3, 1, 3], dtype="int64"), np.array(["b", "a", "c", "a", "c"], dtype=object), np.array(["b", "a", "c", "a", "c"]), ], ids=["int64", "object", "str"], ) def 
test_label_encoder_empty_array(values): le = LabelEncoder() le.fit(values) # test empty transform transformed = le.transform([]) assert_array_equal(np.array([]), transformed) # test empty inverse transform inverse_transformed = le.inverse_transform([]) assert_array_equal(np.array([]), inverse_transformed) def test_sparse_output_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: ({2, 3}, {1}, {1, 2}), lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for sparse_output in [True, False]: for inp in inputs: # With fit_transform mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit_transform(inp()) assert issparse(got) == sparse_output if sparse_output: # verify CSR assumption that indices and indptr have same dtype assert got.indices.dtype == got.indptr.dtype got = got.toarray() assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert mlb.inverse_transform(got) == inverse # With fit mlb = MultiLabelBinarizer(sparse_output=sparse_output) got = mlb.fit(inp()).transform(inp()) assert issparse(got) == sparse_output if sparse_output: # verify CSR assumption that indices and indptr have same dtype assert got.indices.dtype == got.indptr.dtype got = got.toarray() assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert mlb.inverse_transform(got) == inverse @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_sparse_output_multilabel_binarizer_errors(csr_container): inp = iter([iter((2, 3)), iter((1,)), {1, 2}]) mlb = MultiLabelBinarizer(sparse_output=False) mlb.fit(inp) with pytest.raises(ValueError): mlb.inverse_transform( csr_container(np.array([[0, 1, 1], [2, 0, 0], [1, 1, 0]])) ) def test_multilabel_binarizer(): # test input as iterable of iterables inputs = [ lambda: [(2, 3), (1,), (1, 2)], lambda: ({2, 3}, {1}, {1, 2}), lambda: 
iter([iter((2, 3)), iter((1,)), {1, 2}]), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) inverse = inputs[0]() for inp in inputs: # With fit_transform mlb = MultiLabelBinarizer() got = mlb.fit_transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert mlb.inverse_transform(got) == inverse # With fit mlb = MultiLabelBinarizer() got = mlb.fit(inp()).transform(inp()) assert_array_equal(indicator_mat, got) assert_array_equal([1, 2, 3], mlb.classes_) assert mlb.inverse_transform(got) == inverse def test_multilabel_binarizer_empty_sample(): mlb = MultiLabelBinarizer() y = [[1, 2], [1], []] Y = np.array([[1, 1], [1, 0], [0, 0]]) assert_array_equal(mlb.fit_transform(y), Y) def test_multilabel_binarizer_unknown_class(): mlb = MultiLabelBinarizer() y = [[1, 2]] Y = np.array([[1, 0], [0, 1]]) warning_message = "unknown class.* will be ignored" with pytest.warns(UserWarning, match=warning_message): matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) Y = np.array([[1, 0, 0], [0, 1, 0]]) mlb = MultiLabelBinarizer(classes=[1, 2, 3]) with pytest.warns(UserWarning, match=warning_message): matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) assert_array_equal(matrix, Y) def test_multilabel_binarizer_given_classes(): inp = [(2, 3), (1,), (1, 2)] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # fit().transform() mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, [1, 3, 2]) # ensure works with extra class mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2]) assert_array_equal( mlb.fit_transform(inp), np.hstack(([[0], [0], [0]], indicator_mat)) ) assert_array_equal(mlb.classes_, [4, 1, 3, 2]) # ensure fit is no-op as iterable is not consumed inp = iter(inp) mlb = 
MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) # ensure a ValueError is thrown if given duplicate classes err_msg = ( "The classes argument contains duplicate classes. Remove " "these duplicates before passing them to MultiLabelBinarizer." ) mlb = MultiLabelBinarizer(classes=[1, 3, 2, 3]) with pytest.raises(ValueError, match=err_msg): mlb.fit(inp) def test_multilabel_binarizer_multiple_calls(): inp = [(2, 3), (1,), (1, 2)] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) indicator_mat2 = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) # first call mlb = MultiLabelBinarizer(classes=[1, 3, 2]) assert_array_equal(mlb.fit_transform(inp), indicator_mat) # second call change class mlb.classes = [1, 2, 3] assert_array_equal(mlb.fit_transform(inp), indicator_mat2) def test_multilabel_binarizer_same_length_sequence(): # Ensure sequences of the same length are not interpreted as a 2-d array inp = [[1], [0], [2]] indicator_mat = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) # fit_transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.inverse_transform(indicator_mat), inp) def test_multilabel_binarizer_non_integer_labels(): tuple_classes = _to_object_array([(1,), (2,), (3,)]) inputs = [ ([("2", "3"), ("1",), ("1", "2")], ["1", "2", "3"]), ([("b", "c"), ("a",), ("a", "b")], ["a", "b", "c"]), ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes), ] indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) for inp, classes in inputs: # fit_transform() mlb = MultiLabelBinarizer() inp = np.array(inp, dtype=object) assert_array_equal(mlb.fit_transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), 
dtype=object) assert_array_equal(indicator_mat_inv, inp) # fit().transform() mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) assert_array_equal(mlb.classes_, classes) indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), dtype=object) assert_array_equal(indicator_mat_inv, inp) mlb = MultiLabelBinarizer() with pytest.raises(TypeError): mlb.fit_transform([({}), ({}, {"a": "b"})]) def test_multilabel_binarizer_non_unique(): inp = [(1, 1, 1, 0)] indicator_mat = np.array([[1, 1]]) mlb = MultiLabelBinarizer() assert_array_equal(mlb.fit_transform(inp), indicator_mat) def test_multilabel_binarizer_inverse_validation(): inp = [(1, 1, 1, 0)] mlb = MultiLabelBinarizer() mlb.fit_transform(inp) # Not binary with pytest.raises(ValueError): mlb.inverse_transform(np.array([[1, 3]])) # The following binary cases are fine, however mlb.inverse_transform(np.array([[0, 0]])) mlb.inverse_transform(np.array([[1, 1]])) mlb.inverse_transform(np.array([[1, 0]])) # Wrong shape with pytest.raises(ValueError): mlb.inverse_transform(np.array([[1]])) with pytest.raises(ValueError): mlb.inverse_transform(np.array([[1, 1, 1]])) def test_label_binarize_with_class_order(): out = label_binarize([1, 6], classes=[1, 2, 4, 6]) expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) assert_array_equal(out, expected) # Modified class order out = label_binarize([1, 6], classes=[1, 6, 4, 2]) expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) assert_array_equal(out, expected) out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]) expected = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]) assert_array_equal(out, expected) def check_binarized_results(y, classes, pos_label, neg_label, expected): for sparse_output in [True, False]: if (pos_label == 0 or neg_label != 0) and sparse_output: with pytest.raises(ValueError): label_binarize( y, classes=classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output, ) continue # check 
label_binarize binarized = label_binarize( y, classes=classes, neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output, ) assert_array_equal(toarray(binarized), expected) assert issparse(binarized) == sparse_output # check inverse y_type = type_of_target(y) if y_type == "multiclass": inversed = _inverse_binarize_multiclass(binarized, classes=classes) else: inversed = _inverse_binarize_thresholding( binarized, output_type=y_type, classes=classes, threshold=((neg_label + pos_label) / 2.0), ) assert_array_equal(toarray(inversed), toarray(y)) # Check label binarizer lb = LabelBinarizer( neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output ) binarized = lb.fit_transform(y) assert_array_equal(toarray(binarized), expected) assert issparse(binarized) == sparse_output inverse_output = lb.inverse_transform(binarized) assert_array_equal(toarray(inverse_output), toarray(y)) assert issparse(inverse_output) == issparse(y) def test_label_binarize_binary(): y = [0, 1, 0] classes = [0, 1] pos_label = 2 neg_label = -1 expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1)) check_binarized_results(y, classes, pos_label, neg_label, expected) # Binary case where sparse_output = True will not result in a ValueError y = [0, 1, 0] classes = [0, 1] pos_label = 3 neg_label = 0 expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1)) check_binarized_results(y, classes, pos_label, neg_label, expected) def test_label_binarize_multiclass(): y = [0, 1, 2] classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = 2 * np.eye(3) check_binarized_results(y, classes, pos_label, neg_label, expected) with pytest.raises(ValueError): label_binarize( y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True ) @pytest.mark.parametrize( "arr_type", [np.array] + COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS, ) def test_label_binarize_multilabel(arr_type): y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 
0, 0]]) classes = [0, 1, 2] pos_label = 2 neg_label = 0 expected = pos_label * y_ind y = arr_type(y_ind) check_binarized_results(y, classes, pos_label, neg_label, expected) with pytest.raises(ValueError): label_binarize( y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True ) def test_invalid_input_label_binarize(): with pytest.raises(ValueError): label_binarize([0, 2], classes=[0, 2], pos_label=0, neg_label=1) with pytest.raises(ValueError, match="continuous target data is not "): label_binarize([1.2, 2.7], classes=[0, 1]) with pytest.raises(ValueError, match="mismatch with the labels"): label_binarize([[1, 3]], classes=[1, 2, 3]) @pytest.mark.parametrize( "y, classes, expected", [ [[1, 0, 0, 1], ["yes", "no"], [[0], [0], [0], [0]]], [ [1, 0, 2, 9], ["bird", "cat", "dog"], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], ], [[1, 0, 0, 1], [0, 1], [[1], [0], [0], [1]]], [[1, 0, 2, 1], [0, 1, 2], [[0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 0]]], ], ) @pytest.mark.parametrize( "array_namespace, device_, dtype_name", yield_namespace_device_dtype_combinations() ) def test_label_binarize_array_api_compliance( y, classes, expected, array_namespace, device_, dtype_name ): """Test that :func:`label_binarize` works correctly with the Array API for binary and multi-class inputs for numerical labels and non-sparse outputs. 
""" xp = _array_api_for_tests(array_namespace, device_) xp_is_numpy = _is_numpy_namespace(xp) numeric_dtype = np.issubdtype(np.asarray(y).dtype, np.integer) and np.issubdtype( np.asarray(classes).dtype, np.integer ) with config_context(array_api_dispatch=True): y = xp.asarray(y, device=device_) if numeric_dtype: # `sparse_output=True` is not allowed for non-NumPy namespaces if not xp_is_numpy: msg = "`sparse_output=True` is not supported for array API " with pytest.raises(ValueError, match=msg): label_binarize(y=y, classes=classes, sparse_output=True) # Numeric class labels should not raise any errors for non-NumPy namespaces binarized = label_binarize(y, classes=classes) expected = np.asarray(expected, dtype=int) assert get_namespace(binarized)[0].__name__ == xp.__name__ assert device(binarized) == device(y) assert "int" in str(binarized.dtype) assert_array_equal(_convert_to_numpy(binarized, xp=xp), expected) if not xp_is_numpy and not numeric_dtype: msg = "`classes` contains unsupported dtype for array API " with pytest.raises(ValueError, match=msg): label_binarize(y=y, classes=classes) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_inverse_binarize_multiclass(csr_container): got = _inverse_binarize_multiclass( csr_container([[0, 1, 0], [-1, 0, -1], [0, 0, 0]]), np.arange(3) ) assert_array_equal(got, np.array([1, 1, 0])) def test_nan_label_encoder(): """Check that label encoder encodes nans in transform. Non-regression test for #22628. """ le = LabelEncoder() le.fit(["a", "a", "b", np.nan]) y_trans = le.transform([np.nan]) assert_array_equal(y_trans, [2]) @pytest.mark.parametrize( "encoder", [LabelEncoder(), LabelBinarizer(), MultiLabelBinarizer()] ) def test_label_encoders_do_not_have_set_output(encoder): """Check that label encoders do not define set_output and work with y as a kwarg. Non-regression test for #26854. 
""" assert not hasattr(encoder, "set_output") y_encoded_with_kwarg = encoder.fit_transform(y=["a", "b", "c"]) y_encoded_positional = encoder.fit_transform(["a", "b", "c"]) assert_array_equal(y_encoded_with_kwarg, y_encoded_positional) @pytest.mark.parametrize( "array_namespace, device, dtype", yield_namespace_device_dtype_combinations(), ids=_get_namespace_device_dtype_ids, ) @pytest.mark.parametrize( "y", [ np.array([2, 1, 3, 1, 3]), np.array([1, 1, 4, 5, -1, 0]), np.array([3, 5, 9, 5, 9, 3]), ], ) def test_label_encoder_array_api_compliance(y, array_namespace, device, dtype): xp = _array_api_for_tests(array_namespace, device) xp_y = xp.asarray(y, device=device) with config_context(array_api_dispatch=True): xp_label = LabelEncoder() np_label = LabelEncoder() xp_label = xp_label.fit(xp_y) xp_transformed = xp_label.transform(xp_y) xp_inv_transformed = xp_label.inverse_transform(xp_transformed) np_label = np_label.fit(y) np_transformed = np_label.transform(y) assert get_namespace(xp_transformed)[0].__name__ == xp.__name__ assert get_namespace(xp_inv_transformed)[0].__name__ == xp.__name__ assert get_namespace(xp_label.classes_)[0].__name__ == xp.__name__ assert_array_equal(_convert_to_numpy(xp_transformed, xp), np_transformed) assert_array_equal(_convert_to_numpy(xp_inv_transformed, xp), y) assert_array_equal(_convert_to_numpy(xp_label.classes_, xp), np_label.classes_) xp_label = LabelEncoder() np_label = LabelEncoder() xp_transformed = xp_label.fit_transform(xp_y) np_transformed = np_label.fit_transform(y) assert get_namespace(xp_transformed)[0].__name__ == xp.__name__ assert get_namespace(xp_label.classes_)[0].__name__ == xp.__name__ assert_array_equal(_convert_to_numpy(xp_transformed, xp), np_transformed) assert_array_equal(_convert_to_numpy(xp_label.classes_, xp), np_label.classes_)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/preprocessing/tests/__init__.py
sklearn/preprocessing/tests/__init__.py
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_public_functions.py
sklearn/tests/test_public_functions.py
from importlib import import_module from inspect import signature from numbers import Integral, Real import pytest from sklearn.utils._param_validation import ( Interval, InvalidParameterError, generate_invalid_param_val, generate_valid_param, make_constraint, ) def _get_func_info(func_module): module_name, func_name = func_module.rsplit(".", 1) module = import_module(module_name) func = getattr(module, func_name) func_sig = signature(func) func_params = [ p.name for p in func_sig.parameters.values() if p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD) ] # The parameters `*args` and `**kwargs` are ignored since we cannot generate # constraints. required_params = [ p.name for p in func_sig.parameters.values() if p.default is p.empty and p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD) ] return func, func_name, func_params, required_params def _check_function_param_validation( func, func_name, func_params, required_params, parameter_constraints ): """Check that an informative error is raised when the value of a parameter does not have an appropriate type or value. 
""" # generate valid values for the required parameters valid_required_params = {} for param_name in required_params: if parameter_constraints[param_name] == "no_validation": valid_required_params[param_name] = 1 else: valid_required_params[param_name] = generate_valid_param( make_constraint(parameter_constraints[param_name][0]) ) # check that there is a constraint for each parameter if func_params: validation_params = parameter_constraints.keys() unexpected_params = set(validation_params) - set(func_params) missing_params = set(func_params) - set(validation_params) err_msg = ( "Mismatch between _parameter_constraints and the parameters of" f" {func_name}.\nConsider the unexpected parameters {unexpected_params} and" f" expected but missing parameters {missing_params}\n" ) assert set(validation_params) == set(func_params), err_msg # this object does not have a valid type for sure for all params param_with_bad_type = type("BadType", (), {})() for param_name in func_params: constraints = parameter_constraints[param_name] if constraints == "no_validation": # This parameter is not validated continue # Mixing an interval of reals and an interval of integers must be avoided. if any( isinstance(constraint, Interval) and constraint.type == Integral for constraint in constraints ) and any( isinstance(constraint, Interval) and constraint.type == Real for constraint in constraints ): raise ValueError( f"The constraint for parameter {param_name} of {func_name} can't have a" " mix of intervals of Integral and Real types. Use the type" " RealNotInt instead of Real." ) match = ( rf"The '{param_name}' parameter of {func_name} must be .* Got .* instead." ) err_msg = ( f"{func_name} does not raise an informative error message when the " f"parameter {param_name} does not have a valid type. If any Python type " "is valid, the constraint should be 'no_validation'." ) # First, check that the error is raised if param doesn't match any valid type. 
with pytest.raises(InvalidParameterError, match=match): func(**{**valid_required_params, param_name: param_with_bad_type}) pytest.fail(err_msg) # Then, for constraints that are more than a type constraint, check that the # error is raised if param does match a valid type but does not match any valid # value for this type. constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: try: bad_value = generate_invalid_param_val(constraint) except NotImplementedError: continue err_msg = ( f"{func_name} does not raise an informative error message when the " f"parameter {param_name} does not have a valid value.\n" "Constraints should be disjoint. For instance " "[StrOptions({'a_string'}), str] is not an acceptable set of " "constraint because generating an invalid string for the first " "constraint will always produce a valid string for the second " "constraint." ) with pytest.raises(InvalidParameterError, match=match): func(**{**valid_required_params, param_name: bad_value}) pytest.fail(err_msg) PARAM_VALIDATION_FUNCTION_LIST = [ "sklearn.calibration.calibration_curve", "sklearn.cluster.cluster_optics_dbscan", "sklearn.cluster.compute_optics_graph", "sklearn.cluster.estimate_bandwidth", "sklearn.cluster.kmeans_plusplus", "sklearn.cluster.cluster_optics_xi", "sklearn.cluster.ward_tree", "sklearn.covariance.empirical_covariance", "sklearn.covariance.ledoit_wolf_shrinkage", "sklearn.covariance.log_likelihood", "sklearn.covariance.shrunk_covariance", "sklearn.datasets.clear_data_home", "sklearn.datasets.dump_svmlight_file", "sklearn.datasets.fetch_20newsgroups", "sklearn.datasets.fetch_20newsgroups_vectorized", "sklearn.datasets.fetch_california_housing", "sklearn.datasets.fetch_covtype", "sklearn.datasets.fetch_kddcup99", "sklearn.datasets.fetch_lfw_pairs", "sklearn.datasets.fetch_lfw_people", "sklearn.datasets.fetch_olivetti_faces", "sklearn.datasets.fetch_rcv1", "sklearn.datasets.fetch_openml", 
"sklearn.datasets.fetch_species_distributions", "sklearn.datasets.get_data_home", "sklearn.datasets.load_breast_cancer", "sklearn.datasets.load_diabetes", "sklearn.datasets.load_digits", "sklearn.datasets.load_files", "sklearn.datasets.load_iris", "sklearn.datasets.load_linnerud", "sklearn.datasets.load_sample_image", "sklearn.datasets.load_svmlight_file", "sklearn.datasets.load_svmlight_files", "sklearn.datasets.load_wine", "sklearn.datasets.make_biclusters", "sklearn.datasets.make_blobs", "sklearn.datasets.make_checkerboard", "sklearn.datasets.make_circles", "sklearn.datasets.make_classification", "sklearn.datasets.make_friedman1", "sklearn.datasets.make_friedman2", "sklearn.datasets.make_friedman3", "sklearn.datasets.make_gaussian_quantiles", "sklearn.datasets.make_hastie_10_2", "sklearn.datasets.make_low_rank_matrix", "sklearn.datasets.make_moons", "sklearn.datasets.make_multilabel_classification", "sklearn.datasets.make_regression", "sklearn.datasets.make_s_curve", "sklearn.datasets.make_sparse_coded_signal", "sklearn.datasets.make_sparse_spd_matrix", "sklearn.datasets.make_sparse_uncorrelated", "sklearn.datasets.make_spd_matrix", "sklearn.datasets.make_swiss_roll", "sklearn.decomposition.sparse_encode", "sklearn.feature_extraction.grid_to_graph", "sklearn.feature_extraction.img_to_graph", "sklearn.feature_extraction.image.extract_patches_2d", "sklearn.feature_extraction.image.reconstruct_from_patches_2d", "sklearn.feature_selection.chi2", "sklearn.feature_selection.f_classif", "sklearn.feature_selection.f_regression", "sklearn.feature_selection.mutual_info_classif", "sklearn.feature_selection.mutual_info_regression", "sklearn.feature_selection.r_regression", "sklearn.inspection.partial_dependence", "sklearn.inspection.permutation_importance", "sklearn.isotonic.check_increasing", "sklearn.isotonic.isotonic_regression", "sklearn.linear_model.enet_path", "sklearn.linear_model.lars_path", "sklearn.linear_model.lars_path_gram", "sklearn.linear_model.lasso_path", 
"sklearn.linear_model.orthogonal_mp", "sklearn.linear_model.orthogonal_mp_gram", "sklearn.linear_model.ridge_regression", "sklearn.manifold.locally_linear_embedding", "sklearn.manifold.smacof", "sklearn.manifold.spectral_embedding", "sklearn.manifold.trustworthiness", "sklearn.metrics.accuracy_score", "sklearn.metrics.auc", "sklearn.metrics.average_precision_score", "sklearn.metrics.balanced_accuracy_score", "sklearn.metrics.brier_score_loss", "sklearn.metrics.calinski_harabasz_score", "sklearn.metrics.check_scoring", "sklearn.metrics.completeness_score", "sklearn.metrics.class_likelihood_ratios", "sklearn.metrics.classification_report", "sklearn.metrics.cluster.adjusted_mutual_info_score", "sklearn.metrics.cluster.contingency_matrix", "sklearn.metrics.cluster.fowlkes_mallows_score", "sklearn.metrics.cluster.homogeneity_completeness_v_measure", "sklearn.metrics.cluster.normalized_mutual_info_score", "sklearn.metrics.cluster.silhouette_samples", "sklearn.metrics.cluster.silhouette_score", "sklearn.metrics.cohen_kappa_score", "sklearn.metrics.confusion_matrix", "sklearn.metrics.confusion_matrix_at_thresholds", "sklearn.metrics.consensus_score", "sklearn.metrics.coverage_error", "sklearn.metrics.d2_absolute_error_score", "sklearn.metrics.d2_brier_score", "sklearn.metrics.d2_log_loss_score", "sklearn.metrics.d2_pinball_score", "sklearn.metrics.d2_tweedie_score", "sklearn.metrics.davies_bouldin_score", "sklearn.metrics.dcg_score", "sklearn.metrics.det_curve", "sklearn.metrics.explained_variance_score", "sklearn.metrics.f1_score", "sklearn.metrics.fbeta_score", "sklearn.metrics.get_scorer", "sklearn.metrics.hamming_loss", "sklearn.metrics.hinge_loss", "sklearn.metrics.homogeneity_score", "sklearn.metrics.jaccard_score", "sklearn.metrics.label_ranking_average_precision_score", "sklearn.metrics.label_ranking_loss", "sklearn.metrics.log_loss", "sklearn.metrics.make_scorer", "sklearn.metrics.matthews_corrcoef", "sklearn.metrics.max_error", 
"sklearn.metrics.mean_absolute_error", "sklearn.metrics.mean_absolute_percentage_error", "sklearn.metrics.mean_gamma_deviance", "sklearn.metrics.mean_pinball_loss", "sklearn.metrics.mean_poisson_deviance", "sklearn.metrics.mean_squared_error", "sklearn.metrics.mean_squared_log_error", "sklearn.metrics.mean_tweedie_deviance", "sklearn.metrics.median_absolute_error", "sklearn.metrics.multilabel_confusion_matrix", "sklearn.metrics.mutual_info_score", "sklearn.metrics.ndcg_score", "sklearn.metrics.pair_confusion_matrix", "sklearn.metrics.adjusted_rand_score", "sklearn.metrics.pairwise.additive_chi2_kernel", "sklearn.metrics.pairwise.chi2_kernel", "sklearn.metrics.pairwise.cosine_distances", "sklearn.metrics.pairwise.cosine_similarity", "sklearn.metrics.pairwise.euclidean_distances", "sklearn.metrics.pairwise.haversine_distances", "sklearn.metrics.pairwise.laplacian_kernel", "sklearn.metrics.pairwise.linear_kernel", "sklearn.metrics.pairwise.manhattan_distances", "sklearn.metrics.pairwise.nan_euclidean_distances", "sklearn.metrics.pairwise.paired_cosine_distances", "sklearn.metrics.pairwise.paired_distances", "sklearn.metrics.pairwise.paired_euclidean_distances", "sklearn.metrics.pairwise.paired_manhattan_distances", "sklearn.metrics.pairwise.pairwise_distances_argmin_min", "sklearn.metrics.pairwise.pairwise_kernels", "sklearn.metrics.pairwise.polynomial_kernel", "sklearn.metrics.pairwise.rbf_kernel", "sklearn.metrics.pairwise.sigmoid_kernel", "sklearn.metrics.pairwise_distances", "sklearn.metrics.pairwise_distances_argmin", "sklearn.metrics.pairwise_distances_chunked", "sklearn.metrics.precision_recall_curve", "sklearn.metrics.precision_recall_fscore_support", "sklearn.metrics.precision_score", "sklearn.metrics.r2_score", "sklearn.metrics.rand_score", "sklearn.metrics.recall_score", "sklearn.metrics.roc_auc_score", "sklearn.metrics.roc_curve", "sklearn.metrics.root_mean_squared_error", "sklearn.metrics.root_mean_squared_log_error", 
"sklearn.metrics.top_k_accuracy_score", "sklearn.metrics.v_measure_score", "sklearn.metrics.zero_one_loss", "sklearn.model_selection.cross_val_predict", "sklearn.model_selection.cross_val_score", "sklearn.model_selection.cross_validate", "sklearn.model_selection.learning_curve", "sklearn.model_selection.permutation_test_score", "sklearn.model_selection.train_test_split", "sklearn.model_selection.validation_curve", "sklearn.neighbors.kneighbors_graph", "sklearn.neighbors.radius_neighbors_graph", "sklearn.neighbors.sort_graph_by_row_values", "sklearn.preprocessing.add_dummy_feature", "sklearn.preprocessing.binarize", "sklearn.preprocessing.label_binarize", "sklearn.preprocessing.normalize", "sklearn.preprocessing.scale", "sklearn.random_projection.johnson_lindenstrauss_min_dim", "sklearn.svm.l1_min_c", "sklearn.tree.export_graphviz", "sklearn.tree.export_text", "sklearn.tree.plot_tree", "sklearn.utils.gen_batches", "sklearn.utils.gen_even_slices", "sklearn.utils.resample", "sklearn.utils.safe_mask", "sklearn.utils.extmath.randomized_svd", "sklearn.utils.class_weight.compute_class_weight", "sklearn.utils.class_weight.compute_sample_weight", "sklearn.utils.graph.single_source_shortest_path_length", ] @pytest.mark.parametrize("func_module", PARAM_VALIDATION_FUNCTION_LIST) def test_function_param_validation(func_module): """Check param validation for public functions that are not wrappers around estimators. 
""" func, func_name, func_params, required_params = _get_func_info(func_module) parameter_constraints = getattr(func, "_skl_parameter_constraints") _check_function_param_validation( func, func_name, func_params, required_params, parameter_constraints ) PARAM_VALIDATION_CLASS_WRAPPER_LIST = [ ("sklearn.cluster.affinity_propagation", "sklearn.cluster.AffinityPropagation"), ("sklearn.cluster.dbscan", "sklearn.cluster.DBSCAN"), ("sklearn.cluster.k_means", "sklearn.cluster.KMeans"), ("sklearn.cluster.mean_shift", "sklearn.cluster.MeanShift"), ("sklearn.cluster.spectral_clustering", "sklearn.cluster.SpectralClustering"), ("sklearn.covariance.graphical_lasso", "sklearn.covariance.GraphicalLasso"), ("sklearn.covariance.ledoit_wolf", "sklearn.covariance.LedoitWolf"), ("sklearn.covariance.oas", "sklearn.covariance.OAS"), ("sklearn.decomposition.dict_learning", "sklearn.decomposition.DictionaryLearning"), ( "sklearn.decomposition.dict_learning_online", "sklearn.decomposition.MiniBatchDictionaryLearning", ), ("sklearn.decomposition.fastica", "sklearn.decomposition.FastICA"), ("sklearn.decomposition.non_negative_factorization", "sklearn.decomposition.NMF"), ("sklearn.preprocessing.maxabs_scale", "sklearn.preprocessing.MaxAbsScaler"), ("sklearn.preprocessing.minmax_scale", "sklearn.preprocessing.MinMaxScaler"), ("sklearn.preprocessing.power_transform", "sklearn.preprocessing.PowerTransformer"), ( "sklearn.preprocessing.quantile_transform", "sklearn.preprocessing.QuantileTransformer", ), ("sklearn.preprocessing.robust_scale", "sklearn.preprocessing.RobustScaler"), ] @pytest.mark.parametrize( "func_module, class_module", PARAM_VALIDATION_CLASS_WRAPPER_LIST ) def test_class_wrapper_param_validation(func_module, class_module): """Check param validation for public functions that are wrappers around estimators. 
""" func, func_name, func_params, required_params = _get_func_info(func_module) module_name, class_name = class_module.rsplit(".", 1) module = import_module(module_name) klass = getattr(module, class_name) parameter_constraints_func = getattr(func, "_skl_parameter_constraints") parameter_constraints_class = getattr(klass, "_parameter_constraints") parameter_constraints = { **parameter_constraints_class, **parameter_constraints_func, } parameter_constraints = { k: v for k, v in parameter_constraints.items() if k in func_params } _check_function_param_validation( func, func_name, func_params, required_params, parameter_constraints )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_docstring_parameters_consistency.py
sklearn/tests/test_docstring_parameters_consistency.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import pytest from sklearn import metrics from sklearn.ensemble import ( BaggingClassifier, BaggingRegressor, IsolationForest, StackingClassifier, StackingRegressor, ) from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc CLASS_DOCSTRING_CONSISTENCY_CASES = [ { "objects": [BaggingClassifier, BaggingRegressor, IsolationForest], "include_params": ["max_samples"], "exclude_params": None, "include_attrs": False, "exclude_attrs": None, "include_returns": False, "exclude_returns": None, "descr_regex_pattern": r"The number of samples to draw from X to train each.*", "ignore_types": ("max_samples"), }, { "objects": [StackingClassifier, StackingRegressor], "include_params": ["cv", "n_jobs", "passthrough", "verbose"], "exclude_params": None, "include_attrs": True, "exclude_attrs": ["final_estimator_"], "include_returns": False, "exclude_returns": None, "descr_regex_pattern": None, }, ] FUNCTION_DOCSTRING_CONSISTENCY_CASES = [ { "objects": [ metrics.precision_recall_fscore_support, metrics.f1_score, metrics.fbeta_score, metrics.precision_score, metrics.recall_score, ], "include_params": True, "exclude_params": ["average", "zero_division"], "include_attrs": False, "exclude_attrs": None, "include_returns": False, "exclude_returns": None, "descr_regex_pattern": None, }, { "objects": [ metrics.precision_recall_fscore_support, metrics.f1_score, metrics.fbeta_score, metrics.precision_score, metrics.recall_score, ], "include_params": ["average"], "exclude_params": None, "include_attrs": False, "exclude_attrs": None, "include_returns": False, "exclude_returns": None, "descr_regex_pattern": " ".join( ( r"""This parameter is required for multiclass/multilabel targets\. If ``None``, the metrics for each class are returned\. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``\. 
This is applicable only if targets \(``y_\{true,pred\}``\) are binary\. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives\. ``'macro'``: Calculate metrics for each label, and find their unweighted mean\. This does not take label imbalance into account\. ``'weighted'``: Calculate metrics for each label, and find their average weighted by support \(the number of true instances for each label\)\. This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall\.""" r"[\s\w]*\.*" # optionally match additional sentence r""" ``'samples'``: Calculate metrics for each instance, and find their average \(only meaningful for multilabel classification where this differs from :func:`accuracy_score`\)\.""" ).split() ), }, ] @pytest.mark.parametrize("case", CLASS_DOCSTRING_CONSISTENCY_CASES) @skip_if_no_numpydoc def test_class_docstring_consistency(case): """Check docstrings parameters consistency between related classes.""" assert_docstring_consistency(**case) @pytest.mark.parametrize("case", FUNCTION_DOCSTRING_CONSISTENCY_CASES) @skip_if_no_numpydoc def test_function_docstring_consistency(case): """Check docstrings parameters consistency between related functions.""" assert_docstring_consistency(**case)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_metaestimators.py
sklearn/tests/test_metaestimators.py
"""Common tests for metaestimators""" import functools from contextlib import suppress from inspect import signature import numpy as np import pytest from sklearn.base import BaseEstimator, clone, is_regressor from sklearn.datasets import make_classification from sklearn.ensemble import BaggingClassifier from sklearn.exceptions import NotFittedError from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_selection import RFE, RFECV from sklearn.linear_model import LogisticRegression, Ridge from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import MaxAbsScaler, StandardScaler from sklearn.semi_supervised import SelfTrainingClassifier from sklearn.utils import all_estimators from sklearn.utils._test_common.instance_generator import _construct_instances from sklearn.utils._testing import SkipTest, set_random_state from sklearn.utils.estimator_checks import ( _enforce_estimator_tags_X, _enforce_estimator_tags_y, ) from sklearn.utils.validation import check_is_fitted class DelegatorData: def __init__( self, name, construct, skip_methods=(), fit_args=make_classification(random_state=0), ): self.name = name self.construct = construct self.fit_args = fit_args self.skip_methods = skip_methods # For the following meta estimators we check for the existence of relevant # methods only if the sub estimator also contains them. Any methods that # are implemented in the meta estimator themselves and are not dependent # on the sub estimator are specified in the `skip_methods` parameter. 
DELEGATING_METAESTIMATORS = [ DelegatorData("Pipeline", lambda est: Pipeline([("est", est)])), DelegatorData( "GridSearchCV", lambda est: GridSearchCV(est, param_grid={"param": [5]}, cv=2), skip_methods=["score"], ), DelegatorData( "RandomizedSearchCV", lambda est: RandomizedSearchCV( est, param_distributions={"param": [5]}, cv=2, n_iter=1 ), skip_methods=["score"], ), DelegatorData("RFE", RFE, skip_methods=["transform", "inverse_transform"]), DelegatorData( "RFECV", RFECV, skip_methods=["transform", "inverse_transform", "score"] ), DelegatorData( "BaggingClassifier", BaggingClassifier, skip_methods=[ "transform", "inverse_transform", "score", "predict_proba", "predict_log_proba", "predict", ], ), DelegatorData( "SelfTrainingClassifier", lambda est: SelfTrainingClassifier(est), skip_methods=["transform", "inverse_transform", "predict_proba"], ), ] def test_metaestimator_delegation(): # Ensures specified metaestimators have methods iff subestimator does def hides(method): @property def wrapper(obj): if obj.hidden_method == method.__name__: raise AttributeError("%r is hidden" % obj.hidden_method) return functools.partial(method, obj) return wrapper class SubEstimator(BaseEstimator): def __init__(self, param=1, hidden_method=None): self.param = param self.hidden_method = hidden_method def fit(self, X, y=None, *args, **kwargs): self.coef_ = np.arange(X.shape[1]) self.classes_ = [] return True def _check_fit(self): check_is_fitted(self) @hides def inverse_transform(self, X, *args, **kwargs): self._check_fit() return X @hides def transform(self, X, *args, **kwargs): self._check_fit() return X @hides def predict(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def predict_proba(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def predict_log_proba(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def decision_function(self, X, *args, **kwargs): self._check_fit() return 
np.ones(X.shape[0]) @hides def score(self, X, y, *args, **kwargs): self._check_fit() return 1.0 methods = [ k for k in SubEstimator.__dict__.keys() if not k.startswith("_") and not k.startswith("fit") ] methods.sort() for delegator_data in DELEGATING_METAESTIMATORS: delegate = SubEstimator() delegator = delegator_data.construct(delegate) for method in methods: if method in delegator_data.skip_methods: continue assert hasattr(delegate, method) assert hasattr(delegator, method), ( "%s does not have method %r when its delegate does" % ( delegator_data.name, method, ) ) # delegation before fit raises a NotFittedError if method == "score": with pytest.raises(NotFittedError): getattr(delegator, method)( delegator_data.fit_args[0], delegator_data.fit_args[1] ) else: with pytest.raises(NotFittedError): getattr(delegator, method)(delegator_data.fit_args[0]) delegator.fit(*delegator_data.fit_args) for method in methods: if method in delegator_data.skip_methods: continue # smoke test delegation if method == "score": getattr(delegator, method)( delegator_data.fit_args[0], delegator_data.fit_args[1] ) else: getattr(delegator, method)(delegator_data.fit_args[0]) for method in methods: if method in delegator_data.skip_methods: continue delegate = SubEstimator(hidden_method=method) delegator = delegator_data.construct(delegate) assert not hasattr(delegate, method) assert not hasattr(delegator, method), ( "%s has method %r when its delegate does not" % ( delegator_data.name, method, ) ) def _get_instance_with_pipeline(meta_estimator, init_params): """Given a single meta-estimator instance, generate an instance with a pipeline""" if {"estimator", "base_estimator", "regressor"} & init_params: if is_regressor(meta_estimator): estimator = make_pipeline(TfidfVectorizer(), Ridge()) param_grid = {"ridge__alpha": [0.1, 1.0]} else: estimator = make_pipeline(TfidfVectorizer(), LogisticRegression()) param_grid = {"logisticregression__C": [0.1, 1.0]} if init_params.intersection( {"param_grid", 
"param_distributions"} ): # SearchCV estimators extra_params = {"n_iter": 2} if "n_iter" in init_params else {} return type(meta_estimator)(estimator, param_grid, **extra_params) else: return type(meta_estimator)(estimator) if "transformer_list" in init_params: # FeatureUnion transformer_list = [ ("trans1", make_pipeline(TfidfVectorizer(), MaxAbsScaler())), ( "trans2", make_pipeline(TfidfVectorizer(), StandardScaler(with_mean=False)), ), ] return type(meta_estimator)(transformer_list) if "estimators" in init_params: # stacking, voting if is_regressor(meta_estimator): estimator = [ ("est1", make_pipeline(TfidfVectorizer(), Ridge(alpha=0.1))), ("est2", make_pipeline(TfidfVectorizer(), Ridge(alpha=1))), ] else: estimator = [ ( "est1", make_pipeline(TfidfVectorizer(), LogisticRegression(C=0.1)), ), ("est2", make_pipeline(TfidfVectorizer(), LogisticRegression(C=1))), ] return type(meta_estimator)(estimator) def _generate_meta_estimator_instances_with_pipeline(): """Generate instances of meta-estimators fed with a pipeline Are considered meta-estimators all estimators accepting one of "estimator", "base_estimator" or "estimators". """ print("estimators: ", len(all_estimators())) for _, Estimator in sorted(all_estimators()): sig = set(signature(Estimator).parameters) print("\n", Estimator.__name__, sig) if not sig.intersection( { "estimator", "base_estimator", "regressor", "transformer_list", "estimators", } ): continue with suppress(SkipTest): for meta_estimator in _construct_instances(Estimator): print(meta_estimator) yield _get_instance_with_pipeline(meta_estimator, sig) # TODO: remove data validation for the following estimators # They should be able to work on any data and delegate data validation to # their inner estimator(s). 
DATA_VALIDATION_META_ESTIMATORS_TO_IGNORE = [ "AdaBoostClassifier", "AdaBoostRegressor", "BaggingClassifier", "BaggingRegressor", "ClassifierChain", # data validation is necessary "FrozenEstimator", # this estimator cannot be tested like others. "IterativeImputer", "OneVsOneClassifier", # input validation can't be avoided "RANSACRegressor", "RFE", "RFECV", "RegressorChain", # data validation is necessary "SelfTrainingClassifier", "SequentialFeatureSelector", # not applicable (2D data mandatory) ] DATA_VALIDATION_META_ESTIMATORS = [ est for est in _generate_meta_estimator_instances_with_pipeline() if est.__class__.__name__ not in DATA_VALIDATION_META_ESTIMATORS_TO_IGNORE ] def _get_meta_estimator_id(estimator): return estimator.__class__.__name__ @pytest.mark.parametrize( "estimator", DATA_VALIDATION_META_ESTIMATORS, ids=_get_meta_estimator_id ) def test_meta_estimators_delegate_data_validation(estimator): # Check that meta-estimators delegate data validation to the inner # estimator(s). # clone to avoid side effects and ensure thread-safe test execution. estimator = clone(estimator) rng = np.random.RandomState(0) set_random_state(estimator) n_samples = 30 X = rng.choice(np.array(["aa", "bb", "cc"], dtype=object), size=n_samples) if is_regressor(estimator): y = rng.normal(size=n_samples) else: y = rng.randint(3, size=n_samples) # We convert to lists to make sure it works on array-like X = _enforce_estimator_tags_X(estimator, X).tolist() y = _enforce_estimator_tags_y(estimator, y).tolist() # Calling fit should not raise any data validation exception since X is a # valid input datastructure for the first step of the pipeline passed as # base estimator to the meta estimator. estimator.fit(X, y) # n_features_in_ should not be defined since data is not tabular data. assert not hasattr(estimator, "n_features_in_")
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_docstrings.py
sklearn/tests/test_docstrings.py
import re from inspect import signature from typing import Optional import pytest # make it possible to discover experimental estimators when calling `all_estimators` from sklearn.experimental import ( enable_halving_search_cv, # noqa: F401 enable_iterative_imputer, # noqa: F401 ) from sklearn.utils.discovery import all_displays, all_estimators, all_functions numpydoc_validation = pytest.importorskip("numpydoc.validate") def get_all_methods(): estimators = all_estimators() displays = all_displays() for name, Klass in estimators + displays: if name.startswith("_"): # skip private classes continue methods = [] for name in dir(Klass): if name.startswith("_"): continue method_obj = getattr(Klass, name) if hasattr(method_obj, "__call__") or isinstance(method_obj, property): methods.append(name) methods.append(None) for method in sorted(methods, key=str): yield Klass, method def get_all_functions_names(): functions = all_functions() for _, func in functions: # exclude functions from utils.fixex since they come from external packages if "utils.fixes" not in func.__module__: yield f"{func.__module__}.{func.__name__}" def filter_errors(errors, method, Klass=None): """ Ignore some errors based on the method type. These rules are specific for scikit-learn.""" for code, message in errors: # We ignore following error code, # - RT02: The first line of the Returns section # should contain only the type, .. # (as we may need refer to the name of the returned # object) # - GL01: Docstring text (summary) should start in the line # immediately after the opening quotes (not in the same line, # or leaving a blank line in between) # - GL02: If there's a blank line, it should be before the # first line of the Returns section, not after (it allows to have # short docstrings for properties). if code in ["RT02", "GL01", "GL02"]: continue # Ignore PR02: Unknown parameters for properties. We sometimes use # properties for ducktyping, i.e. 
SGDClassifier.predict_proba # Ignore GL08: Parsing of the method signature failed, possibly because this is # a property. Properties are sometimes used for deprecated attributes and the # attribute is already documented in the class docstring. # # All error codes: # https://numpydoc.readthedocs.io/en/latest/validation.html#built-in-validation-checks if code in ("PR02", "GL08") and Klass is not None and method is not None: method_obj = getattr(Klass, method) if isinstance(method_obj, property): continue # Following codes are only taken into account for the # top level class docstrings: # - ES01: No extended summary found # - SA01: See Also section not found # - EX01: No examples section found if method is not None and code in ["EX01", "SA01", "ES01"]: continue yield code, message def repr_errors(res, Klass=None, method: Optional[str] = None) -> str: """Pretty print original docstring and the obtained errors Parameters ---------- res : dict result of numpydoc.validate.validate Klass : {Estimator, Display, None} estimator object or None method : str if estimator is not None, either the method name or None. Returns ------- str String representation of the error. """ if method is None: if hasattr(Klass, "__init__"): method = "__init__" elif Klass is None: raise ValueError("At least one of Klass, method should be provided") else: raise NotImplementedError if Klass is not None: obj = getattr(Klass, method) try: obj_signature = str(signature(obj)) except TypeError: # In particular we can't parse the signature of properties obj_signature = ( "\nParsing of the method signature failed, " "possibly because this is a property." ) obj_name = Klass.__name__ + "." 
+ method else: obj_signature = "" obj_name = method msg = "\n\n" + "\n\n".join( [ str(res["file"]), obj_name + obj_signature, res["docstring"], "# Errors", "\n".join( " - {}: {}".format(code, message) for code, message in res["errors"] ), ] ) return msg @pytest.mark.parametrize("function_name", get_all_functions_names()) def test_function_docstring(function_name, request): """Check function docstrings using numpydoc.""" res = numpydoc_validation.validate(function_name) res["errors"] = list(filter_errors(res["errors"], method="function")) if res["errors"]: msg = repr_errors(res, method=f"Tested function: {function_name}") raise ValueError(msg) @pytest.mark.parametrize("Klass, method", get_all_methods()) def test_docstring(Klass, method, request): base_import_path = Klass.__module__ import_path = [base_import_path, Klass.__name__] if method is not None: import_path.append(method) import_path = ".".join(import_path) res = numpydoc_validation.validate(import_path) res["errors"] = list(filter_errors(res["errors"], method, Klass=Klass)) if res["errors"]: msg = repr_errors(res, Klass, method) raise ValueError(msg) if __name__ == "__main__": import argparse import sys parser = argparse.ArgumentParser(description="Validate docstring with numpydoc.") parser.add_argument("import_path", help="Import path to validate") args = parser.parse_args() res = numpydoc_validation.validate(args.import_path) import_path_sections = args.import_path.split(".") # When applied to classes, detect class method. For functions # method = None. # TODO: this detection can be improved. Currently we assume that we have # class # methods if the second path element before last is in camel case. 
if len(import_path_sections) >= 2 and re.match( r"(?:[A-Z][a-z]*)+", import_path_sections[-2] ): method = import_path_sections[-1] else: method = None res["errors"] = list(filter_errors(res["errors"], method)) if res["errors"]: msg = repr_errors(res, method=args.import_path) print(msg) sys.exit(1) else: print("All docstring checks passed for {}!".format(args.import_path))
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/metadata_routing_common.py
sklearn/tests/metadata_routing_common.py
import inspect from collections import defaultdict from functools import partial import numpy as np from numpy.testing import assert_array_equal from sklearn.base import ( BaseEstimator, ClassifierMixin, MetaEstimatorMixin, RegressorMixin, TransformerMixin, clone, ) from sklearn.metrics._scorer import _Scorer, mean_squared_error from sklearn.model_selection import BaseCrossValidator from sklearn.model_selection._split import GroupsConsumerMixin from sklearn.utils._metadata_requests import ( SIMPLE_METHODS, ) from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, process_routing, ) from sklearn.utils.multiclass import _check_partial_fit_first_call def record_metadata(obj, record_default=True, **kwargs): """Utility function to store passed metadata to a method of obj. If record_default is False, kwargs whose values are "default" are skipped. This is so that checks on keyword arguments whose default was not changed are skipped. """ stack = inspect.stack() callee = stack[1].function caller = stack[2].function if not hasattr(obj, "_records"): obj._records = defaultdict(lambda: defaultdict(list)) if not record_default: kwargs = { key: val for key, val in kwargs.items() if not isinstance(val, str) or (val != "default") } obj._records[callee][caller].append(kwargs) def check_recorded_metadata(obj, method, parent, split_params=tuple(), **kwargs): """Check whether the expected metadata is passed to the object's method. 
Parameters ---------- obj : estimator object sub-estimator to check routed params for method : str sub-estimator's method where metadata is routed to, or otherwise in the context of metadata routing referred to as 'callee' parent : str the parent method which should have called `method`, or otherwise in the context of metadata routing referred to as 'caller' split_params : tuple, default=empty specifies any parameters which are to be checked as being a subset of the original values **kwargs : dict passed metadata """ all_records = ( getattr(obj, "_records", dict()).get(method, dict()).get(parent, list()) ) for record in all_records: # first check that the names of the metadata passed are the same as # expected. The names are stored as keys in `record`. assert set(kwargs.keys()) == set(record.keys()), ( f"Expected {kwargs.keys()} vs {record.keys()}" ) for key, value in kwargs.items(): recorded_value = record[key] # The following condition is used to check for any specified parameters # being a subset of the original values if key in split_params and recorded_value is not None: assert np.isin(recorded_value, value).all() else: if isinstance(recorded_value, np.ndarray): assert_array_equal(recorded_value, value) else: assert recorded_value is value, ( f"Expected {recorded_value} vs {value}. Method: {method}" ) record_metadata_not_default = partial(record_metadata, record_default=False) def assert_request_is_empty(metadata_request, exclude=None): """Check if a metadata request dict is empty. One can exclude a method or a list of methods from the check using the ``exclude`` parameter. If metadata_request is a MetadataRouter, then ``exclude`` can be of the form ``{"object" : [method, ...]}``. 
""" if isinstance(metadata_request, MetadataRouter): for name, route_mapping in metadata_request: if exclude is not None and name in exclude: _exclude = exclude[name] else: _exclude = None assert_request_is_empty(route_mapping.router, exclude=_exclude) return exclude = [] if exclude is None else exclude for method in SIMPLE_METHODS: if method in exclude: continue mmr = getattr(metadata_request, method) props = [ prop for prop, alias in mmr.requests.items() if isinstance(alias, str) or alias is not None ] assert not props def assert_request_equal(request, dictionary): for method, requests in dictionary.items(): mmr = getattr(request, method) assert mmr.requests == requests empty_methods = [method for method in SIMPLE_METHODS if method not in dictionary] for method in empty_methods: assert not len(getattr(request, method).requests) class _Registry(list): # This list is used to get a reference to the sub-estimators, which are not # necessarily stored on the metaestimator. We need to override __deepcopy__ # because the sub-estimators are probably cloned, which would result in a # new copy of the list, but we need copy and deep copy both to return the # same instance. def __deepcopy__(self, memo): return self def __copy__(self): return self class ConsumingRegressor(RegressorMixin, BaseEstimator): """A regressor consuming metadata. Parameters ---------- registry : list, default=None If a list, the estimator will append itself to the list in order to have a reference to the estimator later on. Since that reference is not required in all tests, registration can be skipped by leaving this value as None. 
""" def __init__(self, registry=None): self.registry = registry def partial_fit(self, X, y, sample_weight="default", metadata="default"): if self.registry is not None: self.registry.append(self) record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return self def fit(self, X, y, sample_weight="default", metadata="default"): if self.registry is not None: self.registry.append(self) record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return self def predict(self, X, y=None, sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return np.zeros(shape=(len(X),)) def score(self, X, y, sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return 1 class NonConsumingClassifier(ClassifierMixin, BaseEstimator): """A classifier which accepts no metadata on any method.""" def __init__(self, alpha=0.0): self.alpha = alpha def fit(self, X, y): self.classes_ = np.unique(y) self.coef_ = np.ones_like(X) return self def partial_fit(self, X, y, classes=None): return self def decision_function(self, X): return self.predict(X) def predict(self, X): y_pred = np.empty(shape=(len(X),)) y_pred[: len(X) // 2] = 0 y_pred[len(X) // 2 :] = 1 return y_pred def predict_proba(self, X): # dummy probabilities to support predict_proba y_proba = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float32) # each row sums up to 1.0: y_proba[:] = np.random.dirichlet(alpha=np.ones(len(self.classes_)), size=len(X)) return y_proba def predict_log_proba(self, X): # dummy probabilities to support predict_log_proba return self.predict_proba(X) class NonConsumingRegressor(RegressorMixin, BaseEstimator): """A classifier which accepts no metadata on any method.""" def fit(self, X, y): return self def partial_fit(self, X, y): return self def predict(self, X): return np.ones(len(X)) # pragma: no cover 
class ConsumingClassifier(ClassifierMixin, BaseEstimator): """A classifier consuming metadata. Parameters ---------- registry : list, default=None If a list, the estimator will append itself to the list in order to have a reference to the estimator later on. Since that reference is not required in all tests, registration can be skipped by leaving this value as None. alpha : float, default=0 This parameter is only used to test the ``*SearchCV`` objects, and doesn't do anything. """ def __init__(self, registry=None, alpha=0.0): self.alpha = alpha self.registry = registry def partial_fit( self, X, y, classes=None, sample_weight="default", metadata="default" ): if self.registry is not None: self.registry.append(self) record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) _check_partial_fit_first_call(self, classes) return self def fit(self, X, y, sample_weight="default", metadata="default"): if self.registry is not None: self.registry.append(self) record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) self.classes_ = np.unique(y) self.coef_ = np.ones_like(X) return self def predict(self, X, sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) y_score = np.empty(shape=(len(X),), dtype="int8") y_score[len(X) // 2 :] = 0 y_score[: len(X) // 2] = 1 return y_score def predict_proba(self, X, sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) y_proba = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float32) # each row sums up to 1.0: y_proba[:] = np.random.dirichlet(alpha=np.ones(len(self.classes_)), size=len(X)) return y_proba def predict_log_proba(self, X, sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return self.predict_proba(X) def decision_function(self, X, 
sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) y_score = np.empty(shape=(len(X),)) y_score[len(X) // 2 :] = 0 y_score[: len(X) // 2] = 1 return y_score def score(self, X, y, sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return 1 class ConsumingClassifierWithoutPredictProba(ConsumingClassifier): """ConsumingClassifier without a predict_proba method, but with predict_log_proba. Used to mimic dynamic method selection such as in the `_parallel_predict_proba()` function called by `BaggingClassifier`. """ @property def predict_proba(self): raise AttributeError("This estimator does not support predict_proba") class ConsumingClassifierWithoutPredictLogProba(ConsumingClassifier): """ConsumingClassifier without a predict_log_proba method, but with predict_proba. Used to mimic dynamic method selection such as in `BaggingClassifier.predict_log_proba()`. """ @property def predict_log_proba(self): raise AttributeError("This estimator does not support predict_log_proba") class ConsumingClassifierWithOnlyPredict(ConsumingClassifier): """ConsumingClassifier with only a predict method. Used to mimic dynamic method selection such as in `BaggingClassifier.predict_log_proba()`. """ @property def predict_proba(self): raise AttributeError("This estimator does not support predict_proba") @property def predict_log_proba(self): raise AttributeError("This estimator does not support predict_log_proba") class ConsumingTransformer(TransformerMixin, BaseEstimator): """A transformer which accepts metadata on fit and transform. Parameters ---------- registry : list, default=None If a list, the estimator will append itself to the list in order to have a reference to the estimator later on. Since that reference is not required in all tests, registration can be skipped by leaving this value as None. 
""" def __init__(self, registry=None): self.registry = registry def fit(self, X, y=None, sample_weight="default", metadata="default"): if self.registry is not None: self.registry.append(self) record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) self.fitted_ = True return self def transform(self, X, sample_weight="default", metadata="default"): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return X + 1 def fit_transform(self, X, y, sample_weight="default", metadata="default"): # implementing ``fit_transform`` is necessary since # ``TransformerMixin.fit_transform`` doesn't route any metadata to # ``transform``, while here we want ``transform`` to receive # ``sample_weight`` and ``metadata``. record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return self.fit(X, y, sample_weight=sample_weight, metadata=metadata).transform( X, sample_weight=sample_weight, metadata=metadata ) def inverse_transform(self, X, sample_weight=None, metadata=None): record_metadata_not_default( self, sample_weight=sample_weight, metadata=metadata ) return X - 1 class ConsumingNoFitTransformTransformer(BaseEstimator): """A metadata consuming transformer that doesn't inherit from TransformerMixin, and thus doesn't implement `fit_transform`. 
Note that TransformerMixin's `fit_transform` doesn't route metadata to `transform`.""" def __init__(self, registry=None): self.registry = registry def fit(self, X, y=None, sample_weight=None, metadata=None): if self.registry is not None: self.registry.append(self) record_metadata(self, sample_weight=sample_weight, metadata=metadata) return self def transform(self, X, sample_weight=None, metadata=None): record_metadata(self, sample_weight=sample_weight, metadata=metadata) return X class ConsumingScorer(_Scorer): def __init__(self, registry=None): super().__init__( score_func=mean_squared_error, sign=1, kwargs={}, response_method="predict" ) self.registry = registry def _score(self, method_caller, clf, X, y, **kwargs): if self.registry is not None: self.registry.append(self) record_metadata_not_default(self, **kwargs) sample_weight = kwargs.get("sample_weight", None) return super()._score(method_caller, clf, X, y, sample_weight=sample_weight) class ConsumingSplitter(GroupsConsumerMixin, BaseCrossValidator): def __init__(self, registry=None): self.registry = registry def split(self, X, y=None, groups="default", metadata="default"): if self.registry is not None: self.registry.append(self) record_metadata_not_default(self, groups=groups, metadata=metadata) split_index = len(X) // 2 train_indices = list(range(0, split_index)) test_indices = list(range(split_index, len(X))) yield test_indices, train_indices yield train_indices, test_indices def get_n_splits(self, X=None, y=None, groups=None, metadata=None): return 2 def _iter_test_indices(self, X=None, y=None, groups=None): split_index = len(X) // 2 train_indices = list(range(0, split_index)) test_indices = list(range(split_index, len(X))) yield test_indices yield train_indices class MetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): """A meta-regressor which is only a router.""" def __init__(self, estimator): self.estimator = estimator def fit(self, X, y, **fit_params): params = process_routing(self, 
"fit", **fit_params) self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) def get_metadata_routing(self): router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) return router class WeightedMetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): """A meta-regressor which is also a consumer.""" def __init__(self, estimator, registry=None): self.estimator = estimator self.registry = registry def fit(self, X, y, sample_weight=None, **fit_params): if self.registry is not None: self.registry.append(self) record_metadata(self, sample_weight=sample_weight) params = process_routing(self, "fit", sample_weight=sample_weight, **fit_params) self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) return self def predict(self, X, **predict_params): params = process_routing(self, "predict", **predict_params) return self.estimator_.predict(X, **params.estimator.predict) def get_metadata_routing(self): router = ( MetadataRouter(owner=self) .add_self_request(self) .add( estimator=self.estimator, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="predict", callee="predict"), ) ) return router class WeightedMetaClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): """A meta-estimator which also consumes sample_weight itself in ``fit``.""" def __init__(self, estimator, registry=None): self.estimator = estimator self.registry = registry def fit(self, X, y, sample_weight=None, **kwargs): if self.registry is not None: self.registry.append(self) record_metadata(self, sample_weight=sample_weight) params = process_routing(self, "fit", sample_weight=sample_weight, **kwargs) self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit) return self def get_metadata_routing(self): router = ( MetadataRouter(owner=self) .add_self_request(self) .add( estimator=self.estimator, method_mapping=MethodMapping().add(caller="fit", 
callee="fit"), ) ) return router class MetaTransformer(MetaEstimatorMixin, TransformerMixin, BaseEstimator): """A simple meta-transformer.""" def __init__(self, transformer): self.transformer = transformer def fit(self, X, y=None, **fit_params): params = process_routing(self, "fit", **fit_params) self.transformer_ = clone(self.transformer).fit(X, y, **params.transformer.fit) return self def transform(self, X, y=None, **transform_params): params = process_routing(self, "transform", **transform_params) return self.transformer_.transform(X, **params.transformer.transform) def get_metadata_routing(self): return MetadataRouter(owner=self).add( transformer=self.transformer, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="transform", callee="transform"), )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_calibration.py
sklearn/tests/test_calibration.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np import pytest from numpy.testing import assert_allclose from sklearn import config_context from sklearn.base import BaseEstimator, ClassifierMixin, clone from sklearn.calibration import ( CalibratedClassifierCV, CalibrationDisplay, _CalibratedClassifier, _sigmoid_calibration, _SigmoidCalibration, _TemperatureScaling, calibration_curve, ) from sklearn.datasets import load_iris, make_blobs, make_classification from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.dummy import DummyClassifier from sklearn.ensemble import ( RandomForestClassifier, VotingClassifier, ) from sklearn.feature_extraction import DictVectorizer from sklearn.frozen import FrozenEstimator from sklearn.impute import SimpleImputer from sklearn.isotonic import IsotonicRegression from sklearn.linear_model import LogisticRegression, SGDClassifier from sklearn.metrics import ( accuracy_score, brier_score_loss, log_loss, roc_auc_score, ) from sklearn.model_selection import ( KFold, LeaveOneOut, check_cv, cross_val_predict, cross_val_score, train_test_split, ) from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import LabelEncoder, StandardScaler from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier from sklearn.utils._array_api import ( _convert_to_numpy, _get_namespace_device_dtype_ids, device, get_namespace, yield_namespace_device_dtype_combinations, ) from sklearn.utils._mocking import CheckingClassifier from sklearn.utils._tags import get_tags from sklearn.utils._testing import ( _array_api_for_tests, _convert_container, assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.extmath import softmax from sklearn.utils.fixes import CSR_CONTAINERS from sklearn.utils.validation import check_is_fitted N_SAMPLES = 200 
@pytest.fixture(scope="module") def data(): X, y = make_classification(n_samples=N_SAMPLES, n_features=6, random_state=42) return X, y def test_calibration_method_raises(data): # Check that invalid values raise for the 'method' parameter. X, y = data invalid_method = "not sigmoid, isotonic, or temperature" with pytest.raises(ValueError): CalibratedClassifierCV(method=invalid_method).fit(X, y) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) @pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) @pytest.mark.parametrize("ensemble", [True, False]) def test_calibration(data, method, csr_container, ensemble): # Test calibration objects with isotonic, sigmoid n_samples = N_SAMPLES // 2 X, y = data sample_weight = np.random.RandomState(seed=42).uniform(size=y.size) X = X - X.min() # MultinomialNB only allows positive X # split train and test X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_test, y_test = X[n_samples:], y[n_samples:] # Naive-Bayes clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train) prob_pos_clf = clf.predict_proba(X_test)[:, 1] cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble) with pytest.raises(ValueError): cal_clf.fit(X, y) # Naive Bayes with calibration for this_X_train, this_X_test in [ (X_train, X_test), (csr_container(X_train), csr_container(X_test)), ]: cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble) # Note that this fit overwrites the fit on the entire training # set cal_clf.fit(this_X_train, y_train, sample_weight=sw_train) prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1] # Check that brier score has improved after calibration assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss( y_test, prob_pos_cal_clf ) # Check invariance against relabeling [0, 1] -> [1, 2] cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train) prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] 
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled) # Check invariance against relabeling [0, 1] -> [-1, 1] cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train) prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled) # Check invariance against relabeling [0, 1] -> [1, 0] cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train) prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] if method == "sigmoid": assert_array_almost_equal(prob_pos_cal_clf, 1 - prob_pos_cal_clf_relabeled) else: # Isotonic calibration is not invariant against relabeling # but should improve in both cases assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss( (y_test + 1) % 2, prob_pos_cal_clf_relabeled ) def test_calibration_default_estimator(data): # Check estimator default is LinearSVC X, y = data calib_clf = CalibratedClassifierCV(cv=2) calib_clf.fit(X, y) base_est = calib_clf.calibrated_classifiers_[0].estimator assert isinstance(base_est, LinearSVC) @pytest.mark.parametrize("ensemble", [True, False]) def test_calibration_cv_splitter(data, ensemble): # Check when `cv` is a CV splitter X, y = data splits = 5 kfold = KFold(n_splits=splits) calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble) assert isinstance(calib_clf.cv, KFold) assert calib_clf.cv.n_splits == splits calib_clf.fit(X, y) expected_n_clf = splits if ensemble else 1 assert len(calib_clf.calibrated_classifiers_) == expected_n_clf def test_calibration_cv_nfold(data): # Check error raised when number of examples per class less than nfold X, y = data kfold = KFold(n_splits=101) calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=True) with pytest.raises(ValueError, match="Requesting 101-fold cross-validation"): calib_clf.fit(X, y) calib_clf = CalibratedClassifierCV(cv=LeaveOneOut(), ensemble=True) with pytest.raises(ValueError, match="LeaveOneOut cross-validation 
does"): calib_clf.fit(X, y) @pytest.mark.parametrize("method", ["sigmoid", "isotonic", "temperature"]) @pytest.mark.parametrize("ensemble", [True, False]) def test_sample_weight(data, method, ensemble): n_samples = N_SAMPLES // 2 X, y = data sample_weight = np.random.RandomState(seed=42).uniform(size=len(y)) X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_test = X[n_samples:] estimator = LinearSVC(random_state=42) calibrated_clf = CalibratedClassifierCV(estimator, method=method, ensemble=ensemble) calibrated_clf.fit(X_train, y_train, sample_weight=sw_train) probs_with_sw = calibrated_clf.predict_proba(X_test) # As the weights are used for the calibration, they should still yield # different predictions calibrated_clf.fit(X_train, y_train) probs_without_sw = calibrated_clf.predict_proba(X_test) diff = np.linalg.norm(probs_with_sw - probs_without_sw) assert diff > 0.1 # TODO: remove mark once loky bug is fixed: # https://github.com/joblib/loky/issues/458 @pytest.mark.thread_unsafe @pytest.mark.parametrize("method", ["sigmoid", "isotonic", "temperature"]) @pytest.mark.parametrize("ensemble", [True, False]) def test_parallel_execution(data, method, ensemble): """Test parallel calibration""" X, y = data X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) estimator = make_pipeline(StandardScaler(), LinearSVC(random_state=42)) cal_clf_parallel = CalibratedClassifierCV( estimator, method=method, n_jobs=2, ensemble=ensemble ) cal_clf_parallel.fit(X_train, y_train) probs_parallel = cal_clf_parallel.predict_proba(X_test) cal_clf_sequential = CalibratedClassifierCV( estimator, method=method, n_jobs=1, ensemble=ensemble ) cal_clf_sequential.fit(X_train, y_train) probs_sequential = cal_clf_sequential.predict_proba(X_test) assert_allclose(probs_parallel, probs_sequential) @pytest.mark.parametrize("method", ["sigmoid", "isotonic"]) @pytest.mark.parametrize("ensemble", [True, False]) # increase the number of RNG seeds to 
assess the statistical stability of this # test: @pytest.mark.parametrize("seed", range(2)) def test_calibration_multiclass(method, ensemble, seed): def multiclass_brier(y_true, proba_pred, n_classes): Y_onehot = np.eye(n_classes)[y_true] return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0] # Test calibration for multiclass with classifier that implements # only decision function. clf = LinearSVC(random_state=7) X, y = make_blobs( n_samples=500, n_features=100, random_state=seed, centers=10, cluster_std=15.0 ) # Use an unbalanced dataset by collapsing 8 clusters into one class # to make the naive calibration based on a softmax more unlikely # to work. y[y > 2] = 2 n_classes = np.unique(y).shape[0] X_train, y_train = X[::2], y[::2] X_test, y_test = X[1::2], y[1::2] clf.fit(X_train, y_train) cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble) cal_clf.fit(X_train, y_train) probas = cal_clf.predict_proba(X_test) # Check probabilities sum to 1 assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test))) # Check that the dataset is not too trivial, otherwise it's hard # to get interesting calibration data during the internal # cross-validation loop. assert 0.65 < clf.score(X_test, y_test) < 0.95 # Check that the accuracy of the calibrated model is never degraded # too much compared to the original classifier. 
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test) # Check that Brier loss of calibrated classifier is smaller than # loss obtained by naively turning OvR decision function to # probabilities via a softmax uncalibrated_brier = multiclass_brier( y_test, softmax(clf.decision_function(X_test)), n_classes=n_classes ) calibrated_brier = multiclass_brier(y_test, probas, n_classes=n_classes) assert calibrated_brier < 1.1 * uncalibrated_brier # Test that calibration of a multiclass classifier decreases log-loss # for RandomForestClassifier clf = RandomForestClassifier(n_estimators=30, random_state=42) clf.fit(X_train, y_train) clf_probs = clf.predict_proba(X_test) uncalibrated_brier = multiclass_brier(y_test, clf_probs, n_classes=n_classes) cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble) cal_clf.fit(X_train, y_train) cal_clf_probs = cal_clf.predict_proba(X_test) calibrated_brier = multiclass_brier(y_test, cal_clf_probs, n_classes=n_classes) assert calibrated_brier < 1.1 * uncalibrated_brier def test_calibration_zero_probability(): # Test an edge case where _CalibratedClassifier avoids numerical errors # in the multiclass normalization step if all the calibrators output # are zero all at once for a given sample and instead fallback to uniform # probabilities. class ZeroCalibrator: # This function is called from _CalibratedClassifier.predict_proba. def predict(self, X): return np.zeros(X.shape[0]) X, y = make_blobs( n_samples=50, n_features=10, random_state=7, centers=10, cluster_std=15.0 ) clf = DummyClassifier().fit(X, y) calibrator = ZeroCalibrator() cal_clf = _CalibratedClassifier( estimator=clf, calibrators=[calibrator], classes=clf.classes_ ) probas = cal_clf.predict_proba(X) # Check that all probabilities are uniformly 1. 
/ clf.n_classes_ assert_allclose(probas, 1.0 / clf.n_classes_) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) @pytest.mark.parametrize("method", ["sigmoid", "isotonic", "temperature"]) def test_calibration_frozen(csr_container, method): """Test calibration for frozen classifiers""" n_samples = 50 X, y = make_classification(n_samples=3 * n_samples, n_features=6, random_state=42) sample_weight = np.random.RandomState(seed=42).uniform(size=y.size) X -= X.min() # MultinomialNB only allows positive X # split train and test X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_calib, y_calib, sw_calib = ( X[n_samples : 2 * n_samples], y[n_samples : 2 * n_samples], sample_weight[n_samples : 2 * n_samples], ) X_test, y_test = X[2 * n_samples :], y[2 * n_samples :] # Naive-Bayes clf = MultinomialNB() clf.fit(X_train, y_train, sw_train) prob_pos_clf = clf.predict_proba(X_test)[:, 1] # Naive Bayes with calibration for this_X_calib, this_X_test in [ (X_calib, X_test), (csr_container(X_calib), csr_container(X_test)), ]: cal_clf_frozen = CalibratedClassifierCV(FrozenEstimator(clf), method=method) for sw in [sw_calib, None]: cal_clf_frozen.fit(this_X_calib, y_calib, sample_weight=sw) y_prob_frozen = cal_clf_frozen.predict_proba(this_X_test) y_pred_frozen = cal_clf_frozen.predict(this_X_test) prob_pos_cal_clf_frozen = y_prob_frozen[:, 1] assert_array_equal( y_pred_frozen, np.array([0, 1])[np.argmax(y_prob_frozen, axis=1)] ) assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss( y_test, prob_pos_cal_clf_frozen ) @pytest.mark.parametrize( ["method", "calibrator"], [ ("sigmoid", _SigmoidCalibration()), ("isotonic", IsotonicRegression(out_of_bounds="clip")), ("temperature", _TemperatureScaling()), ], ) def test_calibration_ensemble_false(data, method, calibrator): # Test that `ensemble=False` is the same as using predictions from # `cross_val_predict` to train calibrator. 
X, y = data clf = LinearSVC(random_state=7) cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False) cal_clf.fit(X, y) cal_probas = cal_clf.predict_proba(X) # Get probas manually unbiased_preds = cross_val_predict(clf, X, y, cv=3, method="decision_function") calibrator.fit(unbiased_preds, y) # Use `clf` fit on all data clf.fit(X, y) clf_df = clf.decision_function(X) manual_probas = calibrator.predict(clf_df) if method == "temperature": if (manual_probas.ndim == 2) and (manual_probas.shape[1] == 2): manual_probas = manual_probas[:, 1] assert_allclose(cal_probas[:, 1], manual_probas) def test_sigmoid_calibration(): """Test calibration values with Platt sigmoid model""" exF = np.array([5, -4, 1.0]) exY = np.array([1, -1, -1]) # computed from my python port of the C++ code in LibSVM AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512]) assert_array_almost_equal(AB_lin_libsvm, _sigmoid_calibration(exF, exY), 3) lin_prob = 1.0 / (1.0 + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1])) sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF) assert_array_almost_equal(lin_prob, sk_prob, 6) # check that _SigmoidCalibration().fit only accepts 1d array or 2d column # arrays with pytest.raises(ValueError): _SigmoidCalibration().fit(np.vstack((exF, exF)), exY) @pytest.mark.parametrize( "n_classes", [2, 3, 5], ) @pytest.mark.parametrize( "ensemble", [True, False], ) def test_temperature_scaling(n_classes, ensemble): """Check temperature scaling calibration""" X, y = make_classification( n_samples=1000, n_features=10, n_informative=10, n_redundant=0, n_classes=n_classes, n_clusters_per_class=1, class_sep=2.0, random_state=42, ) X_train, X_cal, y_train, y_cal = train_test_split(X, y, random_state=42) clf = LogisticRegression(C=np.inf, tol=1e-8, max_iter=200, random_state=0) clf.fit(X_train, y_train) # Train the calibrator on the calibrating set cal_clf = CalibratedClassifierCV( FrozenEstimator(clf), cv=3, method="temperature", ensemble=ensemble 
).fit(X_cal, y_cal) calibrated_classifiers = cal_clf.calibrated_classifiers_ for calibrated_classifier in calibrated_classifiers: # There is one and only one temperature scaling calibrator # for each calibrated classifier assert len(calibrated_classifier.calibrators) == 1 calibrator = calibrated_classifier.calibrators[0] # Should not raise any error check_is_fitted(calibrator) # The optimal inverse temperature parameter should always be positive assert calibrator.beta_ > 0 if not ensemble: # Accuracy score is invariant under temperature scaling y_pred = clf.predict(X_cal) y_pred_cal = cal_clf.predict(X_cal) assert accuracy_score(y_cal, y_pred_cal) == accuracy_score(y_cal, y_pred) # Log Loss should be improved on the calibrating set y_scores = clf.predict_proba(X_cal) y_scores_cal = cal_clf.predict_proba(X_cal) assert log_loss(y_cal, y_scores_cal) <= log_loss(y_cal, y_scores) # Refinement error should be invariant under temperature scaling. # Use ROC AUC as a proxy for refinement error. Also note that ROC AUC # itself is invariant under strict monotone transformations. if n_classes == 2: y_scores = y_scores[:, 1] y_scores_cal = y_scores_cal[:, 1] assert_allclose( roc_auc_score(y_cal, y_scores, multi_class="ovr"), roc_auc_score(y_cal, y_scores_cal, multi_class="ovr"), ) # For Logistic Regression, the optimal temperature should be close to 1.0 # on the training set. 
y_scores_train = clf.predict_proba(X_train) ts = _TemperatureScaling().fit(y_scores_train, y_train) assert_allclose(ts.beta_, 1.0, atol=1e-6, rtol=0) def test_temperature_scaling_input_validation(global_dtype): # Check that _TemperatureScaling can handle 2d-array with only 1 feature X = np.arange(10).astype(global_dtype) X_2d = X.reshape(-1, 1) y = np.random.randint(0, 2, size=X.shape[0]) ts = _TemperatureScaling().fit(X, y) ts_2d = _TemperatureScaling().fit(X_2d, y) assert get_tags(ts) == get_tags(ts_2d) y_pred1 = ts.predict(X) y_pred2 = ts_2d.predict(X_2d) assert_allclose(y_pred1, y_pred2) def test_calibration_curve(): """Check calibration_curve function""" y_true = np.array([0, 0, 0, 1, 1, 1]) y_pred = np.array([0.0, 0.1, 0.2, 0.8, 0.9, 1.0]) prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2) assert len(prob_true) == len(prob_pred) assert len(prob_true) == 2 assert_almost_equal(prob_true, [0, 1]) assert_almost_equal(prob_pred, [0.1, 0.9]) # Probabilities outside [0, 1] should not be accepted at all. 
with pytest.raises(ValueError): calibration_curve([1], [-0.1]) # test that quantiles work as expected y_true2 = np.array([0, 0, 0, 0, 1, 1]) y_pred2 = np.array([0.0, 0.1, 0.2, 0.5, 0.9, 1.0]) prob_true_quantile, prob_pred_quantile = calibration_curve( y_true2, y_pred2, n_bins=2, strategy="quantile" ) assert len(prob_true_quantile) == len(prob_pred_quantile) assert len(prob_true_quantile) == 2 assert_almost_equal(prob_true_quantile, [0, 2 / 3]) assert_almost_equal(prob_pred_quantile, [0.1, 0.8]) # Check that error is raised when invalid strategy is selected with pytest.raises(ValueError): calibration_curve(y_true2, y_pred2, strategy="percentile") @pytest.mark.parametrize("method", ["sigmoid", "isotonic", "temperature"]) @pytest.mark.parametrize("ensemble", [True, False]) def test_calibration_nan_imputer(method, ensemble): """Test that calibration can accept nan""" X, y = make_classification( n_samples=10, n_features=2, n_informative=2, n_redundant=0, random_state=42 ) X[0, 0] = np.nan clf = Pipeline( [("imputer", SimpleImputer()), ("rf", RandomForestClassifier(n_estimators=1))] ) clf_c = CalibratedClassifierCV(clf, cv=2, method=method, ensemble=ensemble) clf_c.fit(X, y) clf_c.predict(X) @pytest.mark.parametrize("method", ["sigmoid", "isotonic", "temperature"]) @pytest.mark.parametrize("ensemble", [True, False]) def test_calibration_prob_sum(method, ensemble): # Test that sum of probabilities is (max) 1. 
A non-regression test for # issue #7796 - when test has fewer classes than train X, _ = make_classification(n_samples=10, n_features=5, n_classes=2) y = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0] clf = LinearSVC(C=1.0, random_state=7) # In the first and last fold, test will have 1 class while train will have 2 clf_prob = CalibratedClassifierCV( clf, method=method, cv=KFold(n_splits=3), ensemble=ensemble ) clf_prob.fit(X, y) assert_allclose(clf_prob.predict_proba(X).sum(axis=1), 1.0) @pytest.mark.parametrize("ensemble", [True, False]) def test_calibration_less_classes(ensemble): # Test to check calibration works fine when train set in a test-train # split does not contain all classes # In 1st split, train is missing class 0 # In 3rd split, train is missing class 3 X = np.random.randn(12, 5) y = [0, 0, 0, 1] + [1, 1, 2, 2] + [2, 3, 3, 3] clf = DecisionTreeClassifier(random_state=7) cal_clf = CalibratedClassifierCV( clf, method="sigmoid", cv=KFold(3), ensemble=ensemble ) cal_clf.fit(X, y) if ensemble: classes = np.arange(4) for calib_i, class_i in zip([0, 2], [0, 3]): proba = cal_clf.calibrated_classifiers_[calib_i].predict_proba(X) # Check that the unobserved class has proba=0 assert_array_equal(proba[:, class_i], np.zeros(len(y))) # Check for all other classes proba>0 assert np.all(proba[:, classes != class_i] > 0) # When `ensemble=False`, `cross_val_predict` is used to compute predictions # to fit only one `calibrated_classifiers_` else: proba = cal_clf.calibrated_classifiers_[0].predict_proba(X) assert_array_almost_equal(proba.sum(axis=1), np.ones(proba.shape[0])) @pytest.mark.parametrize( "X", [ np.random.RandomState(42).randn(15, 5, 2), np.random.RandomState(42).randn(15, 5, 2, 6), ], ) def test_calibration_accepts_ndarray(X): """Test that calibration accepts n-dimensional arrays as input""" y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0] class MockTensorClassifier(ClassifierMixin, BaseEstimator): """A toy estimator that accepts tensor inputs""" def fit(self, X, y): 
self.classes_ = np.unique(y) return self def decision_function(self, X): # toy decision function that just needs to have the right shape: return X.reshape(X.shape[0], -1).sum(axis=1) calibrated_clf = CalibratedClassifierCV(MockTensorClassifier()) # we should be able to fit this classifier with no error calibrated_clf.fit(X, y) @pytest.fixture def dict_data(): dict_data = [ {"state": "NY", "age": "adult"}, {"state": "TX", "age": "adult"}, {"state": "VT", "age": "child"}, {"state": "CT", "age": "adult"}, {"state": "BR", "age": "child"}, ] text_labels = [1, 0, 1, 1, 0] return dict_data, text_labels @pytest.fixture def dict_data_pipeline(dict_data): X, y = dict_data pipeline_prefit = Pipeline( [("vectorizer", DictVectorizer()), ("clf", RandomForestClassifier())] ) return pipeline_prefit.fit(X, y) def test_calibration_dict_pipeline(dict_data, dict_data_pipeline): """Test that calibration works in prefit pipeline with transformer `X` is not array-like, sparse matrix or dataframe at the start. See https://github.com/scikit-learn/scikit-learn/issues/8710 Also test it can predict without running into validation errors. See https://github.com/scikit-learn/scikit-learn/issues/19637 """ X, y = dict_data clf = dict_data_pipeline calib_clf = CalibratedClassifierCV(FrozenEstimator(clf), cv=2) calib_clf.fit(X, y) # Check attributes are obtained from fitted estimator assert_array_equal(calib_clf.classes_, clf.classes_) # Neither the pipeline nor the calibration meta-estimator # expose the n_features_in_ check on this kind of data. 
assert not hasattr(clf, "n_features_in_") assert not hasattr(calib_clf, "n_features_in_") # Ensure that no error is thrown with predict and predict_proba calib_clf.predict(X) calib_clf.predict_proba(X) def test_calibration_attributes(): # Check that `n_features_in_` and `classes_` attributes created properly X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7) calib_clf = CalibratedClassifierCV(LinearSVC(C=1), cv=2) calib_clf.fit(X, y) classes = LabelEncoder().fit(y).classes_ assert_array_equal(calib_clf.classes_, classes) assert calib_clf.n_features_in_ == X.shape[1] def test_calibration_inconsistent_prefit_n_features_in(): # Check that `n_features_in_` from prefit base estimator # is consistent with training set X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7) clf = LinearSVC(C=1).fit(X, y) calib_clf = CalibratedClassifierCV(FrozenEstimator(clf)) msg = "X has 3 features, but LinearSVC is expecting 5 features as input." with pytest.raises(ValueError, match=msg): calib_clf.fit(X[:, :3], y) def test_calibration_votingclassifier(): # Check that `CalibratedClassifier` works with `VotingClassifier`. # The method `predict_proba` from `VotingClassifier` is dynamically # defined via a property that only works when voting="soft". 
X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7) vote = VotingClassifier( estimators=[("lr" + str(i), LogisticRegression()) for i in range(3)], voting="soft", ) vote.fit(X, y) calib_clf = CalibratedClassifierCV(estimator=FrozenEstimator(vote)) # smoke test: should not raise an error calib_clf.fit(X, y) @pytest.fixture(scope="module") def iris_data(): return load_iris(return_X_y=True) @pytest.fixture(scope="module") def iris_data_binary(iris_data): X, y = iris_data return X[y < 2], y[y < 2] @pytest.mark.parametrize("n_bins", [5, 10]) @pytest.mark.parametrize("strategy", ["uniform", "quantile"]) def test_calibration_display_compute(pyplot, iris_data_binary, n_bins, strategy): # Ensure `CalibrationDisplay.from_predictions` and `calibration_curve` # compute the same results. Also checks attributes of the # CalibrationDisplay object. X, y = iris_data_binary lr = LogisticRegression().fit(X, y) viz = CalibrationDisplay.from_estimator( lr, X, y, n_bins=n_bins, strategy=strategy, alpha=0.8 ) y_prob = lr.predict_proba(X)[:, 1] prob_true, prob_pred = calibration_curve( y, y_prob, n_bins=n_bins, strategy=strategy ) assert_allclose(viz.prob_true, prob_true) assert_allclose(viz.prob_pred, prob_pred) assert_allclose(viz.y_prob, y_prob) assert viz.estimator_name == "LogisticRegression" # cannot fail thanks to pyplot fixture import matplotlib as mpl assert isinstance(viz.line_, mpl.lines.Line2D) assert viz.line_.get_alpha() == 0.8 assert isinstance(viz.ax_, mpl.axes.Axes) assert isinstance(viz.figure_, mpl.figure.Figure) assert viz.ax_.get_xlabel() == "Mean predicted probability (Positive class: 1)" assert viz.ax_.get_ylabel() == "Fraction of positives (Positive class: 1)" expected_legend_labels = ["LogisticRegression", "Perfectly calibrated"] legend_labels = viz.ax_.get_legend().get_texts() assert len(legend_labels) == len(expected_legend_labels) for labels in legend_labels: assert labels.get_text() in expected_legend_labels def 
test_plot_calibration_curve_pipeline(pyplot, iris_data_binary): # Ensure pipelines are supported by CalibrationDisplay.from_estimator X, y = iris_data_binary clf = make_pipeline(StandardScaler(), LogisticRegression()) clf.fit(X, y) viz = CalibrationDisplay.from_estimator(clf, X, y) expected_legend_labels = [viz.estimator_name, "Perfectly calibrated"] legend_labels = viz.ax_.get_legend().get_texts() assert len(legend_labels) == len(expected_legend_labels) for labels in legend_labels: assert labels.get_text() in expected_legend_labels @pytest.mark.parametrize( "name, expected_label", [(None, "_line1"), ("my_est", "my_est")] ) def test_calibration_display_default_labels(pyplot, name, expected_label): prob_true = np.array([0, 1, 1, 0]) prob_pred = np.array([0.2, 0.8, 0.8, 0.4]) y_prob = np.array([]) viz = CalibrationDisplay(prob_true, prob_pred, y_prob, estimator_name=name) viz.plot() expected_legend_labels = [] if name is None else [name] expected_legend_labels.append("Perfectly calibrated") legend_labels = viz.ax_.get_legend().get_texts() assert len(legend_labels) == len(expected_legend_labels) for labels in legend_labels: assert labels.get_text() in expected_legend_labels def test_calibration_display_label_class_plot(pyplot): # Checks that when instantiating `CalibrationDisplay` class then calling # `plot`, `self.estimator_name` is the one given in `plot` prob_true = np.array([0, 1, 1, 0]) prob_pred = np.array([0.2, 0.8, 0.8, 0.4]) y_prob = np.array([]) name = "name one" viz = CalibrationDisplay(prob_true, prob_pred, y_prob, estimator_name=name) assert viz.estimator_name == name name = "name two" viz.plot(name=name) expected_legend_labels = [name, "Perfectly calibrated"] legend_labels = viz.ax_.get_legend().get_texts() assert len(legend_labels) == len(expected_legend_labels) for labels in legend_labels: assert labels.get_text() in expected_legend_labels @pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) def 
test_calibration_display_name_multiple_calls( constructor_name, pyplot, iris_data_binary ): # Check that the `name` used when calling # `CalibrationDisplay.from_predictions` or # `CalibrationDisplay.from_estimator` is used when multiple # `CalibrationDisplay.viz.plot()` calls are made. X, y = iris_data_binary clf_name = "my hand-crafted name" clf = LogisticRegression().fit(X, y) y_prob = clf.predict_proba(X)[:, 1] constructor = getattr(CalibrationDisplay, constructor_name) params = (clf, X, y) if constructor_name == "from_estimator" else (y, y_prob) viz = constructor(*params, name=clf_name) assert viz.estimator_name == clf_name pyplot.close("all") viz.plot() expected_legend_labels = [clf_name, "Perfectly calibrated"] legend_labels = viz.ax_.get_legend().get_texts() assert len(legend_labels) == len(expected_legend_labels) for labels in legend_labels: assert labels.get_text() in expected_legend_labels pyplot.close("all") clf_name = "another_name" viz.plot(name=clf_name) assert len(legend_labels) == len(expected_legend_labels) for labels in legend_labels: assert labels.get_text() in expected_legend_labels def test_calibration_display_ref_line(pyplot, iris_data_binary): # Check that `ref_line` only appears once X, y = iris_data_binary lr = LogisticRegression().fit(X, y) dt = DecisionTreeClassifier().fit(X, y) viz = CalibrationDisplay.from_estimator(lr, X, y) viz2 = CalibrationDisplay.from_estimator(dt, X, y, ax=viz.ax_) labels = viz2.ax_.get_legend_handles_labels()[1] assert labels.count("Perfectly calibrated") == 1 @pytest.mark.parametrize("dtype_y_str", [str, object]) def test_calibration_curve_pos_label_error_str(dtype_y_str): """Check error message when a `pos_label` is not specified with `str` targets.""" rng = np.random.RandomState(42)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_multiclass.py
sklearn/tests/test_multiclass.py
from re import escape import numpy as np import pytest import scipy.sparse as sp from numpy.testing import assert_allclose from sklearn import datasets, svm from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.datasets import load_breast_cancer from sklearn.exceptions import NotFittedError from sklearn.impute import SimpleImputer from sklearn.linear_model import ( ElasticNet, Lasso, LinearRegression, LogisticRegression, Perceptron, Ridge, SGDClassifier, ) from sklearn.metrics import precision_score, recall_score from sklearn.model_selection import GridSearchCV, cross_val_score from sklearn.multiclass import ( OneVsOneClassifier, OneVsRestClassifier, OutputCodeClassifier, ) from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.pipeline import Pipeline, make_pipeline from sklearn.svm import SVC, LinearSVC from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.utils import ( check_array, shuffle, ) from sklearn.utils._mocking import CheckingClassifier from sklearn.utils._testing import assert_almost_equal, assert_array_equal from sklearn.utils.fixes import ( COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS, DOK_CONTAINERS, LIL_CONTAINERS, ) from sklearn.utils.multiclass import check_classification_targets, type_of_target iris = datasets.load_iris() rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] n_classes = 3 def test_ovr_exceptions(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) # test predicting without fitting with pytest.raises(NotFittedError): ovr.predict([]) # Fail on multioutput data msg = "Multioutput target data is not supported with label binarization" with pytest.raises(ValueError, match=msg): X = np.array([[1, 0], [0, 1]]) y = np.array([[1, 2], [3, 1]]) OneVsRestClassifier(MultinomialNB()).fit(X, y) with pytest.raises(ValueError, match=msg): X = np.array([[1, 0], [0, 
1]]) y = np.array([[1.5, 2.4], [3.1, 0.8]]) OneVsRestClassifier(MultinomialNB()).fit(X, y) def test_check_classification_targets(): # Test that check_classification_target return correct type. #5782 y = np.array([0.0, 1.1, 2.0, 3.0]) msg = type_of_target(y) with pytest.raises(ValueError, match=msg): check_classification_targets(y) def test_ovr_ties(): """Check that ties-breaking matches np.argmax behavior Non-regression test for issue #14124 """ class Dummy(BaseEstimator): def fit(self, X, y): return self def decision_function(self, X): return np.zeros(len(X)) X = np.array([[0], [0], [0], [0]]) y = np.array([0, 1, 2, 3]) clf = OneVsRestClassifier(Dummy()).fit(X, y) assert_array_equal(clf.predict(X), np.argmax(clf.decision_function(X), axis=1)) def test_ovr_fit_predict(): # A classifier which implements decision_function. ovr = OneVsRestClassifier(LinearSVC(random_state=0)) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert len(ovr.estimators_) == n_classes clf = LinearSVC(random_state=0) pred2 = clf.fit(iris.data, iris.target).predict(iris.data) assert np.mean(iris.target == pred) == np.mean(iris.target == pred2) # A classifier which implements predict_proba. 
ovr = OneVsRestClassifier(MultinomialNB()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert np.mean(iris.target == pred) > 0.65 def test_ovr_partial_fit(): # Test if partial_fit is working as intended X, y = shuffle(iris.data, iris.target, random_state=0) ovr = OneVsRestClassifier(MultinomialNB()) ovr.partial_fit(X[:100], y[:100], np.unique(y)) ovr.partial_fit(X[100:], y[100:]) pred = ovr.predict(X) ovr2 = OneVsRestClassifier(MultinomialNB()) pred2 = ovr2.fit(X, y).predict(X) assert_almost_equal(pred, pred2) assert len(ovr.estimators_) == len(np.unique(y)) assert np.mean(y == pred) > 0.65 # Test when mini batches doesn't have all classes # with SGDClassifier X = np.abs(np.random.randn(14, 2)) y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3] ovr = OneVsRestClassifier( SGDClassifier(max_iter=1, tol=None, shuffle=False, random_state=0) ) ovr.partial_fit(X[:7], y[:7], np.unique(y)) ovr.partial_fit(X[7:], y[7:]) pred = ovr.predict(X) ovr1 = OneVsRestClassifier( SGDClassifier(max_iter=1, tol=None, shuffle=False, random_state=0) ) pred1 = ovr1.fit(X, y).predict(X) assert np.mean(pred == y) == np.mean(pred1 == y) # test partial_fit only exists if estimator has it: ovr = OneVsRestClassifier(SVC()) assert not hasattr(ovr, "partial_fit") def test_ovr_partial_fit_exceptions(): ovr = OneVsRestClassifier(MultinomialNB()) X = np.abs(np.random.randn(14, 2)) y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3] ovr.partial_fit(X[:7], y[:7], np.unique(y)) # If a new class that was not in the first call of partial fit is seen # it should raise ValueError y1 = [5] + y[7:-1] msg = r"Mini-batch contains \[.+\] while classes must be subset of \[.+\]" with pytest.raises(ValueError, match=msg): ovr.partial_fit(X=X[7:], y=y1) def test_ovr_ovo_regressor(): # test that ovr and ovo work on regressors which don't have a decision_ # function ovr = OneVsRestClassifier(DecisionTreeRegressor()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert len(ovr.estimators_) == n_classes 
assert_array_equal(np.unique(pred), [0, 1, 2]) # we are doing something sensible assert np.mean(pred == iris.target) > 0.9 ovr = OneVsOneClassifier(DecisionTreeRegressor()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2 assert_array_equal(np.unique(pred), [0, 1, 2]) # we are doing something sensible assert np.mean(pred == iris.target) > 0.9 @pytest.mark.parametrize( "sparse_container", CSR_CONTAINERS + CSC_CONTAINERS + COO_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS, ) def test_ovr_fit_predict_sparse(sparse_container): base_clf = MultinomialNB(alpha=1) X, Y = datasets.make_multilabel_classification( n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, random_state=0, ) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse_container(Y_train)) Y_pred_sprs = clf_sprs.predict(X_test) assert clf.multilabel_ assert sp.issparse(Y_pred_sprs) assert_array_equal(Y_pred_sprs.toarray(), Y_pred) # Test predict_proba Y_proba = clf_sprs.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > 0.5 assert_array_equal(pred, Y_pred_sprs.toarray()) # Test decision_function clf = svm.SVC() clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse_container(Y_train)) dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int) assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray()) def test_ovr_always_present(): # Test that ovr works with classes that are always present or absent. # Note: tests is the case where _ConstantPredictor is utilised X = np.ones((10, 2)) X[:5, :] = 0 # Build an indicator matrix where two features are always on. 
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)] y = np.zeros((10, 3)) y[5:, 0] = 1 y[:, 1] = 1 y[:, 2] = 1 ovr = OneVsRestClassifier(LogisticRegression()) msg = r"Label .+ is present in all training examples" with pytest.warns(UserWarning, match=msg): ovr.fit(X, y) y_pred = ovr.predict(X) assert_array_equal(np.array(y_pred), np.array(y)) y_pred = ovr.decision_function(X) assert np.unique(y_pred[:, -2:]) == 1 y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.ones(X.shape[0])) # y has a constantly absent label y = np.zeros((10, 2)) y[5:, 0] = 1 # variable label ovr = OneVsRestClassifier(LogisticRegression()) msg = r"Label not 1 is present in all training examples" with pytest.warns(UserWarning, match=msg): ovr.fit(X, y) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0])) def test_ovr_multiclass(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "ham", "eggs", "ham"] Y = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in ( MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), ): clf = OneVsRestClassifier(base_clf).fit(X, y) assert set(clf.classes_) == classes y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_array_equal(y_pred, ["eggs"]) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 0, 4]])[0] assert_array_equal(y_pred, [0, 0, 1]) def test_ovr_binary(): # Toy dataset where features correspond directly to labels. 
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "spam", "eggs", "spam"] Y = np.array([[0, 1, 1, 0, 1]]).T classes = set("eggs spam".split()) def conduct_test(base_clf, test_predict_proba=False): clf = OneVsRestClassifier(base_clf).fit(X, y) assert set(clf.classes_) == classes y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_array_equal(y_pred, ["eggs"]) if hasattr(base_clf, "decision_function"): dec = clf.decision_function(X) assert dec.shape == (5,) if test_predict_proba: X_test = np.array([[0, 0, 4]]) probabilities = clf.predict_proba(X_test) assert 2 == len(probabilities[0]) assert clf.classes_[np.argmax(probabilities, axis=1)] == clf.predict(X_test) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[3, 0, 0]])[0] assert y_pred == 1 for base_clf in ( LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), ): conduct_test(base_clf) for base_clf in (MultinomialNB(), SVC(probability=True), LogisticRegression()): conduct_test(base_clf, test_predict_proba=True) def test_ovr_multilabel(): # Toy dataset where features correspond directly to labels. 
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]]) y = np.array([[0, 1, 1], [0, 1, 0], [1, 1, 1], [1, 0, 1], [1, 0, 0]]) for base_clf in ( MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), Lasso(alpha=0.5), ): clf = OneVsRestClassifier(base_clf).fit(X, y) y_pred = clf.predict([[0, 4, 4]])[0] assert_array_equal(y_pred, [0, 1, 1]) assert clf.multilabel_ def test_ovr_fit_predict_svc(): ovr = OneVsRestClassifier(svm.SVC()) ovr.fit(iris.data, iris.target) assert len(ovr.estimators_) == 3 assert ovr.score(iris.data, iris.target) > 0.9 def test_ovr_multilabel_dataset(): base_clf = MultinomialNB(alpha=1) for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)): X, Y = datasets.make_multilabel_classification( n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=au, random_state=0, ) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) assert clf.multilabel_ assert_almost_equal( precision_score(Y_test, Y_pred, average="micro"), prec, decimal=2 ) assert_almost_equal( recall_score(Y_test, Y_pred, average="micro"), recall, decimal=2 ) def test_ovr_multilabel_predict_proba(): base_clf = MultinomialNB(alpha=1) for au in (False, True): X, Y = datasets.make_multilabel_classification( n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=au, random_state=0, ) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, "predict_proba") # Estimator with predict_proba disabled, depending on parameters. 
decision_only = OneVsRestClassifier(svm.SVC(probability=False)) assert not hasattr(decision_only, "predict_proba") decision_only.fit(X_train, Y_train) assert not hasattr(decision_only, "predict_proba") assert hasattr(decision_only, "decision_function") # Estimator which can get predict_proba enabled after fitting gs = GridSearchCV( svm.SVC(probability=False), param_grid={"probability": [True]} ) proba_after_fit = OneVsRestClassifier(gs) assert not hasattr(proba_after_fit, "predict_proba") proba_after_fit.fit(X_train, Y_train) assert hasattr(proba_after_fit, "predict_proba") Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > 0.5 assert_array_equal(pred, Y_pred) def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, "predict_proba") Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label with the greatest predictive probability. pred = Y_proba.argmax(axis=1) assert not (pred - Y_pred).any() def test_ovr_single_label_predict_proba_zero(): """Check that predic_proba returns all zeros when the base estimator never predicts the positive class. 
""" class NaiveBinaryClassifier(BaseEstimator, ClassifierMixin): def fit(self, X, y): self.classes_ = np.unique(y) return self def predict_proba(self, X): proba = np.ones((len(X), 2)) # Probability of being the positive class is always 0 proba[:, 1] = 0 return proba base_clf = NaiveBinaryClassifier() X, y = iris.data, iris.target # Three-class problem with 150 samples clf = OneVsRestClassifier(base_clf).fit(X, y) y_proba = clf.predict_proba(X) assert_allclose(y_proba, 0.0) def test_ovr_multilabel_decision_function(): X, Y = datasets.make_multilabel_classification( n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, random_state=0, ) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal( (clf.decision_function(X_test) > 0).astype(int), clf.predict(X_test) ) def test_ovr_single_label_decision_function(): X, Y = datasets.make_classification(n_samples=100, n_features=20, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal(clf.decision_function(X_test).ravel() > 0, clf.predict(X_test)) def test_ovr_gridsearch(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovr, {"estimator__C": Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ovr_pipeline(): # Test with pipeline of length one # This test is needed because the multiclass estimators may fail to detect # the presence of predict_proba or decision_function. 
clf = Pipeline([("tree", DecisionTreeClassifier())]) ovr_pipe = OneVsRestClassifier(clf) ovr_pipe.fit(iris.data, iris.target) ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data)) def test_ovo_exceptions(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) with pytest.raises(NotFittedError): ovo.predict([]) def test_ovo_fit_on_list(): # Test that OneVsOne fitting works with a list of targets and yields the # same output as predict from an array ovo = OneVsOneClassifier(LinearSVC(random_state=0)) prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data) iris_data_list = [list(a) for a in iris.data] prediction_from_list = ovo.fit(iris_data_list, list(iris.target)).predict( iris_data_list ) assert_array_equal(prediction_from_array, prediction_from_list) def test_ovo_fit_predict(): # A classifier which implements decision_function. ovo = OneVsOneClassifier(LinearSVC(random_state=0)) ovo.fit(iris.data, iris.target).predict(iris.data) assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2 # A classifier which implements predict_proba. 
ovo = OneVsOneClassifier(MultinomialNB()) ovo.fit(iris.data, iris.target).predict(iris.data) assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2 def test_ovo_partial_fit_predict(): temp = datasets.load_iris() X, y = temp.data, temp.target ovo1 = OneVsOneClassifier(MultinomialNB()) ovo1.partial_fit(X[:100], y[:100], np.unique(y)) ovo1.partial_fit(X[100:], y[100:]) pred1 = ovo1.predict(X) ovo2 = OneVsOneClassifier(MultinomialNB()) ovo2.fit(X, y) pred2 = ovo2.predict(X) assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2 assert np.mean(y == pred1) > 0.65 assert_almost_equal(pred1, pred2) # Test when mini-batches have binary target classes ovo1 = OneVsOneClassifier(MultinomialNB()) ovo1.partial_fit(X[:60], y[:60], np.unique(y)) ovo1.partial_fit(X[60:], y[60:]) pred1 = ovo1.predict(X) ovo2 = OneVsOneClassifier(MultinomialNB()) pred2 = ovo2.fit(X, y).predict(X) assert_almost_equal(pred1, pred2) assert len(ovo1.estimators_) == len(np.unique(y)) assert np.mean(y == pred1) > 0.65 ovo = OneVsOneClassifier(MultinomialNB()) X = np.random.rand(14, 2) y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2] ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4]) ovo.partial_fit(X[7:], y[7:]) pred = ovo.predict(X) ovo2 = OneVsOneClassifier(MultinomialNB()) pred2 = ovo2.fit(X, y).predict(X) assert_almost_equal(pred, pred2) # raises error when mini-batch does not have classes from all_classes ovo = OneVsOneClassifier(MultinomialNB()) error_y = [0, 1, 2, 3, 4, 5, 2] message_re = escape( "Mini-batch contains {0} while it must be subset of {1}".format( np.unique(error_y), np.unique(y) ) ) with pytest.raises(ValueError, match=message_re): ovo.partial_fit(X[:7], error_y, np.unique(y)) # test partial_fit only exists if estimator has it: ovr = OneVsOneClassifier(SVC()) assert not hasattr(ovr, "partial_fit") def test_ovo_decision_function(): n_samples = iris.data.shape[0] ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0)) # first binary ovo_clf.fit(iris.data, iris.target == 0) 
decisions = ovo_clf.decision_function(iris.data) assert decisions.shape == (n_samples,) # then multi-class ovo_clf.fit(iris.data, iris.target) decisions = ovo_clf.decision_function(iris.data) assert decisions.shape == (n_samples, n_classes) assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data)) # Compute the votes votes = np.zeros((n_samples, n_classes)) k = 0 for i in range(n_classes): for j in range(i + 1, n_classes): pred = ovo_clf.estimators_[k].predict(iris.data) votes[pred == 0, i] += 1 votes[pred == 1, j] += 1 k += 1 # Extract votes and verify assert_array_equal(votes, np.round(decisions)) for class_idx in range(n_classes): # For each sample and each class, there only 3 possible vote levels # because they are only 3 distinct class pairs thus 3 distinct # binary classifiers. # Therefore, sorting predictions based on votes would yield # mostly tied predictions: assert set(votes[:, class_idx]).issubset(set([0.0, 1.0, 2.0])) # The OVO decision function on the other hand is able to resolve # most of the ties on this data as it combines both the vote counts # and the aggregated confidence levels of the binary classifiers # to compute the aggregate decision function. The iris dataset # has 150 samples with a couple of duplicates. 
The OvO decisions # can resolve most of the ties: assert len(np.unique(decisions[:, class_idx])) > 146 def test_ovo_gridsearch(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovo, {"estimator__C": Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ovo_ties(): # Test that ties are broken using the decision function, # not defaulting to the smallest label X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y = np.array([2, 0, 1, 2]) multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4, tol=None)) ovo_prediction = multi_clf.fit(X, y).predict(X) ovo_decision = multi_clf.decision_function(X) # Classifiers are in order 0-1, 0-2, 1-2 # Use decision_function to compute the votes and the normalized # sum_of_confidences, which is used to disambiguate when there is a tie in # votes. votes = np.round(ovo_decision) normalized_confidences = ovo_decision - votes # For the first point, there is one vote per class assert_array_equal(votes[0, :], 1) # For the rest, there is no tie and the prediction is the argmax assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:]) # For the tie, the prediction is the class with the highest score assert ovo_prediction[0] == normalized_confidences[0].argmax() def test_ovo_ties2(): # test that ties can not only be won by the first two labels X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y_ref = np.array([2, 0, 1, 2]) # cycle through labels so that each label wins once for i in range(3): y = (y_ref + i) % 3 multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4, tol=None)) ovo_prediction = multi_clf.fit(X, y).predict(X) assert ovo_prediction[0] == i % 3 def test_ovo_string_y(): # Test that the OvO doesn't mess up the encoding of string labels X = np.eye(4) y = np.array(["a", "b", "c", "d"]) ovo = OneVsOneClassifier(LinearSVC()) ovo.fit(X, y) assert_array_equal(y, ovo.predict(X)) def test_ovo_one_class(): 
# Test error for OvO with one class X = np.eye(4) y = np.array(["a"] * 4) ovo = OneVsOneClassifier(LinearSVC()) msg = "when only one class" with pytest.raises(ValueError, match=msg): ovo.fit(X, y) def test_ovo_float_y(): # Test that the OvO errors on float targets X = iris.data y = iris.data[:, 0] ovo = OneVsOneClassifier(LinearSVC()) msg = "Unknown label type" with pytest.raises(ValueError, match=msg): ovo.fit(X, y) def test_ecoc_exceptions(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) with pytest.raises(NotFittedError): ecoc.predict([]) def test_ecoc_fit_predict(): # A classifier which implements decision_function. ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert len(ecoc.estimators_) == n_classes * 2 # A classifier which implements predict_proba. ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert len(ecoc.estimators_) == n_classes * 2 def test_ecoc_gridsearch(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0), random_state=0) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ecoc, {"estimator__C": Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ecoc_float_y(): # Test that the OCC errors on float targets X = iris.data y = iris.data[:, 0] ovo = OutputCodeClassifier(LinearSVC()) msg = "Unknown label type" with pytest.raises(ValueError, match=msg): ovo.fit(X, y) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_ecoc_delegate_sparse_base_estimator(csc_container): # Non-regression test for # https://github.com/scikit-learn/scikit-learn/issues/17218 X, y = iris.data, iris.target X_sp = csc_container(X) # create an estimator that does not support sparse input base_estimator = CheckingClassifier( check_X=check_array, check_X_params={"ensure_2d": True, "accept_sparse": False}, ) ecoc = 
OutputCodeClassifier(base_estimator, random_state=0) with pytest.raises(TypeError, match="Sparse data was passed"): ecoc.fit(X_sp, y) ecoc.fit(X, y) with pytest.raises(TypeError, match="Sparse data was passed"): ecoc.predict(X_sp) # smoke test to check when sparse input should be supported ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) ecoc.fit(X_sp, y).predict(X_sp) assert len(ecoc.estimators_) == 4 def test_pairwise_indices(): clf_precomputed = svm.SVC(kernel="precomputed") X, y = iris.data, iris.target ovr_false = OneVsOneClassifier(clf_precomputed) linear_kernel = np.dot(X, X.T) ovr_false.fit(linear_kernel, y) n_estimators = len(ovr_false.estimators_) precomputed_indices = ovr_false.pairwise_indices_ for idx in precomputed_indices: assert ( idx.shape[0] * n_estimators / (n_estimators - 1) == linear_kernel.shape[0] ) def test_pairwise_n_features_in(): """Check the n_features_in_ attributes of the meta and base estimators When the training data is a regular design matrix, everything is intuitive. However, when the training data is a precomputed kernel matrix, the multiclass strategy can resample the kernel matrix of the underlying base estimator both row-wise and column-wise and this has a non-trivial impact on the expected value for the n_features_in_ of both the meta and the base estimators. """ X, y = iris.data, iris.target # Remove the last sample to make the classes not exactly balanced and make # the test more interesting. 
assert y[-1] == 0 X = X[:-1] y = y[:-1] # Fitting directly on the design matrix: assert X.shape == (149, 4) clf_notprecomputed = svm.SVC(kernel="linear").fit(X, y) assert clf_notprecomputed.n_features_in_ == 4 ovr_notprecomputed = OneVsRestClassifier(clf_notprecomputed).fit(X, y) assert ovr_notprecomputed.n_features_in_ == 4 for est in ovr_notprecomputed.estimators_: assert est.n_features_in_ == 4 ovo_notprecomputed = OneVsOneClassifier(clf_notprecomputed).fit(X, y) assert ovo_notprecomputed.n_features_in_ == 4 assert ovo_notprecomputed.n_classes_ == 3 assert len(ovo_notprecomputed.estimators_) == 3 for est in ovo_notprecomputed.estimators_: assert est.n_features_in_ == 4 # When working with precomputed kernels we have one "feature" per training # sample: K = X @ X.T assert K.shape == (149, 149) clf_precomputed = svm.SVC(kernel="precomputed").fit(K, y) assert clf_precomputed.n_features_in_ == 149 ovr_precomputed = OneVsRestClassifier(clf_precomputed).fit(K, y) assert ovr_precomputed.n_features_in_ == 149 assert ovr_precomputed.n_classes_ == 3 assert len(ovr_precomputed.estimators_) == 3 for est in ovr_precomputed.estimators_: assert est.n_features_in_ == 149 # This becomes really interesting with OvO and precomputed kernel together: # internally, OvO will drop the samples of the classes not part of the pair # of classes under consideration for a given binary classifier. Since we # use a precomputed kernel, it will also drop the matching columns of the # kernel matrix, and therefore we have fewer "features" as result. # # Since class 0 has 49 samples, and class 1 and 2 have 50 samples each, a # single OvO binary classifier works with a sub-kernel matrix of shape # either (99, 99) or (100, 100). 
ovo_precomputed = OneVsOneClassifier(clf_precomputed).fit(K, y) assert ovo_precomputed.n_features_in_ == 149 assert ovr_precomputed.n_classes_ == 3 assert len(ovr_precomputed.estimators_) == 3 assert ovo_precomputed.estimators_[0].n_features_in_ == 99 # class 0 vs class 1 assert ovo_precomputed.estimators_[1].n_features_in_ == 99 # class 0 vs class 2 assert ovo_precomputed.estimators_[2].n_features_in_ == 100 # class 1 vs class 2 @pytest.mark.parametrize( "MultiClassClassifier", [OneVsRestClassifier, OneVsOneClassifier] ) def test_pairwise_tag(MultiClassClassifier): clf_precomputed = svm.SVC(kernel="precomputed") clf_notprecomputed = svm.SVC() ovr_false = MultiClassClassifier(clf_notprecomputed) assert not ovr_false.__sklearn_tags__().input_tags.pairwise ovr_true = MultiClassClassifier(clf_precomputed) assert ovr_true.__sklearn_tags__().input_tags.pairwise @pytest.mark.parametrize( "MultiClassClassifier", [OneVsRestClassifier, OneVsOneClassifier] ) def test_pairwise_cross_val_score(MultiClassClassifier): clf_precomputed = svm.SVC(kernel="precomputed") clf_notprecomputed = svm.SVC(kernel="linear") X, y = iris.data, iris.target multiclass_clf_notprecomputed = MultiClassClassifier(clf_notprecomputed) multiclass_clf_precomputed = MultiClassClassifier(clf_precomputed) linear_kernel = np.dot(X, X.T) score_not_precomputed = cross_val_score( multiclass_clf_notprecomputed, X, y, error_score="raise" ) score_precomputed = cross_val_score( multiclass_clf_precomputed, linear_kernel, y, error_score="raise" ) assert_array_equal(score_precomputed, score_not_precomputed) @pytest.mark.parametrize( "MultiClassClassifier", [OneVsRestClassifier, OneVsOneClassifier] ) # FIXME: we should move this test in `estimator_checks` once we are able # to construct meta-estimator instances def test_support_missing_values(MultiClassClassifier): # smoke test to check that pipeline OvR and OvO classifiers are letting # the validation of missing values to # the underlying pipeline or classifiers rng = 
np.random.RandomState(42) X, y = iris.data, iris.target X = np.copy(X) # Copy to avoid that the original data is modified mask = rng.choice([1, 0], X.shape, p=[0.1, 0.9]).astype(bool) X[mask] = np.nan lr = make_pipeline(SimpleImputer(), LogisticRegression(random_state=rng)) MultiClassClassifier(lr).fit(X, y).score(X, y) @pytest.mark.parametrize("make_y", [np.ones, np.zeros]) def test_constant_int_target(make_y):
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_build.py
sklearn/tests/test_build.py
import os import textwrap import pytest from sklearn import __version__ from sklearn.utils._openmp_helpers import _openmp_parallelism_enabled def test_openmp_parallelism_enabled(): # Check that sklearn is built with OpenMP-based parallelism enabled. # This test can be skipped by setting the environment variable # ``SKLEARN_SKIP_OPENMP_TEST``. if os.getenv("SKLEARN_SKIP_OPENMP_TEST"): pytest.skip("test explicitly skipped (SKLEARN_SKIP_OPENMP_TEST)") base_url = "dev" if __version__.endswith(".dev0") else "stable" err_msg = textwrap.dedent( """ This test fails because scikit-learn has been built without OpenMP. This is not recommended since some estimators will run in sequential mode instead of leveraging thread-based parallelism. You can find instructions to build scikit-learn with OpenMP at this address: https://scikit-learn.org/{}/developers/advanced_installation.html You can skip this test by setting the environment variable SKLEARN_SKIP_OPENMP_TEST to any value. """ ).format(base_url) assert _openmp_parallelism_enabled(), err_msg
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_config.py
sklearn/tests/test_config.py
import time from concurrent.futures import ThreadPoolExecutor import pytest import sklearn from sklearn import config_context, get_config, set_config from sklearn.utils.fixes import _IS_WASM from sklearn.utils.parallel import Parallel, delayed def test_config_context(): assert get_config() == { "assume_finite": False, "working_memory": 1024, "print_changed_only": True, "display": "diagram", "array_api_dispatch": False, "pairwise_dist_chunk_size": 256, "enable_cython_pairwise_dist": True, "transform_output": "default", "enable_metadata_routing": False, "skip_parameter_validation": False, } # Not using as a context manager affects nothing config_context(assume_finite=True) assert get_config()["assume_finite"] is False with config_context(assume_finite=True): assert get_config() == { "assume_finite": True, "working_memory": 1024, "print_changed_only": True, "display": "diagram", "array_api_dispatch": False, "pairwise_dist_chunk_size": 256, "enable_cython_pairwise_dist": True, "transform_output": "default", "enable_metadata_routing": False, "skip_parameter_validation": False, } assert get_config()["assume_finite"] is False with config_context(assume_finite=True): with config_context(assume_finite=None): assert get_config()["assume_finite"] is True assert get_config()["assume_finite"] is True with config_context(assume_finite=False): assert get_config()["assume_finite"] is False with config_context(assume_finite=None): assert get_config()["assume_finite"] is False # global setting will not be retained outside of context that # did not modify this setting set_config(assume_finite=True) assert get_config()["assume_finite"] is True assert get_config()["assume_finite"] is False assert get_config()["assume_finite"] is True assert get_config() == { "assume_finite": False, "working_memory": 1024, "print_changed_only": True, "display": "diagram", "array_api_dispatch": False, "pairwise_dist_chunk_size": 256, "enable_cython_pairwise_dist": True, "transform_output": "default", 
"enable_metadata_routing": False, "skip_parameter_validation": False, } # No positional arguments with pytest.raises(TypeError): config_context(True) # No unknown arguments with pytest.raises(TypeError): config_context(do_something_else=True).__enter__() def test_config_context_exception(): assert get_config()["assume_finite"] is False try: with config_context(assume_finite=True): assert get_config()["assume_finite"] is True raise ValueError() except ValueError: pass assert get_config()["assume_finite"] is False def test_set_config(): assert get_config()["assume_finite"] is False set_config(assume_finite=None) assert get_config()["assume_finite"] is False set_config(assume_finite=True) assert get_config()["assume_finite"] is True set_config(assume_finite=None) assert get_config()["assume_finite"] is True set_config(assume_finite=False) assert get_config()["assume_finite"] is False # No unknown arguments with pytest.raises(TypeError): set_config(do_something_else=True) def set_assume_finite(assume_finite, sleep_duration): """Return the value of assume_finite after waiting `sleep_duration`.""" with config_context(assume_finite=assume_finite): time.sleep(sleep_duration) return get_config()["assume_finite"] @pytest.mark.parametrize("backend", ["loky", "multiprocessing", "threading"]) def test_config_threadsafe_joblib(backend): """Test that the global config is threadsafe with all joblib backends. Two jobs are spawned and sets assume_finite to two different values. When the job with a duration 0.1s completes, the assume_finite value should be the same as the value passed to the function. In other words, it is not influenced by the other job setting assume_finite to True. 
""" assume_finites = [False, True, False, True] sleep_durations = [0.1, 0.2, 0.1, 0.2] items = Parallel(backend=backend, n_jobs=2)( delayed(set_assume_finite)(assume_finite, sleep_dur) for assume_finite, sleep_dur in zip(assume_finites, sleep_durations) ) assert items == [False, True, False, True] @pytest.mark.xfail(_IS_WASM, reason="cannot start threads") def test_config_threadsafe(): """Uses threads directly to test that the global config does not change between threads. Same test as `test_config_threadsafe_joblib` but with `ThreadPoolExecutor`.""" assume_finites = [False, True, False, True] sleep_durations = [0.1, 0.2, 0.1, 0.2] with ThreadPoolExecutor(max_workers=2) as e: items = [ output for output in e.map(set_assume_finite, assume_finites, sleep_durations) ] assert items == [False, True, False, True] def test_config_array_api_dispatch_error_scipy(monkeypatch): """Check error when SciPy is too old""" monkeypatch.setattr(sklearn.utils._array_api.scipy, "__version__", "1.13.0") with pytest.raises(ImportError, match="SciPy must be 1.14.0 or newer"): with config_context(array_api_dispatch=True): pass with pytest.raises(ImportError, match="SciPy must be 1.14.0 or newer"): set_config(array_api_dispatch=True)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_pipeline.py
sklearn/tests/test_pipeline.py
""" Test the pipeline module. """ import itertools import re import shutil import time from tempfile import mkdtemp import joblib import numpy as np import pytest from sklearn import config_context from sklearn.base import ( BaseEstimator, ClassifierMixin, TransformerMixin, clone, is_classifier, is_regressor, ) from sklearn.cluster import KMeans from sklearn.datasets import load_iris from sklearn.decomposition import PCA, TruncatedSVD from sklearn.dummy import DummyRegressor from sklearn.ensemble import ( HistGradientBoostingClassifier, RandomForestClassifier, RandomTreesEmbedding, ) from sklearn.exceptions import NotFittedError, UnsetMetadataPassedError from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_selection import SelectKBest, f_classif from sklearn.impute import SimpleImputer from sklearn.linear_model import Lasso, LinearRegression, LogisticRegression from sklearn.metrics import accuracy_score, r2_score from sklearn.model_selection import train_test_split from sklearn.neighbors import LocalOutlierFactor from sklearn.pipeline import FeatureUnion, Pipeline, make_pipeline, make_union from sklearn.preprocessing import FunctionTransformer, StandardScaler from sklearn.svm import SVC from sklearn.tests.metadata_routing_common import ( ConsumingNoFitTransformTransformer, ConsumingTransformer, _Registry, check_recorded_metadata, ) from sklearn.utils import get_tags from sklearn.utils._metadata_requests import COMPOSITE_METHODS, METHODS from sklearn.utils._testing import ( MinimalClassifier, MinimalRegressor, MinimalTransformer, assert_allclose, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.fixes import CSR_CONTAINERS from sklearn.utils.validation import _check_feature_names, check_is_fitted # Load a shared tests data sets for the tests in this module. Mark them # read-only to avoid unintentional in-place modifications that would introduce # side-effects between tests. 
iris = load_iris() iris.data.flags.writeable = False iris.target.flags.writeable = False JUNK_FOOD_DOCS = ( "the pizza pizza beer copyright", "the pizza burger beer copyright", "the the pizza beer beer copyright", "the burger beer beer copyright", "the coke burger coke copyright", "the coke burger burger", ) class NoFit(BaseEstimator): """Small class to test parameter dispatching.""" def __init__(self, a=None, b=None): self.a = a self.b = b class NoTrans(NoFit): def fit(self, X, y=None): return self def get_params(self, deep=False): return {"a": self.a, "b": self.b} def set_params(self, **params): self.a = params["a"] return self class NoInvTransf(TransformerMixin, NoTrans): def transform(self, X): return X class Transf(NoInvTransf): def transform(self, X): return X def inverse_transform(self, X): return X class TransfFitParams(Transf): def fit(self, X, y=None, **fit_params): self.fit_params = fit_params return self class Mult(TransformerMixin, BaseEstimator): def __init__(self, mult=1): self.mult = mult def __sklearn_is_fitted__(self): return True def fit(self, X, y=None): return self def transform(self, X): return np.asarray(X) * self.mult def inverse_transform(self, X): return np.asarray(X) / self.mult def predict(self, X): return (np.asarray(X) * self.mult).sum(axis=1) predict_proba = predict_log_proba = decision_function = predict def score(self, X, y=None): return np.sum(X) class FitParamT(BaseEstimator): """Mock classifier""" def __init__(self): self.successful = False def fit(self, X, y, should_succeed=False): self.successful = should_succeed self.fitted_ = True def predict(self, X): return self.successful def fit_predict(self, X, y, should_succeed=False): self.fit(X, y, should_succeed=should_succeed) return self.predict(X) def score(self, X, y=None, sample_weight=None): if sample_weight is not None: X = X * sample_weight return np.sum(X) class DummyTransf(Transf): """Transformer which store the column means""" def fit(self, X, y): self.means_ = np.mean(X, 
axis=0) # store timestamp to figure out whether the result of 'fit' has been # cached or not self.timestamp_ = time.time() return self class DummyEstimatorParams(BaseEstimator): """Mock classifier that takes params on predict""" def __sklearn_is_fitted__(self): return True def fit(self, X, y): return self def predict(self, X, got_attribute=False): self.got_attribute = got_attribute return self def predict_proba(self, X, got_attribute=False): self.got_attribute = got_attribute return self def predict_log_proba(self, X, got_attribute=False): self.got_attribute = got_attribute return self def test_pipeline_invalid_parameters(): # Test the various init parameters of the pipeline in fit # method pipeline = Pipeline([(1, 1)]) with pytest.raises(TypeError): pipeline.fit([[1]], [1]) # Check that we can't fit pipelines with objects without fit # method msg = ( "Last step of Pipeline should implement fit " "or be the string 'passthrough'" ".*NoFit.*" ) pipeline = Pipeline([("clf", NoFit())]) with pytest.raises(TypeError, match=msg): pipeline.fit([[1]], [1]) # Smoke test with only an estimator clf = NoTrans() pipe = Pipeline([("svc", clf)]) assert pipe.get_params(deep=True) == dict( svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False) ) # Check that params are set pipe.set_params(svc__a=0.1) assert clf.a == 0.1 assert clf.b is None # Smoke test the repr: repr(pipe) # Test with two objects clf = SVC() filter1 = SelectKBest(f_classif) pipe = Pipeline([("anova", filter1), ("svc", clf)]) # Check that estimators are not cloned on pipeline construction assert pipe.named_steps["anova"] is filter1 assert pipe.named_steps["svc"] is clf # Check that we can't fit with non-transformers on the way # Note that NoTrans implements fit, but not transform msg = "All intermediate steps should be transformers.*\\bNoTrans\\b.*" pipeline = Pipeline([("t", NoTrans()), ("svc", clf)]) with pytest.raises(TypeError, match=msg): pipeline.fit([[1]], [1]) # Check that params are set 
pipe.set_params(svc__C=0.1) assert clf.C == 0.1 # Smoke test the repr: repr(pipe) # Check that params are not set when naming them wrong msg = re.escape( "Invalid parameter 'C' for estimator SelectKBest(). Valid parameters are: ['k'," " 'score_func']." ) with pytest.raises(ValueError, match=msg): pipe.set_params(anova__C=0.1) # Test clone pipe2 = clone(pipe) assert pipe.named_steps["svc"] is not pipe2.named_steps["svc"] # Check that apart from estimators, the parameters are the same params = pipe.get_params(deep=True) params2 = pipe2.get_params(deep=True) for x in pipe.get_params(deep=False): params.pop(x) for x in pipe2.get_params(deep=False): params2.pop(x) # Remove estimators that where copied params.pop("svc") params.pop("anova") params2.pop("svc") params2.pop("anova") assert params == params2 @pytest.mark.parametrize( "meta_estimators", [ Pipeline([("pca", PCA)]), Pipeline([("pca", PCA), ("ident", None)]), Pipeline([("passthrough", "passthrough"), ("pca", PCA)]), Pipeline([("passthrough", None), ("pca", PCA)]), Pipeline([("scale", StandardScaler), ("pca", PCA())]), FeatureUnion([("pca", PCA), ("svd", TruncatedSVD())]), FeatureUnion([("pca", PCA()), ("svd", TruncatedSVD)]), FeatureUnion([("drop", "drop"), ("svd", TruncatedSVD)]), FeatureUnion([("pca", PCA), ("passthrough", "passthrough")]), ], ) def test_meta_estimator_raises_class_not_instance_error(meta_estimators): # non-regression tests for https://github.com/scikit-learn/scikit-learn/issues/32719 msg = "Expected an estimator instance (.*()), got estimator class instead (.*)." with pytest.raises(TypeError, match=msg): meta_estimators.fit([[1]]) def test_empty_pipeline(): X = iris.data y = iris.target pipe = Pipeline([]) msg = "The pipeline is empty. Please add steps." 
with pytest.raises(ValueError, match=msg): pipe.fit(X, y) def test_pipeline_init_tuple(): # Pipeline accepts steps as tuple X = np.array([[1, 2]]) pipe = Pipeline((("transf", Transf()), ("clf", FitParamT()))) pipe.fit(X, y=None) pipe.score(X) pipe.set_params(transf="passthrough") pipe.fit(X, y=None) pipe.score(X) def test_pipeline_methods_anova(): # Test the various methods of the pipeline (anova). X = iris.data y = iris.target # Test with Anova + LogisticRegression clf = LogisticRegression() filter1 = SelectKBest(f_classif, k=2) pipe = Pipeline([("anova", filter1), ("logistic", clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_fit_params(): # Test that the pipeline can take fit parameters pipe = Pipeline([("transf", Transf()), ("clf", FitParamT())]) pipe.fit(X=None, y=None, clf__should_succeed=True) # classifier should return True assert pipe.predict(None) # and transformer params should not be changed assert pipe.named_steps["transf"].a is None assert pipe.named_steps["transf"].b is None # invalid parameters should raise an error message msg = re.escape("fit() got an unexpected keyword argument 'bad'") with pytest.raises(TypeError, match=msg): pipe.fit(None, None, clf__bad=True) def test_pipeline_sample_weight_supported(): # Pipeline should pass sample_weight X = np.array([[1, 2]]) pipe = Pipeline([("transf", Transf()), ("clf", FitParamT())]) pipe.fit(X, y=None) assert pipe.score(X) == 3 assert pipe.score(X, y=None) == 3 assert pipe.score(X, y=None, sample_weight=None) == 3 assert pipe.score(X, sample_weight=np.array([2, 3])) == 8 def test_pipeline_sample_weight_unsupported(): # When sample_weight is None it shouldn't be passed X = np.array([[1, 2]]) pipe = Pipeline([("transf", Transf()), ("clf", Mult())]) pipe.fit(X, y=None) assert pipe.score(X) == 3 assert pipe.score(X, sample_weight=None) == 3 msg = re.escape("score() got an unexpected keyword argument 'sample_weight'") with 
pytest.raises(TypeError, match=msg): pipe.score(X, sample_weight=np.array([2, 3])) def test_pipeline_raise_set_params_error(): # Test pipeline raises set params error message for nested models. pipe = Pipeline([("cls", LinearRegression())]) # expected error message error_msg = re.escape( "Invalid parameter 'fake' for estimator Pipeline(steps=[('cls'," " LinearRegression())]). Valid parameters are: ['memory', 'steps'," " 'transform_input', 'verbose']." ) with pytest.raises(ValueError, match=error_msg): pipe.set_params(fake="nope") # invalid outer parameter name for compound parameter: the expected error message # is the same as above. with pytest.raises(ValueError, match=error_msg): pipe.set_params(fake__estimator="nope") # expected error message for invalid inner parameter error_msg = re.escape( "Invalid parameter 'invalid_param' for estimator LinearRegression(). Valid" " parameters are: ['copy_X', 'fit_intercept', 'n_jobs', 'positive', 'tol']." ) with pytest.raises(ValueError, match=error_msg): pipe.set_params(cls__invalid_param="nope") def test_pipeline_methods_pca_svm(): # Test the various methods of the pipeline (pca + svm). X = iris.data y = iris.target # Test with PCA + SVC clf = SVC(probability=True, random_state=0) pca = PCA(svd_solver="full", n_components="mle", whiten=True) pipe = Pipeline([("pca", pca), ("svc", clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_score_samples_pca_lof(): X = iris.data # Test that the score_samples method is implemented on a pipeline. # Test that the score_samples method on pipeline yields same results as # applying transform and score_samples steps separately. 
pca = PCA(svd_solver="full", n_components="mle", whiten=True) lof = LocalOutlierFactor(novelty=True) pipe = Pipeline([("pca", pca), ("lof", lof)]) pipe.fit(X) # Check the shapes assert pipe.score_samples(X).shape == (X.shape[0],) # Check the values lof.fit(pca.fit_transform(X)) assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X))) def test_score_samples_on_pipeline_without_score_samples(): X = np.array([[1], [2]]) y = np.array([1, 2]) # Test that a pipeline does not have score_samples method when the final # step of the pipeline does not have score_samples defined. pipe = make_pipeline(LogisticRegression()) pipe.fit(X, y) inner_msg = "'LogisticRegression' object has no attribute 'score_samples'" outer_msg = "'Pipeline' has no attribute 'score_samples'" with pytest.raises(AttributeError, match=outer_msg) as exec_info: pipe.score_samples(X) assert isinstance(exec_info.value.__cause__, AttributeError) assert inner_msg in str(exec_info.value.__cause__) def test_pipeline_methods_preprocessing_svm(): # Test the various methods of the pipeline (preprocessing + svm). 
X = iris.data y = iris.target n_samples = X.shape[0] n_classes = len(np.unique(y)) scaler = StandardScaler() pca = PCA(n_components=2, svd_solver="randomized", whiten=True) clf = SVC(probability=True, random_state=0, decision_function_shape="ovr") for preprocessing in [scaler, pca]: pipe = Pipeline([("preprocess", preprocessing), ("svc", clf)]) pipe.fit(X, y) # check shapes of various prediction functions predict = pipe.predict(X) assert predict.shape == (n_samples,) proba = pipe.predict_proba(X) assert proba.shape == (n_samples, n_classes) log_proba = pipe.predict_log_proba(X) assert log_proba.shape == (n_samples, n_classes) decision_function = pipe.decision_function(X) assert decision_function.shape == (n_samples, n_classes) pipe.score(X, y) def test_fit_predict_on_pipeline(): # test that the fit_predict method is implemented on a pipeline # test that the fit_predict on pipeline yields same results as applying # transform and clustering steps separately scaler = StandardScaler() km = KMeans(random_state=0, n_init="auto") # As pipeline doesn't clone estimators on construction, # it must have its own estimators scaler_for_pipeline = StandardScaler() km_for_pipeline = KMeans(random_state=0, n_init="auto") # first compute the transform and clustering step separately scaled = scaler.fit_transform(iris.data) separate_pred = km.fit_predict(scaled) # use a pipeline to do the transform and clustering in one step pipe = Pipeline([("scaler", scaler_for_pipeline), ("Kmeans", km_for_pipeline)]) pipeline_pred = pipe.fit_predict(iris.data) assert_array_almost_equal(pipeline_pred, separate_pred) def test_fit_predict_on_pipeline_without_fit_predict(): # tests that a pipeline does not have fit_predict method when final # step of pipeline does not have fit_predict defined scaler = StandardScaler() pca = PCA(svd_solver="full") pipe = Pipeline([("scaler", scaler), ("pca", pca)]) outer_msg = "'Pipeline' has no attribute 'fit_predict'" inner_msg = "'PCA' object has no attribute 
'fit_predict'" with pytest.raises(AttributeError, match=outer_msg) as exec_info: getattr(pipe, "fit_predict") assert isinstance(exec_info.value.__cause__, AttributeError) assert inner_msg in str(exec_info.value.__cause__) def test_fit_predict_with_intermediate_fit_params(): # tests that Pipeline passes fit_params to intermediate steps # when fit_predict is invoked pipe = Pipeline([("transf", TransfFitParams()), ("clf", FitParamT())]) pipe.fit_predict( X=None, y=None, transf__should_get_this=True, clf__should_succeed=True ) assert pipe.named_steps["transf"].fit_params["should_get_this"] assert pipe.named_steps["clf"].successful assert "should_succeed" not in pipe.named_steps["transf"].fit_params @pytest.mark.parametrize( "method_name", ["predict", "predict_proba", "predict_log_proba"] ) def test_predict_methods_with_predict_params(method_name): # tests that Pipeline passes predict_* to the final estimator # when predict_* is invoked pipe = Pipeline([("transf", Transf()), ("clf", DummyEstimatorParams())]) pipe.fit(None, None) method = getattr(pipe, method_name) method(X=None, got_attribute=True) assert pipe.named_steps["clf"].got_attribute @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_feature_union(csr_container): # basic sanity check for feature union X = iris.data.copy() X -= X.mean(axis=0) y = iris.target svd = TruncatedSVD(n_components=2, random_state=0) select = SelectKBest(k=1) fs = FeatureUnion([("svd", svd), ("select", select)]) fs.fit(X, y) X_transformed = fs.transform(X) assert X_transformed.shape == (X.shape[0], 3) # check if it does the expected thing assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) # test if it also works for sparse input # We use a different svd object to control the random_state stream fs = FeatureUnion([("svd", svd), ("select", select)]) X_sp = csr_container(X) X_sp_transformed = fs.fit_transform(X_sp, y) 
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray()) # Test clone fs2 = clone(fs) assert fs.transformer_list[0][1] is not fs2.transformer_list[0][1] # test setting parameters fs.set_params(select__k=2) assert fs.fit_transform(X, y).shape == (X.shape[0], 4) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)]) X_transformed = fs.fit_transform(X, y) assert X_transformed.shape == (X.shape[0], 8) # test error if some elements do not support transform msg = "All estimators should implement fit and transform.*\\bNoTrans\\b" fs = FeatureUnion([("transform", Transf()), ("no_transform", NoTrans())]) with pytest.raises(TypeError, match=msg): fs.fit(X) # test that init accepts tuples fs = FeatureUnion((("svd", svd), ("select", select))) fs.fit(X, y) def test_feature_union_named_transformers(): """Check the behaviour of `named_transformers` attribute.""" transf = Transf() noinvtransf = NoInvTransf() fs = FeatureUnion([("transf", transf), ("noinvtransf", noinvtransf)]) assert fs.named_transformers["transf"] == transf assert fs.named_transformers["noinvtransf"] == noinvtransf # test named attribute assert fs.named_transformers.transf == transf assert fs.named_transformers.noinvtransf == noinvtransf def test_make_union(): pca = PCA(svd_solver="full") mock = Transf() fu = make_union(pca, mock) names, transformers = zip(*fu.transformer_list) assert names == ("pca", "transf") assert transformers == (pca, mock) def test_make_union_kwargs(): pca = PCA(svd_solver="full") mock = Transf() fu = make_union(pca, mock, n_jobs=3) assert fu.transformer_list == make_union(pca, mock).transformer_list assert 3 == fu.n_jobs # invalid keyword parameters should raise an error message msg = re.escape( "make_union() got an unexpected keyword argument 'transformer_weights'" ) with pytest.raises(TypeError, match=msg): make_union(pca, mock, transformer_weights={"pca": 10, "Transf": 1}) def 
create_mock_transformer(base_name, n_features=3): """Helper to create a mock transformer with custom feature names.""" mock = Transf() mock.get_feature_names_out = lambda input_features: [ f"{base_name}{i}" for i in range(n_features) ] return mock def test_make_union_passes_verbose_feature_names_out(): # Test that make_union passes verbose_feature_names_out # to the FeatureUnion. X = iris.data y = iris.target pca = PCA() mock = create_mock_transformer("transf") union = make_union(pca, mock, verbose_feature_names_out=False) assert not union.verbose_feature_names_out fu_union = make_union(pca, mock, verbose_feature_names_out=True) fu_union.fit(X, y) assert_array_equal( [ "pca__pca0", "pca__pca1", "pca__pca2", "pca__pca3", "transf__transf0", "transf__transf1", "transf__transf2", ], fu_union.get_feature_names_out(), ) def test_pipeline_transform(): # Test whether pipeline works with a transformer at the end. # Also test pipeline.transform and pipeline.inverse_transform X = iris.data pca = PCA(n_components=2, svd_solver="full") pipeline = Pipeline([("pca", pca)]) # test transform and fit_transform: X_trans = pipeline.fit(X).transform(X) X_trans2 = pipeline.fit_transform(X) X_trans3 = pca.fit_transform(X) assert_array_almost_equal(X_trans, X_trans2) assert_array_almost_equal(X_trans, X_trans3) X_back = pipeline.inverse_transform(X_trans) X_back2 = pca.inverse_transform(X_trans) assert_array_almost_equal(X_back, X_back2) def test_pipeline_fit_transform(): # Test whether pipeline works with a transformer missing fit_transform X = iris.data y = iris.target transf = Transf() pipeline = Pipeline([("mock", transf)]) # test fit_transform: X_trans = pipeline.fit_transform(X, y) X_trans2 = transf.fit(X, y).transform(X) assert_array_almost_equal(X_trans, X_trans2) @pytest.mark.parametrize( "start, end", [(0, 1), (0, 2), (1, 2), (1, 3), (None, 1), (1, None), (None, None)] ) def test_pipeline_slice(start, end): pipe = Pipeline( [("transf1", Transf()), ("transf2", Transf()), ("clf", 
FitParamT())], memory="123", verbose=True, ) pipe_slice = pipe[start:end] # Test class assert isinstance(pipe_slice, Pipeline) # Test steps assert pipe_slice.steps == pipe.steps[start:end] # Test named_steps attribute assert ( list(pipe_slice.named_steps.items()) == list(pipe.named_steps.items())[start:end] ) # Test the rest of the parameters pipe_params = pipe.get_params(deep=False) pipe_slice_params = pipe_slice.get_params(deep=False) del pipe_params["steps"] del pipe_slice_params["steps"] assert pipe_params == pipe_slice_params # Test exception msg = "Pipeline slicing only supports a step of 1" with pytest.raises(ValueError, match=msg): pipe[start:end:-1] def test_pipeline_index(): transf = Transf() clf = FitParamT() pipe = Pipeline([("transf", transf), ("clf", clf)]) assert pipe[0] == transf assert pipe["transf"] == transf assert pipe[-1] == clf assert pipe["clf"] == clf # should raise an error if slicing out of range with pytest.raises(IndexError): pipe[3] # should raise an error if indexing with wrong element name with pytest.raises(KeyError): pipe["foobar"] def test_set_pipeline_steps(): transf1 = Transf() transf2 = Transf() pipeline = Pipeline([("mock", transf1)]) assert pipeline.named_steps["mock"] is transf1 # Directly setting attr pipeline.steps = [("mock2", transf2)] assert "mock" not in pipeline.named_steps assert pipeline.named_steps["mock2"] is transf2 assert [("mock2", transf2)] == pipeline.steps # Using set_params pipeline.set_params(steps=[("mock", transf1)]) assert [("mock", transf1)] == pipeline.steps # Using set_params to replace single step pipeline.set_params(mock=transf2) assert [("mock", transf2)] == pipeline.steps # With invalid data pipeline.set_params(steps=[("junk", ())]) msg = re.escape( "Last step of Pipeline should implement fit or be the string 'passthrough'." 
) with pytest.raises(TypeError, match=msg): pipeline.fit([[1]], [1]) msg = "This 'Pipeline' has no attribute 'fit_transform'" with pytest.raises(AttributeError, match=msg): pipeline.fit_transform([[1]], [1]) def test_pipeline_named_steps(): transf = Transf() mult2 = Mult(mult=2) pipeline = Pipeline([("mock", transf), ("mult", mult2)]) # Test access via named_steps bunch object assert "mock" in pipeline.named_steps assert "mock2" not in pipeline.named_steps assert pipeline.named_steps.mock is transf assert pipeline.named_steps.mult is mult2 # Test bunch with conflict attribute of dict pipeline = Pipeline([("values", transf), ("mult", mult2)]) assert pipeline.named_steps.values is not transf assert pipeline.named_steps.mult is mult2 @pytest.mark.parametrize("passthrough", [None, "passthrough"]) def test_pipeline_correctly_adjusts_steps(passthrough): X = np.array([[1]]) y = np.array([1]) mult2 = Mult(mult=2) mult3 = Mult(mult=3) mult5 = Mult(mult=5) pipeline = Pipeline( [("m2", mult2), ("bad", passthrough), ("m3", mult3), ("m5", mult5)] ) pipeline.fit(X, y) expected_names = ["m2", "bad", "m3", "m5"] actual_names = [name for name, _ in pipeline.steps] assert expected_names == actual_names @pytest.mark.parametrize("passthrough", [None, "passthrough"]) def test_set_pipeline_step_passthrough(passthrough): X = np.array([[1]]) y = np.array([1]) mult2 = Mult(mult=2) mult3 = Mult(mult=3) mult5 = Mult(mult=5) def make(): return Pipeline([("m2", mult2), ("m3", mult3), ("last", mult5)]) pipeline = make() exp = 2 * 3 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) pipeline.set_params(m3=passthrough) exp = 2 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) assert pipeline.get_params(deep=True) == { "steps": pipeline.steps, "m2": 
mult2, "m3": passthrough, "last": mult5, "memory": None, "m2__mult": 2, "last__mult": 5, "transform_input": None, "verbose": False, } pipeline.set_params(m2=passthrough) exp = 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) # for other methods, ensure no AttributeErrors on None: other_methods = [ "predict_proba", "predict_log_proba", "decision_function", "transform", "score", ] for method in other_methods: getattr(pipeline, method)(X) pipeline.set_params(m2=mult2) exp = 2 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) pipeline = make() pipeline.set_params(last=passthrough) # mult2 and mult3 are active exp = 6 assert_array_equal([[exp]], pipeline.fit(X, y).transform(X)) assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) inner_msg = "'str' object has no attribute 'predict'" outer_msg = "This 'Pipeline' has no attribute 'predict'" with pytest.raises(AttributeError, match=outer_msg) as exec_info: getattr(pipeline, "predict") assert isinstance(exec_info.value.__cause__, AttributeError) assert inner_msg in str(exec_info.value.__cause__) # Check 'passthrough' step at construction time exp = 2 * 5 pipeline = Pipeline([("m2", mult2), ("m3", passthrough), ("last", mult5)]) assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) def test_pipeline_ducktyping(): pipeline = make_pipeline(Mult(5)) pipeline.predict pipeline.transform pipeline.inverse_transform pipeline = make_pipeline(Transf()) assert not hasattr(pipeline, "predict") pipeline.transform pipeline.inverse_transform pipeline = make_pipeline("passthrough") assert pipeline.steps[0] == 
("passthrough", "passthrough") assert not hasattr(pipeline, "predict") pipeline.transform pipeline.inverse_transform pipeline = make_pipeline(Transf(), NoInvTransf()) assert not hasattr(pipeline, "predict") pipeline.transform assert not hasattr(pipeline, "inverse_transform") pipeline = make_pipeline(NoInvTransf(), Transf()) assert not hasattr(pipeline, "predict") pipeline.transform assert not hasattr(pipeline, "inverse_transform") def test_make_pipeline(): t1 = Transf() t2 = Transf() pipe = make_pipeline(t1, t2) assert isinstance(pipe, Pipeline) assert pipe.steps[0][0] == "transf-1" assert pipe.steps[1][0] == "transf-2" pipe = make_pipeline(t1, t2, FitParamT()) assert isinstance(pipe, Pipeline) assert pipe.steps[0][0] == "transf-1" assert pipe.steps[1][0] == "transf-2" assert pipe.steps[2][0] == "fitparamt" @pytest.mark.parametrize( "pipeline, check_estimator_type", [ (make_pipeline(StandardScaler(), LogisticRegression()), is_classifier), (make_pipeline(StandardScaler(), LinearRegression()), is_regressor), ( make_pipeline(StandardScaler()), lambda est: get_tags(est).estimator_type is None, ), (Pipeline([]), lambda est: get_tags(est).estimator_type is None), ], ) def test_pipeline_estimator_type(pipeline, check_estimator_type): """Check that the estimator type returned by the pipeline is correct. Non-regression test as part of: https://github.com/scikit-learn/scikit-learn/issues/30197 """ # Smoke test the repr repr(pipeline) assert check_estimator_type(pipeline) def test_sklearn_tags_with_empty_pipeline(): """Check that we propagate properly the tags in a Pipeline. 
Non-regression test as part of: https://github.com/scikit-learn/scikit-learn/issues/30197 """ empty_pipeline = Pipeline(steps=[]) be = BaseEstimator() expected_tags = be.__sklearn_tags__() assert empty_pipeline.__sklearn_tags__() == expected_tags def test_feature_union_weights(): # test feature union with transformer weights X = iris.data y = iris.target pca = PCA(n_components=2, svd_solver="randomized", random_state=0) select = SelectKBest(k=1) # test using fit followed by transform fs = FeatureUnion( [("pca", pca), ("select", select)], transformer_weights={"pca": 10} ) fs.fit(X, y) X_transformed = fs.transform(X) # test using fit_transform fs = FeatureUnion( [("pca", pca), ("select", select)], transformer_weights={"pca": 10} ) X_fit_transformed = fs.fit_transform(X, y) # test it works with transformers missing fit_transform fs = FeatureUnion( [("mock", Transf()), ("pca", pca), ("select", select)], transformer_weights={"mock": 10}, ) X_fit_transformed_wo_method = fs.fit_transform(X, y) # check against expected result # We use a different pca object to control the random_state stream assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel())
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_common.py
sklearn/tests/test_common.py
""" General tests for all estimators in sklearn. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import os import pkgutil import re import warnings from functools import partial from itertools import chain import pytest from scipy.linalg import LinAlgWarning import sklearn from sklearn.base import BaseEstimator from sklearn.compose import ColumnTransformer from sklearn.exceptions import ConvergenceWarning # make it possible to discover experimental estimators when calling `all_estimators` from sklearn.experimental import ( enable_halving_search_cv, # noqa: F401 enable_iterative_imputer, # noqa: F401 ) from sklearn.linear_model import LogisticRegression from sklearn.pipeline import FeatureUnion, make_pipeline from sklearn.preprocessing import ( FunctionTransformer, MinMaxScaler, OneHotEncoder, StandardScaler, ) from sklearn.utils import all_estimators from sklearn.utils._test_common.instance_generator import ( _get_check_estimator_ids, _get_expected_failed_checks, _tested_estimators, _yield_instances_for_check, ) from sklearn.utils._testing import ( SkipTest, ignore_warnings, ) from sklearn.utils.estimator_checks import ( check_dataframe_column_names_consistency, check_estimator, check_get_feature_names_out_error, check_global_output_transform_pandas, check_global_set_output_transform_polars, check_inplace_ensure_writeable, check_param_validation, check_set_output_transform, check_set_output_transform_pandas, check_set_output_transform_polars, check_transformer_get_feature_names_out, check_transformer_get_feature_names_out_pandas, parametrize_with_checks, ) @pytest.mark.thread_unsafe # import side-effects def test_all_estimator_no_base_class(): # test that all_estimators doesn't find abstract classes. 
for name, Estimator in all_estimators(): msg = ( "Base estimators such as {0} should not be included in all_estimators" ).format(name) assert not name.lower().startswith("base"), msg def _sample_func(x, y=1): pass class CallableEstimator(BaseEstimator): """Dummy development stub for an estimator. This is to make sure a callable estimator passes common tests. """ def __call__(self): pass # pragma: nocover @pytest.mark.parametrize( "val, expected", [ (partial(_sample_func, y=1), "_sample_func(y=1)"), (_sample_func, "_sample_func"), (partial(_sample_func, "world"), "_sample_func"), (LogisticRegression(C=2.0), "LogisticRegression(C=2.0)"), ( LogisticRegression( random_state=1, solver="newton-cg", class_weight="balanced", warm_start=True, ), ( "LogisticRegression(class_weight='balanced',random_state=1," "solver='newton-cg',warm_start=True)" ), ), (CallableEstimator(), "CallableEstimator()"), ], ) def test_get_check_estimator_ids(val, expected): assert _get_check_estimator_ids(val) == expected @parametrize_with_checks( list(_tested_estimators()), expected_failed_checks=_get_expected_failed_checks ) def test_estimators(estimator, check, request): # Common tests for estimator instances with ignore_warnings( category=(FutureWarning, ConvergenceWarning, UserWarning, LinAlgWarning) ): check(estimator) @pytest.mark.filterwarnings( "ignore:Since version 1.0, it is not needed to import " "enable_hist_gradient_boosting anymore" ) @pytest.mark.thread_unsafe # import side-effects def test_import_all_consistency(): sklearn_path = [os.path.dirname(sklearn.__file__)] # Smoke test to check that any name in a __all__ list is actually defined # in the namespace of the module or package. pkgs = pkgutil.walk_packages( path=sklearn_path, prefix="sklearn.", onerror=lambda _: None ) submods = [modname for _, modname, _ in pkgs] for modname in submods + ["sklearn"]: if ".tests." 
in modname or "sklearn.externals" in modname: continue # Avoid test suite depending on build dependencies, for example Cython if "sklearn._build_utils" in modname: continue package = __import__(modname, fromlist="dummy") for name in getattr(package, "__all__", ()): assert hasattr(package, name), "Module '{0}' has no attribute '{1}'".format( modname, name ) def test_root_import_all_completeness(): sklearn_path = [os.path.dirname(sklearn.__file__)] EXCEPTIONS = ("utils", "tests", "base", "conftest") for _, modname, _ in pkgutil.walk_packages( path=sklearn_path, onerror=lambda _: None ): if "." in modname or modname.startswith("_") or modname in EXCEPTIONS: continue assert modname in sklearn.__all__ @pytest.mark.thread_unsafe # import side-effects def test_all_tests_are_importable(): # Ensure that for each contentful subpackage, there is a test directory # within it that is also a subpackage (i.e. a directory with __init__.py) HAS_TESTS_EXCEPTIONS = re.compile( r"""(?x) \.externals(\.|$)| \.tests(\.|$)| \._ """ ) resource_modules = { "sklearn.datasets.data", "sklearn.datasets.descr", "sklearn.datasets.images", } sklearn_path = [os.path.dirname(sklearn.__file__)] lookup = { name: ispkg for _, name, ispkg in pkgutil.walk_packages(sklearn_path, prefix="sklearn.") } missing_tests = [ name for name, ispkg in lookup.items() if ispkg and name not in resource_modules and not HAS_TESTS_EXCEPTIONS.search(name) and name + ".tests" not in lookup ] assert missing_tests == [], ( "{0} do not have `tests` subpackages. 
" "Perhaps they require " "__init__.py or a meson.build " "in the parent " "directory".format(missing_tests) ) def test_class_support_removed(): # Make sure passing classes to check_estimator or parametrize_with_checks # raises an error msg = "Passing a class was deprecated.* isn't supported anymore" with pytest.raises(TypeError, match=msg): check_estimator(LogisticRegression) with pytest.raises(TypeError, match=msg): parametrize_with_checks([LogisticRegression]) def _estimators_that_predict_in_fit(): for estimator in _tested_estimators(): est_params = set(estimator.get_params()) if "oob_score" in est_params: yield estimator.set_params(oob_score=True, bootstrap=True) elif "early_stopping" in est_params: est = estimator.set_params(early_stopping=True, n_iter_no_change=1) if est.__class__.__name__ in {"MLPClassifier", "MLPRegressor"}: # TODO: FIX MLP to not check validation set during MLP yield pytest.param( est, marks=pytest.mark.xfail(msg="MLP still validates in fit") ) else: yield est elif "n_iter_no_change" in est_params: yield estimator.set_params(n_iter_no_change=1) # NOTE: When running `check_dataframe_column_names_consistency` on a meta-estimator that # delegates validation to a base estimator, the check is testing that the base estimator # is checking for column name consistency. 
column_name_estimators = list( chain( _tested_estimators(), [make_pipeline(LogisticRegression(C=1))], _estimators_that_predict_in_fit(), ) ) @pytest.mark.parametrize( "estimator_orig", column_name_estimators, ids=_get_check_estimator_ids ) def test_pandas_column_name_consistency(estimator_orig): if isinstance(estimator_orig, ColumnTransformer): pytest.skip("ColumnTransformer is not tested here") if "check_dataframe_column_names_consistency" in _get_expected_failed_checks( estimator_orig ): pytest.skip( "Estimator does not support check_dataframe_column_names_consistency" ) for estimator in _yield_instances_for_check( check_dataframe_column_names_consistency, estimator_orig ): with ignore_warnings(category=(FutureWarning)): with warnings.catch_warnings(record=True) as record: check_dataframe_column_names_consistency( estimator.__class__.__name__, estimator ) for warning in record: assert "was fitted without feature names" not in str(warning.message) # TODO: As more modules support get_feature_names_out they should be removed # from this list to be tested GET_FEATURES_OUT_MODULES_TO_IGNORE = [ "ensemble", "kernel_approximation", ] def _include_in_get_feature_names_out_check(transformer): if hasattr(transformer, "get_feature_names_out"): return True module = transformer.__module__.split(".")[1] return module not in GET_FEATURES_OUT_MODULES_TO_IGNORE GET_FEATURES_OUT_ESTIMATORS = [ est for est in _tested_estimators("transformer") if _include_in_get_feature_names_out_check(est) ] @pytest.mark.parametrize( "transformer", GET_FEATURES_OUT_ESTIMATORS, ids=_get_check_estimator_ids ) def test_transformers_get_feature_names_out(transformer): with ignore_warnings(category=(FutureWarning)): check_transformer_get_feature_names_out( transformer.__class__.__name__, transformer ) check_transformer_get_feature_names_out_pandas( transformer.__class__.__name__, transformer ) ESTIMATORS_WITH_GET_FEATURE_NAMES_OUT = [ est for est in _tested_estimators() if hasattr(est, 
"get_feature_names_out") ] @pytest.mark.parametrize( "estimator", ESTIMATORS_WITH_GET_FEATURE_NAMES_OUT, ids=_get_check_estimator_ids ) def test_estimators_get_feature_names_out_error(estimator): estimator_name = estimator.__class__.__name__ check_get_feature_names_out_error(estimator_name, estimator) @pytest.mark.parametrize( "estimator", list(_tested_estimators()), ids=_get_check_estimator_ids ) def test_check_param_validation(estimator): if isinstance(estimator, FeatureUnion): pytest.skip("FeatureUnion is not tested here") name = estimator.__class__.__name__ check_param_validation(name, estimator) SET_OUTPUT_ESTIMATORS = list( chain( _tested_estimators("transformer"), [ make_pipeline(StandardScaler(), MinMaxScaler()), OneHotEncoder(sparse_output=False), FunctionTransformer(feature_names_out="one-to-one"), ], ) ) @pytest.mark.parametrize( "estimator_orig", SET_OUTPUT_ESTIMATORS, ids=_get_check_estimator_ids ) def test_set_output_transform(estimator_orig): name = estimator_orig.__class__.__name__ if not hasattr(estimator_orig, "set_output"): pytest.skip( f"Skipping check_set_output_transform for {name}: Does not support" " set_output API" ) for estimator in _yield_instances_for_check( check_set_output_transform, estimator_orig ): with ignore_warnings(category=(FutureWarning)): check_set_output_transform(estimator.__class__.__name__, estimator) @pytest.mark.parametrize( "estimator_orig", SET_OUTPUT_ESTIMATORS, ids=_get_check_estimator_ids ) @pytest.mark.parametrize( "check_func", [ check_set_output_transform_pandas, check_global_output_transform_pandas, check_set_output_transform_polars, check_global_set_output_transform_polars, ], ) def test_set_output_transform_configured(estimator_orig, check_func): name = estimator_orig.__class__.__name__ if not hasattr(estimator_orig, "set_output"): pytest.skip( f"Skipping {check_func.__name__} for {name}: Does not support" " set_output API yet" ) for estimator in _yield_instances_for_check(check_func, estimator_orig): with 
ignore_warnings(category=(FutureWarning)): check_func(estimator.__class__.__name__, estimator) @pytest.mark.parametrize( "estimator", _tested_estimators(), ids=_get_check_estimator_ids ) def test_check_inplace_ensure_writeable(estimator): name = estimator.__class__.__name__ if hasattr(estimator, "copy"): estimator.set_params(copy=False) elif hasattr(estimator, "copy_X"): estimator.set_params(copy_X=False) else: raise SkipTest(f"{name} doesn't require writeable input.") # The following estimators can work inplace only with certain settings if name == "HDBSCAN": estimator.set_params(metric="precomputed", algorithm="brute") if name == "PCA": estimator.set_params(svd_solver="full") if name == "KernelPCA": estimator.set_params(kernel="precomputed") check_inplace_ensure_writeable(name, estimator)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_kernel_approximation.py
sklearn/tests/test_kernel_approximation.py
import re import numpy as np import pytest from sklearn.datasets import make_classification from sklearn.kernel_approximation import ( AdditiveChi2Sampler, Nystroem, PolynomialCountSketch, RBFSampler, SkewedChi2Sampler, ) from sklearn.metrics.pairwise import ( chi2_kernel, kernel_metrics, polynomial_kernel, rbf_kernel, ) from sklearn.utils._testing import ( assert_allclose, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.fixes import CSR_CONTAINERS # generate data rng = np.random.RandomState(0) X = rng.random_sample(size=(300, 50)) Y = rng.random_sample(size=(300, 50)) X /= X.sum(axis=1)[:, np.newaxis] Y /= Y.sum(axis=1)[:, np.newaxis] # Make sure X and Y are not writable to avoid introducing dependencies between # tests. X.flags.writeable = False Y.flags.writeable = False @pytest.mark.parametrize("gamma", [0.1, 1, 2.5]) @pytest.mark.parametrize("degree, n_components", [(1, 500), (2, 500), (3, 5000)]) @pytest.mark.parametrize("coef0", [0, 2.5]) def test_polynomial_count_sketch(gamma, degree, coef0, n_components): # test that PolynomialCountSketch approximates polynomial # kernel on random data # compute exact kernel kernel = polynomial_kernel(X, Y, gamma=gamma, degree=degree, coef0=coef0) # approximate kernel mapping ps_transform = PolynomialCountSketch( n_components=n_components, gamma=gamma, coef0=coef0, degree=degree, random_state=42, ) X_trans = ps_transform.fit_transform(X) Y_trans = ps_transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) error = kernel - kernel_approx assert np.abs(np.mean(error)) <= 0.05 # close to unbiased np.abs(error, out=error) assert np.max(error) <= 0.1 # nothing too far off assert np.mean(error) <= 0.05 # mean is fairly close @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) @pytest.mark.parametrize("gamma", [0.1, 1.0]) @pytest.mark.parametrize("degree", [1, 2, 3]) @pytest.mark.parametrize("coef0", [0, 2.5]) def test_polynomial_count_sketch_dense_sparse(gamma, degree, coef0, csr_container): 
"""Check that PolynomialCountSketch results are the same for dense and sparse input. """ ps_dense = PolynomialCountSketch( n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42 ) Xt_dense = ps_dense.fit_transform(X) Yt_dense = ps_dense.transform(Y) ps_sparse = PolynomialCountSketch( n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42 ) Xt_sparse = ps_sparse.fit_transform(csr_container(X)) Yt_sparse = ps_sparse.transform(csr_container(Y)) assert_allclose(Xt_dense, Xt_sparse) assert_allclose(Yt_dense, Yt_sparse) def _linear_kernel(X, Y): return np.dot(X, Y.T) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_additive_chi2_sampler(csr_container): # test that AdditiveChi2Sampler approximates kernel on random data # compute exact kernel # abbreviations for easier formula X_ = X[:, np.newaxis, :].copy() Y_ = Y[np.newaxis, :, :].copy() large_kernel = 2 * X_ * Y_ / (X_ + Y_) # reduce to n_samples_x x n_samples_y by summing over features kernel = large_kernel.sum(axis=2) # approximate kernel mapping transform = AdditiveChi2Sampler(sample_steps=3) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) X_sp_trans = transform.fit_transform(csr_container(X)) Y_sp_trans = transform.transform(csr_container(Y)) assert_array_equal(X_trans, X_sp_trans.toarray()) assert_array_equal(Y_trans, Y_sp_trans.toarray()) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 msg = "Negative values in data passed to" with pytest.raises(ValueError, match=msg): transform.fit(Y_neg) @pytest.mark.parametrize("method", ["fit", "fit_transform", "transform"]) @pytest.mark.parametrize("sample_steps", range(1, 4)) def test_additive_chi2_sampler_sample_steps(method, sample_steps): """Check that the input sample step doesn't raise an error and that sample interval doesn't change after fit. 
""" transformer = AdditiveChi2Sampler(sample_steps=sample_steps) getattr(transformer, method)(X) sample_interval = 0.5 transformer = AdditiveChi2Sampler( sample_steps=sample_steps, sample_interval=sample_interval, ) getattr(transformer, method)(X) assert transformer.sample_interval == sample_interval @pytest.mark.parametrize("method", ["fit", "fit_transform", "transform"]) def test_additive_chi2_sampler_wrong_sample_steps(method): """Check that we raise a ValueError on invalid sample_steps""" transformer = AdditiveChi2Sampler(sample_steps=4) msg = re.escape( "If sample_steps is not in [1, 2, 3], you need to provide sample_interval" ) with pytest.raises(ValueError, match=msg): getattr(transformer, method)(X) def test_skewed_chi2_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel c = 0.03 # set on negative component but greater than c to ensure that the kernel # approximation is valid on the group (-c; +\infty) endowed with the skewed # multiplication. 
Y_ = Y.copy() Y_[0, 0] = -c / 2.0 # abbreviations for easier formula X_c = (X + c)[:, np.newaxis, :] Y_c = (Y_ + c)[np.newaxis, :, :] # we do it in log-space in the hope that it's more stable # this array is n_samples_x x n_samples_y big x n_features log_kernel = ( (np.log(X_c) / 2.0) + (np.log(Y_c) / 2.0) + np.log(2.0) - np.log(X_c + Y_c) ) # reduce to n_samples_x x n_samples_y by summing over features in log-space kernel = np.exp(log_kernel.sum(axis=2)) # approximate kernel mapping transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y_) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) assert np.isfinite(kernel).all(), "NaNs found in the Gram matrix" assert np.isfinite(kernel_approx).all(), "NaNs found in the approximate Gram matrix" # test error is raised on when inputs contains values smaller than -c Y_neg = Y_.copy() Y_neg[0, 0] = -c * 2.0 msg = "X may not contain entries smaller than -skewedness" with pytest.raises(ValueError, match=msg): transform.transform(Y_neg) def test_additive_chi2_sampler_exceptions(): """Ensures correct error message""" transformer = AdditiveChi2Sampler() X_neg = X.copy() X_neg[0, 0] = -1 with pytest.raises(ValueError, match="X in AdditiveChi2Sampler"): transformer.fit(X_neg) with pytest.raises(ValueError, match="X in AdditiveChi2Sampler"): transformer.fit(X) transformer.transform(X_neg) def test_rbf_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel gamma = 10.0 kernel = rbf_kernel(X, Y, gamma=gamma) # approximate kernel mapping rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42) X_trans = rbf_transform.fit_transform(X) Y_trans = rbf_transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) error = kernel - kernel_approx assert np.abs(np.mean(error)) <= 0.01 # close to unbiased np.abs(error, out=error) assert np.max(error) 
<= 0.1 # nothing too far off assert np.mean(error) <= 0.05 # mean is fairly close def test_rbf_sampler_fitted_attributes_dtype(global_dtype): """Check that the fitted attributes are stored accordingly to the data type of X.""" rbf = RBFSampler() X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype) rbf.fit(X) assert rbf.random_offset_.dtype == global_dtype assert rbf.random_weights_.dtype == global_dtype def test_rbf_sampler_dtype_equivalence(): """Check the equivalence of the results with 32 and 64 bits input.""" rbf32 = RBFSampler(random_state=42) X32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) rbf32.fit(X32) rbf64 = RBFSampler(random_state=42) X64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64) rbf64.fit(X64) assert_allclose(rbf32.random_offset_, rbf64.random_offset_) assert_allclose(rbf32.random_weights_, rbf64.random_weights_) def test_rbf_sampler_gamma_scale(): """Check the inner value computed when `gamma='scale'`.""" X, y = [[0.0], [1.0]], [0, 1] rbf = RBFSampler(gamma="scale") rbf.fit(X, y) assert rbf._gamma == pytest.approx(4) def test_skewed_chi2_sampler_fitted_attributes_dtype(global_dtype): """Check that the fitted attributes are stored accordingly to the data type of X.""" skewed_chi2_sampler = SkewedChi2Sampler() X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype) skewed_chi2_sampler.fit(X) assert skewed_chi2_sampler.random_offset_.dtype == global_dtype assert skewed_chi2_sampler.random_weights_.dtype == global_dtype def test_skewed_chi2_sampler_dtype_equivalence(): """Check the equivalence of the results with 32 and 64 bits input.""" skewed_chi2_sampler_32 = SkewedChi2Sampler(random_state=42) X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) skewed_chi2_sampler_32.fit(X_32) skewed_chi2_sampler_64 = SkewedChi2Sampler(random_state=42) X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64) skewed_chi2_sampler_64.fit(X_64) assert_allclose( skewed_chi2_sampler_32.random_offset_, 
skewed_chi2_sampler_64.random_offset_ ) assert_allclose( skewed_chi2_sampler_32.random_weights_, skewed_chi2_sampler_64.random_weights_ ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_input_validation(csr_container): # Regression test: kernel approx. transformers should work on lists # No assertions; the old versions would simply crash X = [[1, 2], [3, 4], [5, 6]] AdditiveChi2Sampler().fit(X).transform(X) SkewedChi2Sampler().fit(X).transform(X) RBFSampler().fit(X).transform(X) X = csr_container(X) RBFSampler().fit(X).transform(X) def test_nystroem_approximation(): # some basic tests rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 4)) # With n_components = n_samples this is exact X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X) K = rbf_kernel(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) trans = Nystroem(n_components=2, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert X_transformed.shape == (X.shape[0], 2) # test callable kernel trans = Nystroem(n_components=2, kernel=_linear_kernel, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert X_transformed.shape == (X.shape[0], 2) # test that available kernels fit and transform kernels_available = kernel_metrics() for kern in kernels_available: trans = Nystroem(n_components=2, kernel=kern, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert X_transformed.shape == (X.shape[0], 2) def test_nystroem_default_parameters(): rnd = np.random.RandomState(42) X = rnd.uniform(size=(10, 4)) # rbf kernel should behave as gamma=None by default # aka gamma = 1 / n_features nystroem = Nystroem(n_components=10) X_transformed = nystroem.fit_transform(X) K = rbf_kernel(X, gamma=None) K2 = np.dot(X_transformed, X_transformed.T) assert_array_almost_equal(K, K2) # chi2 kernel should behave as gamma=1 by default nystroem = Nystroem(kernel="chi2", n_components=10) X_transformed = nystroem.fit_transform(X) K = chi2_kernel(X, 
gamma=1) K2 = np.dot(X_transformed, X_transformed.T) assert_array_almost_equal(K, K2) def test_nystroem_singular_kernel(): # test that nystroem works with singular kernel matrix rng = np.random.RandomState(0) X = rng.rand(10, 20) X = np.vstack([X] * 2) # duplicate samples gamma = 100 N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X) X_transformed = N.transform(X) K = rbf_kernel(X, gamma=gamma) assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T)) assert np.all(np.isfinite(Y)) def test_nystroem_poly_kernel_params(): # Non-regression: Nystroem should pass other parameters beside gamma. rnd = np.random.RandomState(37) X = rnd.uniform(size=(10, 4)) K = polynomial_kernel(X, degree=3.1, coef0=0.1) nystroem = Nystroem( kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=0.1 ) X_transformed = nystroem.fit_transform(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) def test_nystroem_callable(): # Test Nystroem on a callable. rnd = np.random.RandomState(42) n_samples = 10 X = rnd.uniform(size=(n_samples, 4)) def logging_histogram_kernel(x, y, log): """Histogram kernel that writes to a log.""" log.append(1) return np.minimum(x, y).sum() kernel_log = [] X = list(X) # test input validation Nystroem( kernel=logging_histogram_kernel, n_components=(n_samples - 1), kernel_params={"log": kernel_log}, ).fit(X) assert len(kernel_log) == n_samples * (n_samples - 1) / 2 # if degree, gamma or coef0 is passed, we raise a ValueError msg = "Don't pass gamma, coef0 or degree to Nystroem" params = ({"gamma": 1}, {"coef0": 1}, {"degree": 2}) for param in params: ny = Nystroem(kernel=_linear_kernel, n_components=(n_samples - 1), **param) with pytest.raises(ValueError, match=msg): ny.fit(X) def test_nystroem_precomputed_kernel(): # Non-regression: test Nystroem on precomputed kernel. 
# PR - 14706 rnd = np.random.RandomState(12) X = rnd.uniform(size=(10, 4)) K = polynomial_kernel(X, degree=2, coef0=0.1) nystroem = Nystroem(kernel="precomputed", n_components=X.shape[0]) X_transformed = nystroem.fit_transform(K) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) # if degree, gamma or coef0 is passed, we raise a ValueError msg = "Don't pass gamma, coef0 or degree to Nystroem" params = ({"gamma": 1}, {"coef0": 1}, {"degree": 2}) for param in params: ny = Nystroem(kernel="precomputed", n_components=X.shape[0], **param) with pytest.raises(ValueError, match=msg): ny.fit(K) def test_nystroem_component_indices(): """Check that `component_indices_` corresponds to the subset of training points used to construct the feature map. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/20474 """ X, _ = make_classification(n_samples=100, n_features=20) feature_map_nystroem = Nystroem( n_components=10, random_state=0, ) feature_map_nystroem.fit(X) assert feature_map_nystroem.component_indices_.shape == (10,) @pytest.mark.parametrize( "Estimator", [PolynomialCountSketch, RBFSampler, SkewedChi2Sampler, Nystroem] ) def test_get_feature_names_out(Estimator): """Check get_feature_names_out""" est = Estimator().fit(X) X_trans = est.transform(X) names_out = est.get_feature_names_out() class_name = Estimator.__name__.lower() expected_names = [f"{class_name}{i}" for i in range(X_trans.shape[1])] assert_array_equal(names_out, expected_names) def test_additivechi2sampler_get_feature_names_out(): """Check get_feature_names_out for AdditiveChi2Sampler.""" rng = np.random.RandomState(0) X = rng.random_sample(size=(300, 3)) chi2_sampler = AdditiveChi2Sampler(sample_steps=3).fit(X) input_names = ["f0", "f1", "f2"] suffixes = [ "f0_sqrt", "f1_sqrt", "f2_sqrt", "f0_cos1", "f1_cos1", "f2_cos1", "f0_sin1", "f1_sin1", "f2_sin1", "f0_cos2", "f1_cos2", "f2_cos2", "f0_sin2", "f1_sin2", "f2_sin2", ] names_out = 
chi2_sampler.get_feature_names_out(input_features=input_names) expected_names = [f"additivechi2sampler_{suffix}" for suffix in suffixes] assert_array_equal(names_out, expected_names)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_base.py
sklearn/tests/test_base.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import pickle import re import warnings import numpy as np import pytest import scipy.sparse as sp from numpy.testing import assert_allclose import sklearn from sklearn import config_context, datasets from sklearn.base import ( BaseEstimator, OutlierMixin, TransformerMixin, clone, is_classifier, is_clusterer, is_regressor, ) from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.exceptions import InconsistentVersionWarning from sklearn.metrics import get_scorer from sklearn.model_selection import GridSearchCV, KFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import LabelEncoder, StandardScaler from sklearn.svm import SVC, SVR from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.utils._mocking import MockDataFrame from sklearn.utils._set_output import _get_output_config from sklearn.utils._testing import ( _convert_container, assert_array_equal, ) from sklearn.utils.validation import _check_n_features, validate_data ############################################################################# # A few test classes class MyEstimator(BaseEstimator): def __init__(self, l1=0, empty=None): self.l1 = l1 self.empty = empty class K(BaseEstimator): def __init__(self, c=None, d=None): self.c = c self.d = d class T(BaseEstimator): def __init__(self, a=None, b=None): self.a = a self.b = b class NaNTag(BaseEstimator): def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = True return tags class NoNaNTag(BaseEstimator): def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = False return tags class OverrideTag(NaNTag): def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = False return tags class DiamondOverwriteTag(NaNTag, NoNaNTag): pass class InheritDiamondOverwriteTag(DiamondOverwriteTag): pass class 
ModifyInitParams(BaseEstimator): """Deprecated behavior. Equal parameters but with a type cast. Doesn't fulfill a is a """ def __init__(self, a=np.array([0])): self.a = a.copy() class Buggy(BaseEstimator): "A buggy estimator that does not set its parameters right." def __init__(self, a=None): self.a = 1 class NoEstimator: def __init__(self): pass def fit(self, X=None, y=None): return self def predict(self, X=None): return None class VargEstimator(BaseEstimator): """scikit-learn estimators shouldn't have vargs.""" def __init__(self, *vargs): pass ############################################################################# # The tests def test_clone(): # Tests that clone creates a correct deep copy. # We create an estimator, make a copy of its original state # (which, in this case, is the current state of the estimator), # and check that the obtained copy is a correct deep copy. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) new_selector = clone(selector) assert selector is not new_selector assert selector.get_params() == new_selector.get_params() selector = SelectFpr(f_classif, alpha=np.zeros((10, 2))) new_selector = clone(selector) assert selector is not new_selector def test_clone_2(): # Tests that clone doesn't copy everything. # We first create an estimator, give it an own attribute, and # make a copy of its original state. Then we check that the copy doesn't # have the specific attribute we manually added to the initial estimator. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) selector.own_attribute = "test" new_selector = clone(selector) assert not hasattr(new_selector, "own_attribute") def test_clone_buggy(): # Check that clone raises an error on buggy estimators. 
buggy = Buggy() buggy.a = 2 with pytest.raises(RuntimeError): clone(buggy) no_estimator = NoEstimator() with pytest.raises(TypeError): clone(no_estimator) varg_est = VargEstimator() with pytest.raises(RuntimeError): clone(varg_est) est = ModifyInitParams() with pytest.raises(RuntimeError): clone(est) def test_clone_empty_array(): # Regression test for cloning estimators with empty arrays clf = MyEstimator(empty=np.array([])) clf2 = clone(clf) assert_array_equal(clf.empty, clf2.empty) clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]]))) clf2 = clone(clf) assert_array_equal(clf.empty.data, clf2.empty.data) def test_clone_nan(): # Regression test for cloning estimators with default parameter as np.nan clf = MyEstimator(empty=np.nan) clf2 = clone(clf) assert clf.empty is clf2.empty def test_clone_dict(): # test that clone creates a clone of a dict orig = {"a": MyEstimator()} cloned = clone(orig) assert orig["a"] is not cloned["a"] def test_clone_sparse_matrices(): sparse_matrix_classes = [ cls for name in dir(sp) if name.endswith("_matrix") and type(cls := getattr(sp, name)) is type ] for cls in sparse_matrix_classes: sparse_matrix = cls(np.eye(5)) clf = MyEstimator(empty=sparse_matrix) clf_cloned = clone(clf) assert clf.empty.__class__ is clf_cloned.empty.__class__ assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray()) def test_clone_estimator_types(): # Check that clone works for parameters that are types rather than # instances clf = MyEstimator(empty=MyEstimator) clf2 = clone(clf) assert clf.empty is clf2.empty def test_clone_class_rather_than_instance(): # Check that clone raises expected error message when # cloning class rather than instance msg = "You should provide an instance of scikit-learn estimator" with pytest.raises(TypeError, match=msg): clone(MyEstimator) def test_conditional_attrs_not_in_dir(): # Test that __dir__ includes only relevant attributes. 
#28558 encoder = LabelEncoder() assert "set_output" not in dir(encoder) scalar = StandardScaler() assert "set_output" in dir(scalar) svc = SVC(probability=False) assert "predict_proba" not in dir(svc) svc.probability = True assert "predict_proba" in dir(svc) def test_repr(): # Smoke test the repr of the base estimator. my_estimator = MyEstimator() repr(my_estimator) test = T(K(), K()) assert repr(test) == "T(a=K(), b=K())" some_est = T(a=["long_params"] * 1000) assert len(repr(some_est)) == 485 def test_str(): # Smoke test the str of the base estimator my_estimator = MyEstimator() str(my_estimator) def test_get_params(): test = T(K(), K) assert "a__d" in test.get_params(deep=True) assert "a__d" not in test.get_params(deep=False) test.set_params(a__d=2) assert test.a.d == 2 with pytest.raises(ValueError): test.set_params(a__a=2) @pytest.mark.parametrize( "estimator, expected_result", [ (SVC(), True), (GridSearchCV(SVC(), {"C": [0.1, 1]}), True), (Pipeline([("svc", SVC())]), True), (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), True), (SVR(), False), (GridSearchCV(SVR(), {"C": [0.1, 1]}), False), (Pipeline([("svr", SVR())]), False), (Pipeline([("svr_cv", GridSearchCV(SVR(), {"C": [0.1, 1]}))]), False), ], ) def test_is_classifier(estimator, expected_result): assert is_classifier(estimator) == expected_result @pytest.mark.parametrize( "estimator, expected_result", [ (SVR(), True), (GridSearchCV(SVR(), {"C": [0.1, 1]}), True), (Pipeline([("svr", SVR())]), True), (Pipeline([("svr_cv", GridSearchCV(SVR(), {"C": [0.1, 1]}))]), True), (SVC(), False), (GridSearchCV(SVC(), {"C": [0.1, 1]}), False), (Pipeline([("svc", SVC())]), False), (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), False), ], ) def test_is_regressor(estimator, expected_result): assert is_regressor(estimator) == expected_result @pytest.mark.parametrize( "estimator, expected_result", [ (KMeans(), True), (GridSearchCV(KMeans(), {"n_clusters": [3, 8]}), True), (Pipeline([("km", 
KMeans())]), True), (Pipeline([("km_cv", GridSearchCV(KMeans(), {"n_clusters": [3, 8]}))]), True), (SVC(), False), (GridSearchCV(SVC(), {"C": [0.1, 1]}), False), (Pipeline([("svc", SVC())]), False), (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), False), ], ) def test_is_clusterer(estimator, expected_result): assert is_clusterer(estimator) == expected_result def test_set_params(): # test nested estimator parameter setting clf = Pipeline([("svc", SVC())]) # non-existing parameter in svc with pytest.raises(ValueError): clf.set_params(svc__stupid_param=True) # non-existing parameter of pipeline with pytest.raises(ValueError): clf.set_params(svm__stupid_param=True) # we don't currently catch if the things in pipeline are estimators # bad_pipeline = Pipeline([("bad", NoEstimator())]) # with pytest.raises(AttributeError): # bad_pipeline.set_params(bad__stupid_param=True) def test_set_params_passes_all_parameters(): # Make sure all parameters are passed together to set_params # of nested estimator. 
Regression test for #9944 class TestDecisionTree(DecisionTreeClassifier): def set_params(self, **kwargs): super().set_params(**kwargs) # expected_kwargs is in test scope assert kwargs == expected_kwargs return self expected_kwargs = {"max_depth": 5, "min_samples_leaf": 2} for est in [ Pipeline([("estimator", TestDecisionTree())]), GridSearchCV(TestDecisionTree(), {}), ]: est.set_params(estimator__max_depth=5, estimator__min_samples_leaf=2) def test_set_params_updates_valid_params(): # Check that set_params tries to set SVC().C, not # DecisionTreeClassifier().C gscv = GridSearchCV(DecisionTreeClassifier(), {}) gscv.set_params(estimator=SVC(), estimator__C=42.0) assert gscv.estimator.C == 42.0 @pytest.mark.parametrize( "tree,dataset", [ ( DecisionTreeClassifier(max_depth=2, random_state=0), datasets.make_classification(random_state=0), ), ( DecisionTreeRegressor(max_depth=2, random_state=0), datasets.make_regression(random_state=0), ), ], ) def test_score_sample_weight(tree, dataset): tree = clone(tree) # avoid side effects from previous tests. rng = np.random.RandomState(0) # check that the score with and without sample weights are different X, y = dataset tree.fit(X, y) # generate random sample weights sample_weight = rng.randint(1, 10, size=len(y)) score_unweighted = tree.score(X, y) score_weighted = tree.score(X, y, sample_weight=sample_weight) msg = "Unweighted and weighted scores are unexpectedly equal" assert score_unweighted != score_weighted, msg def test_clone_pandas_dataframe(): class DummyEstimator(TransformerMixin, BaseEstimator): """This is a dummy class for generating numerical features This feature extractor extracts numerical features from pandas data frame. Parameters ---------- df: pandas data frame The pandas data frame parameter. 
Notes ----- """ def __init__(self, df=None, scalar_param=1): self.df = df self.scalar_param = scalar_param def fit(self, X, y=None): pass def transform(self, X): pass # build and clone estimator d = np.arange(10) df = MockDataFrame(d) e = DummyEstimator(df, scalar_param=1) cloned_e = clone(e) # the test assert (e.df == cloned_e.df).values.all() assert e.scalar_param == cloned_e.scalar_param def test_clone_protocol(): """Checks that clone works with `__sklearn_clone__` protocol.""" class FrozenEstimator(BaseEstimator): def __init__(self, fitted_estimator): self.fitted_estimator = fitted_estimator def __getattr__(self, name): return getattr(self.fitted_estimator, name) def __sklearn_clone__(self): return self def fit(self, *args, **kwargs): return self def fit_transform(self, *args, **kwargs): return self.fitted_estimator.transform(*args, **kwargs) X = np.array([[-1, -1], [-2, -1], [-3, -2]]) pca = PCA().fit(X) components = pca.components_ frozen_pca = FrozenEstimator(pca) assert_allclose(frozen_pca.components_, components) # Calling PCA methods such as `get_feature_names_out` still works assert_array_equal(frozen_pca.get_feature_names_out(), pca.get_feature_names_out()) # Fitting on a new data does not alter `components_` X_new = np.asarray([[-1, 2], [3, 4], [1, 2]]) frozen_pca.fit(X_new) assert_allclose(frozen_pca.components_, components) # `fit_transform` does not alter state frozen_pca.fit_transform(X_new) assert_allclose(frozen_pca.components_, components) # Cloning estimator is a no-op clone_frozen_pca = clone(frozen_pca) assert clone_frozen_pca is frozen_pca assert_allclose(clone_frozen_pca.components_, components) def test_pickle_version_warning_is_not_raised_with_matching_version(): iris = datasets.load_iris() tree = DecisionTreeClassifier().fit(iris.data, iris.target) tree_pickle = pickle.dumps(tree) assert b"_sklearn_version" in tree_pickle with warnings.catch_warnings(): warnings.simplefilter("error") tree_restored = pickle.loads(tree_pickle) # test that 
we can predict with the restored decision tree classifier score_of_original = tree.score(iris.data, iris.target) score_of_restored = tree_restored.score(iris.data, iris.target) assert score_of_original == score_of_restored class TreeBadVersion(DecisionTreeClassifier): def __getstate__(self): return dict(self.__dict__.items(), _sklearn_version="something") pickle_error_message = ( "Trying to unpickle estimator {estimator} from " "version {old_version} when using version " "{current_version}. This might " "lead to breaking code or invalid results. " "Use at your own risk." ) def test_pickle_version_warning_is_issued_upon_different_version(): iris = datasets.load_iris() tree = TreeBadVersion().fit(iris.data, iris.target) tree_pickle_other = pickle.dumps(tree) message = pickle_error_message.format( estimator="TreeBadVersion", old_version="something", current_version=sklearn.__version__, ) with pytest.warns(UserWarning, match=message) as warning_record: pickle.loads(tree_pickle_other) message = warning_record.list[0].message assert isinstance(message, InconsistentVersionWarning) assert message.estimator_name == "TreeBadVersion" assert message.original_sklearn_version == "something" assert message.current_sklearn_version == sklearn.__version__ class TreeNoVersion(DecisionTreeClassifier): def __getstate__(self): return self.__dict__ def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle(): iris = datasets.load_iris() # TreeNoVersion has no getstate, like pre-0.18 tree = TreeNoVersion().fit(iris.data, iris.target) tree_pickle_noversion = pickle.dumps(tree) assert b"_sklearn_version" not in tree_pickle_noversion message = pickle_error_message.format( estimator="TreeNoVersion", old_version="pre-0.18", current_version=sklearn.__version__, ) # check we got the warning about using pre-0.18 pickle with pytest.warns(UserWarning, match=message): pickle.loads(tree_pickle_noversion) # The test modifies global state by changing the TreeNoVersion class 
@pytest.mark.thread_unsafe def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator(): iris = datasets.load_iris() tree = TreeNoVersion().fit(iris.data, iris.target) tree_pickle_noversion = pickle.dumps(tree) try: module_backup = TreeNoVersion.__module__ TreeNoVersion.__module__ = "notsklearn" with warnings.catch_warnings(): warnings.simplefilter("error") pickle.loads(tree_pickle_noversion) finally: TreeNoVersion.__module__ = module_backup class DontPickleAttributeMixin: def __getstate__(self): data = self.__dict__.copy() data["_attribute_not_pickled"] = None return data def __setstate__(self, state): state["_restored"] = True self.__dict__.update(state) class MultiInheritanceEstimator(DontPickleAttributeMixin, BaseEstimator): def __init__(self, attribute_pickled=5): self.attribute_pickled = attribute_pickled self._attribute_not_pickled = None def test_pickling_when_getstate_is_overwritten_by_mixin(): estimator = MultiInheritanceEstimator() estimator._attribute_not_pickled = "this attribute should not be pickled" serialized = pickle.dumps(estimator) estimator_restored = pickle.loads(serialized) assert estimator_restored.attribute_pickled == 5 assert estimator_restored._attribute_not_pickled is None assert estimator_restored._restored def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn(): try: estimator = MultiInheritanceEstimator() text = "this attribute should not be pickled" estimator._attribute_not_pickled = text old_mod = type(estimator).__module__ type(estimator).__module__ = "notsklearn" serialized = estimator.__getstate__() assert serialized == {"_attribute_not_pickled": None, "attribute_pickled": 5} serialized["attribute_pickled"] = 4 estimator.__setstate__(serialized) assert estimator.attribute_pickled == 4 assert estimator._restored finally: type(estimator).__module__ = old_mod class SingleInheritanceEstimator(BaseEstimator): def __init__(self, attribute_pickled=5): self.attribute_pickled = attribute_pickled 
self._attribute_not_pickled = None def __getstate__(self): state = super().__getstate__() state["_attribute_not_pickled"] = None return state def test_pickling_works_when_getstate_is_overwritten_in_the_child_class(): estimator = SingleInheritanceEstimator() estimator._attribute_not_pickled = "this attribute should not be pickled" serialized = pickle.dumps(estimator) estimator_restored = pickle.loads(serialized) assert estimator_restored.attribute_pickled == 5 assert estimator_restored._attribute_not_pickled is None def test_tag_inheritance(): # test that changing tags by inheritance is not allowed nan_tag_est = NaNTag() no_nan_tag_est = NoNaNTag() assert nan_tag_est.__sklearn_tags__().input_tags.allow_nan assert not no_nan_tag_est.__sklearn_tags__().input_tags.allow_nan redefine_tags_est = OverrideTag() assert not redefine_tags_est.__sklearn_tags__().input_tags.allow_nan diamond_tag_est = DiamondOverwriteTag() assert diamond_tag_est.__sklearn_tags__().input_tags.allow_nan inherit_diamond_tag_est = InheritDiamondOverwriteTag() assert inherit_diamond_tag_est.__sklearn_tags__().input_tags.allow_nan def test_raises_on_get_params_non_attribute(): class MyEstimator(BaseEstimator): def __init__(self, param=5): pass def fit(self, X, y=None): return self est = MyEstimator() msg = "'MyEstimator' object has no attribute 'param'" with pytest.raises(AttributeError, match=msg): est.get_params() def test_repr_mimebundle_(): # Checks the display configuration flag controls the json output tree = DecisionTreeClassifier() output = tree._repr_mimebundle_() assert "text/plain" in output assert "text/html" in output with config_context(display="text"): output = tree._repr_mimebundle_() assert "text/plain" in output assert "text/html" not in output def test_repr_html_wraps(): # Checks the display configuration flag controls the html output tree = DecisionTreeClassifier() output = tree._repr_html_() assert "<style>" in output with config_context(display="text"): msg = "_repr_html_ is 
only defined when" with pytest.raises(AttributeError, match=msg): output = tree._repr_html_() def test_n_features_in_validation(): """Check that `_check_n_features` validates data when reset=False""" est = MyEstimator() X_train = [[1, 2, 3], [4, 5, 6]] _check_n_features(est, X_train, reset=True) assert est.n_features_in_ == 3 msg = "X does not contain any features, but MyEstimator is expecting 3 features" with pytest.raises(ValueError, match=msg): _check_n_features(est, "invalid X", reset=False) def test_n_features_in_no_validation(): """Check that `_check_n_features` does not validate data when n_features_in_ is not defined.""" est = MyEstimator() _check_n_features(est, "invalid X", reset=True) assert not hasattr(est, "n_features_in_") # does not raise _check_n_features(est, "invalid X", reset=False) def test_feature_names_in(): """Check that feature_name_in are recorded by `_validate_data`""" pd = pytest.importorskip("pandas") iris = datasets.load_iris() X_np = iris.data df = pd.DataFrame(X_np, columns=iris.feature_names) class NoOpTransformer(TransformerMixin, BaseEstimator): def fit(self, X, y=None): validate_data(self, X) return self def transform(self, X): validate_data(self, X, reset=False) return X # fit on dataframe saves the feature names trans = NoOpTransformer().fit(df) assert_array_equal(trans.feature_names_in_, df.columns) # fit again but on ndarray does not keep the previous feature names (see #21383) trans.fit(X_np) assert not hasattr(trans, "feature_names_in_") trans.fit(df) msg = "The feature names should match those that were passed" df_bad = pd.DataFrame(X_np, columns=iris.feature_names[::-1]) with pytest.raises(ValueError, match=msg): trans.transform(df_bad) # warns when fitted on dataframe and transforming an ndarray msg = ( "X does not have valid feature names, but NoOpTransformer was " "fitted with feature names" ) with pytest.warns(UserWarning, match=msg): trans.transform(X_np) # warns when fitted on an ndarray and transforming dataframe 
msg = "X has feature names, but NoOpTransformer was fitted without feature names" trans = NoOpTransformer().fit(X_np) with pytest.warns(UserWarning, match=msg): trans.transform(df) # fit on dataframe with all integer feature names works without warning df_int_names = pd.DataFrame(X_np) trans = NoOpTransformer() with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) trans.fit(df_int_names) # fit on dataframe with no feature names or all integer feature names # -> do not warn on transform Xs = [X_np, df_int_names] for X in Xs: with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) trans.transform(X) # fit on dataframe with feature names that are mixed raises an error: df_mixed = pd.DataFrame(X_np, columns=["a", "b", 1, 2]) trans = NoOpTransformer() msg = re.escape( "Feature names are only supported if all input features have string names, " "but your input has ['int', 'str'] as feature name / column name types. " "If you want feature names to be stored and validated, you must convert " "them all to strings, by using X.columns = X.columns.astype(str) for " "example. Otherwise you can remove feature / column names from your input " "data, or convert them all to a non-string data type." 
) with pytest.raises(TypeError, match=msg): trans.fit(df_mixed) # transform on feature names that are mixed also raises: with pytest.raises(TypeError, match=msg): trans.transform(df_mixed) def test_validate_data_skip_check_array(): """Check skip_check_array option of _validate_data.""" pd = pytest.importorskip("pandas") iris = datasets.load_iris() df = pd.DataFrame(iris.data, columns=iris.feature_names) y = pd.Series(iris.target) class NoOpTransformer(TransformerMixin, BaseEstimator): pass no_op = NoOpTransformer() X_np_out = validate_data(no_op, df, skip_check_array=False) assert isinstance(X_np_out, np.ndarray) assert_allclose(X_np_out, df.to_numpy()) X_df_out = validate_data(no_op, df, skip_check_array=True) assert X_df_out is df y_np_out = validate_data(no_op, y=y, skip_check_array=False) assert isinstance(y_np_out, np.ndarray) assert_allclose(y_np_out, y.to_numpy()) y_series_out = validate_data(no_op, y=y, skip_check_array=True) assert y_series_out is y X_np_out, y_np_out = validate_data(no_op, df, y, skip_check_array=False) assert isinstance(X_np_out, np.ndarray) assert_allclose(X_np_out, df.to_numpy()) assert isinstance(y_np_out, np.ndarray) assert_allclose(y_np_out, y.to_numpy()) X_df_out, y_series_out = validate_data(no_op, df, y, skip_check_array=True) assert X_df_out is df assert y_series_out is y msg = "Validation should be done on X, y or both." 
with pytest.raises(ValueError, match=msg): validate_data(no_op) def test_clone_keeps_output_config(): """Check that clone keeps the set_output config.""" ss = StandardScaler().set_output(transform="pandas") config = _get_output_config("transform", ss) ss_clone = clone(ss) config_clone = _get_output_config("transform", ss_clone) assert config == config_clone class _Empty: pass class EmptyEstimator(_Empty, BaseEstimator): pass @pytest.mark.parametrize("estimator", [BaseEstimator(), EmptyEstimator()]) def test_estimator_empty_instance_dict(estimator): """Check that ``__getstate__`` returns an empty ``dict`` with an empty instance. Python 3.11+ changed behaviour by returning ``None`` instead of raising an ``AttributeError``. Non-regression test for gh-25188. """ state = estimator.__getstate__() expected = {"_sklearn_version": sklearn.__version__} assert state == expected # this should not raise pickle.loads(pickle.dumps(BaseEstimator())) def test_estimator_getstate_using_slots_error_message(): """Using a `BaseEstimator` with `__slots__` is not supported.""" class WithSlots: __slots__ = ("x",) class Estimator(BaseEstimator, WithSlots): pass msg = ( "You cannot use `__slots__` in objects inheriting from " "`sklearn.base.BaseEstimator`" ) with pytest.raises(TypeError, match=msg): Estimator().__getstate__() with pytest.raises(TypeError, match=msg): pickle.dumps(Estimator()) @pytest.mark.parametrize( "constructor_name, minversion", [ ("dataframe", "1.5.0"), ("pyarrow", "12.0.0"), ("polars", "0.20.23"), ], ) def test_dataframe_protocol(constructor_name, minversion): """Uses the dataframe exchange protocol to get feature names.""" data = [[1, 4, 2], [3, 3, 6]] columns = ["col_0", "col_1", "col_2"] df = _convert_container( data, constructor_name, columns_name=columns, minversion=minversion ) class NoOpTransformer(TransformerMixin, BaseEstimator): def fit(self, X, y=None): validate_data(self, X) return self def transform(self, X): return validate_data(self, X, reset=False) 
no_op = NoOpTransformer() no_op.fit(df) assert_array_equal(no_op.feature_names_in_, columns) X_out = no_op.transform(df) if constructor_name != "pyarrow": # pyarrow does not work with `np.asarray` # https://github.com/apache/arrow/issues/34886 assert_allclose(df, X_out) bad_names = ["a", "b", "c"] df_bad = _convert_container(data, constructor_name, columns_name=bad_names) with pytest.raises(ValueError, match="The feature names should match"): no_op.transform(df_bad) @config_context(enable_metadata_routing=True) def test_transformer_fit_transform_with_metadata_in_transform(): """Test that having a transformer with metadata for transform raises a warning when calling fit_transform.""" class CustomTransformer(BaseEstimator, TransformerMixin): def fit(self, X, y=None, prop=None): return self def transform(self, X, prop=None): return X # passing the metadata to `fit_transform` should raise a warning since it # could potentially be consumed by `transform` with pytest.warns(UserWarning, match="`transform` method which consumes metadata"): CustomTransformer().set_transform_request(prop=True).fit_transform( [[1]], [1], prop=1 ) # not passing a metadata which can potentially be consumed by `transform` should # not raise a warning with warnings.catch_warnings(record=True) as record: CustomTransformer().set_transform_request(prop=True).fit_transform([[1]], [1]) assert len(record) == 0 @config_context(enable_metadata_routing=True) def test_outlier_mixin_fit_predict_with_metadata_in_predict(): """Test that having an OutlierMixin with metadata for predict raises a warning when calling fit_predict.""" class CustomOutlierDetector(BaseEstimator, OutlierMixin): def fit(self, X, y=None, prop=None): return self def predict(self, X, prop=None): return X # passing the metadata to `fit_predict` should raise a warning since it # could potentially be consumed by `predict` with pytest.warns(UserWarning, match="`predict` method which consumes metadata"): 
CustomOutlierDetector().set_predict_request(prop=True).fit_predict( [[1]], [1], prop=1 ) # not passing a metadata which can potentially be consumed by `predict` should # not raise a warning with warnings.catch_warnings(record=True) as record: CustomOutlierDetector().set_predict_request(prop=True).fit_predict([[1]], [1]) assert len(record) == 0 def test_get_params_html(): """Check the behaviour of the `_get_params_html` method.""" est = MyEstimator(empty="test") assert est._get_params_html() == {"l1": 0, "empty": "test"} assert est._get_params_html().non_default == ("empty",) def make_estimator_with_param(default_value): class DynamicEstimator(BaseEstimator): def __init__(self, param=default_value): self.param = param return DynamicEstimator @pytest.mark.parametrize( "default_value, test_value", [ ((), (1,)), ((), [1]), ((), np.array([1])), ((1, 2), (3, 4)), ((1, 2), [3, 4]), ((1, 2), np.array([3, 4])), (None, 1), (None, []), (None, lambda x: x), (np.nan, 1.0), (np.nan, np.array([np.nan])), ("abc", "def"), ("abc", ["abc"]), (True, False), (1, 2), (1, [1]), (1, np.array([1])), (1.0, 2.0), (1.0, [1.0]), (1.0, np.array([1.0])), ([1, 2], [3]), (np.array([1]), [2, 3]), (None, KFold()), (None, get_scorer("accuracy")), ], ) def test_param_is_non_default(default_value, test_value): """Check that we detect non-default parameters with various types. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/31525 """ estimator = make_estimator_with_param(default_value)(param=test_value) non_default = estimator._get_params_html().non_default
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_discriminant_analysis.py
sklearn/tests/test_discriminant_analysis.py
import warnings import numpy as np import pytest from scipy import linalg from sklearn.cluster import KMeans from sklearn.covariance import LedoitWolf, ShrunkCovariance, ledoit_wolf from sklearn.datasets import make_blobs from sklearn.discriminant_analysis import ( LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis, _cov, ) from sklearn.model_selection import ShuffleSplit, cross_val_score from sklearn.preprocessing import StandardScaler from sklearn.utils import check_random_state from sklearn.utils._testing import ( _convert_container, assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) # Data is just 6 separable points in the plane X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype="f") y = np.array([1, 1, 1, 2, 2, 2]) y3 = np.array([1, 1, 2, 2, 3, 3]) # Degenerate data with only one feature (still should be separable) X1 = np.array( [[-2], [-1], [-1], [1], [1], [2]], dtype="f", ) # Data is just 9 separable points in the plane X6 = np.array( [[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2], [1, 3], [1, 2], [2, 1], [2, 2]] ) y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2]) y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1]) # Degenerate data with 1 feature (still should be separable) X7 = np.array([[-3], [-2], [-1], [-1], [0], [1], [1], [2], [3]]) # Data that has zero variance in one dimension and needs regularization X2 = np.array( [[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0], [2, 0], [3, 0]] ) # One element class y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2]) solver_shrinkage = [ ("svd", None), ("lsqr", None), ("eigen", None), ("lsqr", "auto"), ("lsqr", 0), ("lsqr", 0.43), ("eigen", "auto"), ("eigen", 0), ("eigen", 0.43), ] def test_lda_predict(): # Test LDA classification. # This checks that LDA implements fit and predict and returns correct # values for simple toy data. 
for test_case in solver_shrinkage: solver, shrinkage = test_case clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) y_pred = clf.fit(X, y).predict(X) assert_array_equal(y_pred, y, "solver %s" % solver) # Assert that it works with 1D data y_pred1 = clf.fit(X1, y).predict(X1) assert_array_equal(y_pred1, y, "solver %s" % solver) # Test probability estimates y_proba_pred1 = clf.predict_proba(X1) assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y, "solver %s" % solver) y_log_proba_pred1 = clf.predict_log_proba(X1) assert_allclose( np.exp(y_log_proba_pred1), y_proba_pred1, rtol=1e-6, atol=1e-6, err_msg="solver %s" % solver, ) # Primarily test for commit 2f34950 -- "reuse" of priors y_pred3 = clf.fit(X, y3).predict(X) # LDA shouldn't be able to separate those assert np.any(y_pred3 != y3), "solver %s" % solver clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto") with pytest.raises(NotImplementedError): clf.fit(X, y) clf = LinearDiscriminantAnalysis( solver="lsqr", shrinkage=0.1, covariance_estimator=ShrunkCovariance() ) with pytest.raises( ValueError, match=( "covariance_estimator and shrinkage " "parameters are not None. " "Only one of the two can be set." 
), ): clf.fit(X, y) # test bad solver with covariance_estimator clf = LinearDiscriminantAnalysis(solver="svd", covariance_estimator=LedoitWolf()) with pytest.raises( ValueError, match="covariance estimator is not supported with svd" ): clf.fit(X, y) # test bad covariance estimator clf = LinearDiscriminantAnalysis( solver="lsqr", covariance_estimator=KMeans(n_clusters=2, n_init="auto") ) with pytest.raises(ValueError): clf.fit(X, y) @pytest.mark.parametrize("n_classes", [2, 3]) @pytest.mark.parametrize("solver", ["svd", "lsqr", "eigen"]) def test_lda_predict_proba(solver, n_classes): def generate_dataset(n_samples, centers, covariances, random_state=None): """Generate a multivariate normal data given some centers and covariances""" rng = check_random_state(random_state) X = np.vstack( [ rng.multivariate_normal(mean, cov, size=n_samples // len(centers)) for mean, cov in zip(centers, covariances) ] ) y = np.hstack( [[clazz] * (n_samples // len(centers)) for clazz in range(len(centers))] ) return X, y blob_centers = np.array([[0, 0], [-10, 40], [-30, 30]])[:n_classes] blob_stds = np.array([[[10, 10], [10, 100]]] * len(blob_centers)) X, y = generate_dataset( n_samples=90000, centers=blob_centers, covariances=blob_stds, random_state=42 ) lda = LinearDiscriminantAnalysis( solver=solver, store_covariance=True, shrinkage=None ).fit(X, y) # check that the empirical means and covariances are close enough to the # one used to generate the data assert_allclose(lda.means_, blob_centers, atol=1e-1) assert_allclose(lda.covariance_, blob_stds[0], atol=1) # implement the method to compute the probability given in The Elements # of Statistical Learning (cf. p.127, Sect. 
4.4.5 "Logistic Regression # or LDA?") precision = linalg.inv(blob_stds[0]) alpha_k = [] alpha_k_0 = [] for clazz in range(len(blob_centers) - 1): alpha_k.append( np.dot(precision, (blob_centers[clazz] - blob_centers[-1])[:, np.newaxis]) ) alpha_k_0.append( np.dot( -0.5 * (blob_centers[clazz] + blob_centers[-1])[np.newaxis, :], alpha_k[-1], ) ) sample = np.array([[-22, 22]]) def discriminant_func(sample, coef, intercept, clazz): return np.exp(intercept[clazz] + np.dot(sample, coef[clazz])).item() prob = np.array( [ float( discriminant_func(sample, alpha_k, alpha_k_0, clazz) / ( 1 + sum( [ discriminant_func(sample, alpha_k, alpha_k_0, clazz) for clazz in range(n_classes - 1) ] ) ) ) for clazz in range(n_classes - 1) ] ) prob_ref = 1 - np.sum(prob) # check the consistency of the computed probability # all probabilities should sum to one prob_ref_2 = float( 1 / ( 1 + sum( [ discriminant_func(sample, alpha_k, alpha_k_0, clazz) for clazz in range(n_classes - 1) ] ) ) ) assert prob_ref == pytest.approx(prob_ref_2) # check that the probability of LDA are close to the theoretical # probabilities assert_allclose( lda.predict_proba(sample), np.hstack([prob, prob_ref])[np.newaxis], atol=1e-2 ) def test_lda_priors(): # Test priors (negative priors) priors = np.array([0.5, -0.5]) clf = LinearDiscriminantAnalysis(priors=priors) msg = "priors must be non-negative" with pytest.raises(ValueError, match=msg): clf.fit(X, y) # Test that priors passed as a list are correctly handled (run to see if # failure) clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5]) clf.fit(X, y) # Test that priors always sum to 1 priors = np.array([0.5, 0.6]) prior_norm = np.array([0.45, 0.55]) clf = LinearDiscriminantAnalysis(priors=priors) with pytest.warns(UserWarning): clf.fit(X, y) assert_array_almost_equal(clf.priors_, prior_norm, 2) def test_lda_coefs(): # Test if the coefficients of the solvers are approximately the same. 
n_features = 2 n_classes = 2 n_samples = 1000 X, y = make_blobs( n_samples=n_samples, n_features=n_features, centers=n_classes, random_state=11 ) clf_lda_svd = LinearDiscriminantAnalysis(solver="svd") clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr") clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen") clf_lda_svd.fit(X, y) clf_lda_lsqr.fit(X, y) clf_lda_eigen.fit(X, y) assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1) assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1) assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1) def test_lda_transform(): # Test LDA transform. clf = LinearDiscriminantAnalysis(solver="svd", n_components=1) X_transformed = clf.fit(X, y).transform(X) assert X_transformed.shape[1] == 1 clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1) X_transformed = clf.fit(X, y).transform(X) assert X_transformed.shape[1] == 1 clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1) clf.fit(X, y) msg = "transform not implemented for 'lsqr'" with pytest.raises(NotImplementedError, match=msg): clf.transform(X) def test_lda_explained_variance_ratio(): # Test if the sum of the normalized eigen vectors values equals 1, # Also tests whether the explained_variance_ratio_ formed by the # eigen solver is the same as the explained_variance_ratio_ formed # by the svd solver state = np.random.RandomState(0) X = state.normal(loc=0, scale=100, size=(40, 20)) y = state.randint(0, 3, size=(40,)) clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen") clf_lda_eigen.fit(X, y) assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3) assert clf_lda_eigen.explained_variance_ratio_.shape == (2,), ( "Unexpected length for explained_variance_ratio_" ) clf_lda_svd = LinearDiscriminantAnalysis(solver="svd") clf_lda_svd.fit(X, y) assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3) assert clf_lda_svd.explained_variance_ratio_.shape == (2,), ( "Unexpected 
length for explained_variance_ratio_" ) assert_array_almost_equal( clf_lda_svd.explained_variance_ratio_, clf_lda_eigen.explained_variance_ratio_ ) def test_lda_orthogonality(): # arrange four classes with their means in a kite-shaped pattern # the longer distance should be transformed to the first component, and # the shorter distance to the second component. means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]]) # We construct perfectly symmetric distributions, so the LDA can estimate # precise means. scatter = np.array( [ [0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0], [0, 0, 0.1], [0, 0, -0.1], ] ) X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3)) y = np.repeat(np.arange(means.shape[0]), scatter.shape[0]) # Fit LDA and transform the means clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y) means_transformed = clf.transform(means) d1 = means_transformed[3] - means_transformed[0] d2 = means_transformed[2] - means_transformed[1] d1 /= np.sqrt(np.sum(d1**2)) d2 /= np.sqrt(np.sum(d2**2)) # the transformed within-class covariance should be the identity matrix assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2)) # the means of classes 0 and 3 should lie on the first component assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0) # the means of classes 1 and 2 should lie on the second component assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0) def test_lda_scaling(): # Test if classification works correctly with differently scaled features. n = 100 rng = np.random.RandomState(1234) # use uniform distribution of features to make sure there is absolutely no # overlap between classes. 
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0] x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0] x = np.vstack((x1, x2)) * [1, 100, 10000] y = [-1] * n + [1] * n for solver in ("svd", "lsqr", "eigen"): clf = LinearDiscriminantAnalysis(solver=solver) # should be able to separate the data perfectly assert clf.fit(x, y).score(x, y) == 1.0, "using covariance: %s" % solver def test_lda_store_covariance(): # Test for solver 'lsqr' and 'eigen' # 'store_covariance' has no effect on 'lsqr' and 'eigen' solvers for solver in ("lsqr", "eigen"): clf = LinearDiscriminantAnalysis(solver=solver).fit(X6, y6) assert hasattr(clf, "covariance_") # Test the actual attribute: clf = LinearDiscriminantAnalysis(solver=solver, store_covariance=True).fit( X6, y6 ) assert hasattr(clf, "covariance_") assert_array_almost_equal( clf.covariance_, np.array([[0.422222, 0.088889], [0.088889, 0.533333]]) ) # Test for SVD solver, the default is to not set the covariances_ attribute clf = LinearDiscriminantAnalysis(solver="svd").fit(X6, y6) assert not hasattr(clf, "covariance_") # Test the actual attribute: clf = LinearDiscriminantAnalysis(solver=solver, store_covariance=True).fit(X6, y6) assert hasattr(clf, "covariance_") assert_array_almost_equal( clf.covariance_, np.array([[0.422222, 0.088889], [0.088889, 0.533333]]) ) @pytest.mark.parametrize("seed", range(10)) def test_lda_shrinkage(seed): # Test that shrunk covariance estimator and shrinkage parameter behave the # same rng = np.random.RandomState(seed) X = rng.rand(100, 10) y = rng.randint(3, size=(100)) c1 = LinearDiscriminantAnalysis(store_covariance=True, shrinkage=0.5, solver="lsqr") c2 = LinearDiscriminantAnalysis( store_covariance=True, covariance_estimator=ShrunkCovariance(shrinkage=0.5), solver="lsqr", ) c1.fit(X, y) c2.fit(X, y) assert_allclose(c1.means_, c2.means_) assert_allclose(c1.covariance_, c2.covariance_) def test_lda_ledoitwolf(): # When shrinkage="auto" current implementation uses ledoitwolf estimation # of covariance after 
standardizing the data. This checks that it is indeed # the case class StandardizedLedoitWolf: def fit(self, X): sc = StandardScaler() # standardize features X_sc = sc.fit_transform(X) s = ledoit_wolf(X_sc)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] self.covariance_ = s rng = np.random.RandomState(0) X = rng.rand(100, 10) y = rng.randint(3, size=(100,)) c1 = LinearDiscriminantAnalysis( store_covariance=True, shrinkage="auto", solver="lsqr" ) c2 = LinearDiscriminantAnalysis( store_covariance=True, covariance_estimator=StandardizedLedoitWolf(), solver="lsqr", ) c1.fit(X, y) c2.fit(X, y) assert_allclose(c1.means_, c2.means_) assert_allclose(c1.covariance_, c2.covariance_) @pytest.mark.parametrize("n_features", [3, 5]) @pytest.mark.parametrize("n_classes", [5, 3]) def test_lda_dimension_warning(n_classes, n_features): rng = check_random_state(0) n_samples = 10 X = rng.randn(n_samples, n_features) # we create n_classes labels by repeating and truncating a # range(n_classes) until n_samples y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples] max_components = min(n_features, n_classes - 1) for n_components in [max_components - 1, None, max_components]: # if n_components <= min(n_classes - 1, n_features), no warning lda = LinearDiscriminantAnalysis(n_components=n_components) lda.fit(X, y) for n_components in [max_components + 1, max(n_features, n_classes - 1) + 1]: # if n_components > min(n_classes - 1, n_features), raise error. 
# We test one unit higher than max_components, and then something # larger than both n_features and n_classes - 1 to ensure the test # works for any value of n_component lda = LinearDiscriminantAnalysis(n_components=n_components) msg = "n_components cannot be larger than " with pytest.raises(ValueError, match=msg): lda.fit(X, y) @pytest.mark.parametrize( "data_type, expected_type", [ (np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64), ], ) def test_lda_dtype_match(data_type, expected_type): for solver, shrinkage in solver_shrinkage: clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) clf.fit(X.astype(data_type), y.astype(data_type)) assert clf.coef_.dtype == expected_type def test_lda_numeric_consistency_float32_float64(): for solver, shrinkage in solver_shrinkage: clf_32 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) clf_32.fit(X.astype(np.float32), y.astype(np.float32)) clf_64 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage) clf_64.fit(X.astype(np.float64), y.astype(np.float64)) # Check value consistency between types rtol = 1e-6 assert_allclose(clf_32.coef_, clf_64.coef_, rtol=rtol) @pytest.mark.parametrize("solver", ["svd", "eigen"]) def test_qda(solver): # QDA classification. # This checks that QDA implements fit and predict and returns # correct values for a simple toy dataset. 
clf = QuadraticDiscriminantAnalysis(solver=solver) y_pred = clf.fit(X6, y6).predict(X6) assert_array_equal(y_pred, y6) # Assure that it works with 1D data y_pred1 = clf.fit(X7, y6).predict(X7) assert_array_equal(y_pred1, y6) # Test probas estimates y_proba_pred1 = clf.predict_proba(X7) assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6) y_log_proba_pred1 = clf.predict_log_proba(X7) assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8) y_pred3 = clf.fit(X6, y7).predict(X6) # QDA shouldn't be able to separate those assert np.any(y_pred3 != y7) # Classes should have at least 2 elements with pytest.raises(ValueError): clf.fit(X6, y4) def test_qda_covariance_estimator(): # Test that the correct errors are raised when using inappropriate # covariance estimators or shrinkage parameters with QDA. clf = QuadraticDiscriminantAnalysis(solver="svd", shrinkage="auto") with pytest.raises(NotImplementedError): clf.fit(X, y) clf = QuadraticDiscriminantAnalysis( solver="eigen", shrinkage=0.1, covariance_estimator=ShrunkCovariance() ) with pytest.raises( ValueError, match=( "covariance_estimator and shrinkage parameters are not None. " "Only one of the two can be set." ), ): clf.fit(X, y) # test bad solver with covariance_estimator clf = QuadraticDiscriminantAnalysis(solver="svd", covariance_estimator=LedoitWolf()) with pytest.raises( ValueError, match="covariance_estimator is not supported with solver='svd'" ): clf.fit(X, y) # test bad covariance estimator clf = QuadraticDiscriminantAnalysis( solver="eigen", covariance_estimator=KMeans(n_clusters=2, n_init="auto") ) with pytest.raises(ValueError): clf.fit(X, y) def test_qda_ledoitwolf(global_random_seed): # When shrinkage="auto" current implementation uses ledoitwolf estimation # of covariance after standardizing the data. 
This checks that it is indeed # the case class StandardizedLedoitWolf: def fit(self, X): sc = StandardScaler() # standardize features X_sc = sc.fit_transform(X) s = ledoit_wolf(X_sc)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] self.covariance_ = s rng = np.random.RandomState(global_random_seed) X = rng.rand(100, 10) y = rng.randint(3, size=(100,)) c1 = QuadraticDiscriminantAnalysis( store_covariance=True, shrinkage="auto", solver="eigen" ) c2 = QuadraticDiscriminantAnalysis( store_covariance=True, covariance_estimator=StandardizedLedoitWolf(), solver="eigen", ) c1.fit(X, y) c2.fit(X, y) assert_allclose(c1.means_, c2.means_) assert_allclose(c1.covariance_, c2.covariance_) def test_qda_coefs(global_random_seed): # Test if the coefficients of the solvers are approximately the same. n_features = 2 n_classes = 2 n_samples = 3000 X, y = make_blobs( n_samples=n_samples, n_features=n_features, centers=n_classes, cluster_std=[1.0, 3.0], random_state=global_random_seed, ) clf_svd = QuadraticDiscriminantAnalysis(solver="svd") clf_eigen = QuadraticDiscriminantAnalysis(solver="eigen") clf_svd.fit(X, y) clf_eigen.fit(X, y) for class_idx in range(n_classes): assert_allclose( np.abs(clf_svd.rotations_[class_idx]), np.abs(clf_eigen.rotations_[class_idx]), rtol=1e-3, err_msg=f"SVD and Eigen rotations differ for class {class_idx}", ) assert_allclose( clf_svd.scalings_[class_idx], clf_eigen.scalings_[class_idx], rtol=1e-3, err_msg=f"SVD and Eigen scalings differ for class {class_idx}", ) def test_qda_priors(): clf = QuadraticDiscriminantAnalysis() y_pred = clf.fit(X6, y6).predict(X6) n_pos = np.sum(y_pred == 2) neg = 1e-10 clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg])) y_pred = clf.fit(X6, y6).predict(X6) n_pos2 = np.sum(y_pred == 2) assert n_pos2 > n_pos @pytest.mark.parametrize("priors_type", ["list", "tuple", "array"]) def test_qda_prior_type(priors_type): """Check that priors accept array-like.""" priors = [0.5, 0.5] clf = 
QuadraticDiscriminantAnalysis( priors=_convert_container([0.5, 0.5], priors_type) ).fit(X6, y6) assert isinstance(clf.priors_, np.ndarray) assert_array_equal(clf.priors_, priors) def test_qda_prior_copy(): """Check that altering `priors` without `fit` doesn't change `priors_`""" priors = np.array([0.5, 0.5]) qda = QuadraticDiscriminantAnalysis(priors=priors).fit(X, y) # we expect the following assert_array_equal(qda.priors_, qda.priors) # altering `priors` without `fit` should not change `priors_` priors[0] = 0.2 assert qda.priors_[0] != qda.priors[0] def test_qda_store_covariance(): # The default is to not set the covariances_ attribute clf = QuadraticDiscriminantAnalysis().fit(X6, y6) assert not hasattr(clf, "covariance_") # Test the actual attribute: clf = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X6, y6) assert hasattr(clf, "covariance_") assert_array_almost_equal(clf.covariance_[0], np.array([[0.7, 0.45], [0.45, 0.7]])) assert_array_almost_equal( clf.covariance_[1], np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]]), ) @pytest.mark.parametrize("solver", ["svd", "eigen"]) def test_qda_regularization(global_random_seed, solver): # The default is reg_param=0. and will cause issues when there is a # constant variable. rng = np.random.default_rng(global_random_seed) # Fitting on data with constant variable without regularization # triggers a LinAlgError. msg = r"The covariance matrix of class .+ is not full rank." clf = QuadraticDiscriminantAnalysis(solver=solver) with pytest.raises(linalg.LinAlgError, match=msg): clf.fit(X2, y6) with pytest.raises(AttributeError): y_pred = clf.predict(X2) # Adding a little regularization fixes the fit time error. 
if solver == "svd": clf = QuadraticDiscriminantAnalysis(solver=solver, reg_param=0.01) elif solver == "eigen": clf = QuadraticDiscriminantAnalysis(solver=solver, shrinkage=0.01) with warnings.catch_warnings(): warnings.simplefilter("error") clf.fit(X2, y6) y_pred = clf.predict(X2) assert_array_equal(y_pred, y6) # LinAlgError should also be there for the n_samples_in_a_class < # n_features case. X = rng.normal(size=(9, 4)) y = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2]) clf = QuadraticDiscriminantAnalysis(solver=solver) if solver == "svd": msg2 = msg + " When using `solver='svd'`" elif solver == "eigen": msg2 = msg with pytest.raises(linalg.LinAlgError, match=msg2): clf.fit(X, y) # The error will persist even with regularization for SVD # because the number of singular values is limited by n_samples_in_a_class. if solver == "svd": clf = QuadraticDiscriminantAnalysis(solver=solver, reg_param=0.3) with pytest.raises(linalg.LinAlgError, match=msg2): clf.fit(X, y) # The warning will be gone for Eigen with regularization, because # the covariance matrix will be full-rank. elif solver == "eigen": clf = QuadraticDiscriminantAnalysis(solver=solver, shrinkage=0.3) clf.fit(X, y) def test_covariance(): x, y = make_blobs(n_samples=100, n_features=5, centers=1, random_state=42) # make features correlated x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1])) c_e = _cov(x, "empirical") assert_almost_equal(c_e, c_e.T) c_s = _cov(x, "auto") assert_almost_equal(c_s, c_s.T) @pytest.mark.parametrize("solver", ["svd", "lsqr", "eigen"]) def test_raises_value_error_on_same_number_of_classes_and_samples(solver): """ Tests that if the number of samples equals the number of classes, a ValueError is raised. 
""" X = np.array([[0.5, 0.6], [0.6, 0.5]]) y = np.array(["a", "b"]) clf = LinearDiscriminantAnalysis(solver=solver) with pytest.raises(ValueError, match="The number of samples must be more"): clf.fit(X, y) @pytest.mark.parametrize("solver", ["svd", "eigen"]) def test_raises_value_error_on_one_sample_per_class(solver): """ Tests that if a class has one sample, a ValueError is raised. """ X = np.array([[0.5, 0.6], [0.6, 0.5], [0.4, 0.4], [0.6, 0.5]]) y = np.array(["a", "a", "a", "b"]) clf = QuadraticDiscriminantAnalysis(solver=solver) with pytest.raises(ValueError, match="y has only 1 sample in class"): clf.fit(X, y) def test_get_feature_names_out(): """Check get_feature_names_out uses class name as prefix.""" est = LinearDiscriminantAnalysis().fit(X, y) names_out = est.get_feature_names_out() class_name_lower = "LinearDiscriminantAnalysis".lower() expected_names_out = np.array( [ f"{class_name_lower}{i}" for i in range(est.explained_variance_ratio_.shape[0]) ], dtype=object, ) assert_array_equal(names_out, expected_names_out) @pytest.mark.parametrize("n_features", [25]) @pytest.mark.parametrize("train_size", [100]) @pytest.mark.parametrize("solver_no_shrinkage", ["svd", "eigen"]) def test_qda_shrinkage_performance( global_random_seed, n_features, train_size, solver_no_shrinkage ): # Test that QDA with shrinkage performs better than without shrinkage on # a case where there's a small number of samples per class relative to # the number of features. n_samples = 1000 n_features = n_features rng = np.random.default_rng(global_random_seed) # Sample from two Gaussians with different variances and same null means. 
vars1 = rng.uniform(2.0, 3.0, size=n_features) vars2 = rng.uniform(0.2, 1.0, size=n_features) X = np.concatenate( [ np.random.randn(n_samples // 2, n_features) * np.sqrt(vars1), np.random.randn(n_samples // 2, n_features) * np.sqrt(vars2), ], axis=0, ) y = np.array([0] * (n_samples // 2) + [1] * (n_samples // 2)) # Use small training sets to illustrate the regularization effect of # covariance shrinkage. cv = ShuffleSplit(n_splits=5, train_size=train_size, random_state=0) qda_shrinkage = QuadraticDiscriminantAnalysis(solver="eigen", shrinkage="auto") qda_no_shrinkage = QuadraticDiscriminantAnalysis( solver=solver_no_shrinkage, shrinkage=None ) scores_no_shrinkage = cross_val_score( qda_no_shrinkage, X, y, cv=cv, scoring="d2_brier_score" ) scores_shrinkage = cross_val_score( qda_shrinkage, X, y, cv=cv, scoring="d2_brier_score" ) assert scores_shrinkage.mean() > 0.9 assert scores_no_shrinkage.mean() < 0.6
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_naive_bayes.py
sklearn/tests/test_naive_bayes.py
import re import warnings import numpy as np import pytest from scipy.special import logsumexp from sklearn._config import config_context from sklearn.datasets import load_digits, load_iris from sklearn.model_selection import cross_val_score, train_test_split from sklearn.naive_bayes import ( BernoulliNB, CategoricalNB, ComplementNB, GaussianNB, MultinomialNB, ) from sklearn.utils._array_api import ( _convert_to_numpy, _get_namespace_device_dtype_ids, device, yield_namespace_device_dtype_combinations, ) from sklearn.utils._testing import ( _array_api_for_tests, assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.fixes import CSR_CONTAINERS DISCRETE_NAIVE_BAYES_CLASSES = [BernoulliNB, CategoricalNB, ComplementNB, MultinomialNB] ALL_NAIVE_BAYES_CLASSES = DISCRETE_NAIVE_BAYES_CLASSES + [GaussianNB] # Data is just 6 separable points in the plane X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) y = np.array([1, 1, 1, 2, 2, 2]) def get_random_normal_x_binary_y(global_random_seed): # A bit more random tests rng = np.random.RandomState(global_random_seed) X1 = rng.normal(size=(10, 3)) y1 = (rng.normal(size=10) > 0).astype(int) return X1, y1 def get_random_integer_x_three_classes_y(global_random_seed): # Data is 6 random integer points in a 100 dimensional space classified to # three classes. rng = np.random.RandomState(global_random_seed) X2 = rng.randint(5, size=(6, 100)) y2 = np.array([1, 1, 2, 2, 3, 3]) return X2, y2 def test_gnb(): # Gaussian Naive Bayes classification. # This checks that GaussianNB implements fit and predict and returns # correct values for a simple toy dataset. 
clf = GaussianNB() y_pred = clf.fit(X, y).predict(X) assert_array_equal(y_pred, y) y_pred_proba = clf.predict_proba(X) y_pred_log_proba = clf.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) # Test whether label mismatch between target y and classes raises # an Error # FIXME Remove this test once the more general partial_fit tests are merged with pytest.raises( ValueError, match="The target label.* in y do not exist in the initial classes" ): GaussianNB().partial_fit(X, y, classes=[0, 1]) def test_gnb_prior(global_random_seed): # Test whether class priors are properly set. clf = GaussianNB().fit(X, y) assert_array_almost_equal(np.array([3, 3]) / 6.0, clf.class_prior_, 8) X1, y1 = get_random_normal_x_binary_y(global_random_seed) clf = GaussianNB().fit(X1, y1) # Check that the class priors sum to 1 assert_array_almost_equal(clf.class_prior_.sum(), 1) def test_gnb_sample_weight(global_random_seed): """Test whether sample weights are properly used in GNB.""" # Sample weights all being 1 should not change results sw = np.ones(6) clf = GaussianNB().fit(X, y) clf_sw = GaussianNB().fit(X, y, sw) assert_array_almost_equal(clf.theta_, clf_sw.theta_) assert_array_almost_equal(clf.var_, clf_sw.var_) # Fitting twice with half sample-weights should result # in same result as fitting once with full weights rng = np.random.RandomState(global_random_seed) sw = rng.rand(y.shape[0]) clf1 = GaussianNB().fit(X, y, sample_weight=sw) clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2) clf2.partial_fit(X, y, sample_weight=sw / 2) assert_array_almost_equal(clf1.theta_, clf2.theta_) assert_array_almost_equal(clf1.var_, clf2.var_) # Check that duplicate entries and correspondingly increased sample # weights yield the same result ind = rng.randint(0, X.shape[0], 20) sample_weight = np.bincount(ind, minlength=X.shape[0]) clf_dupl = GaussianNB().fit(X[ind], y[ind]) clf_sw = GaussianNB().fit(X, y, sample_weight) 
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_) assert_array_almost_equal(clf_dupl.var_, clf_sw.var_) # non-regression test for gh-24140 where a division by zero was # occurring when a single class was present sample_weight = (y == 1).astype(np.float64) clf = GaussianNB().fit(X, y, sample_weight=sample_weight) def test_gnb_neg_priors(): """Test whether an error is raised in case of negative priors""" clf = GaussianNB(priors=np.array([-1.0, 2.0])) msg = "Priors must be non-negative" with pytest.raises(ValueError, match=msg): clf.fit(X, y) def test_gnb_priors(): """Test whether the class prior override is properly used""" clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y) assert_array_almost_equal( clf.predict_proba([[-0.1, -0.1]]), np.array([[0.825303662161683, 0.174696337838317]]), 8, ) assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7])) def test_gnb_priors_sum_isclose(): # test whether the class prior sum is properly tested""" X = np.array( [ [-1, -1], [-2, -1], [-3, -2], [-4, -5], [-5, -4], [1, 1], [2, 1], [3, 2], [4, 4], [5, 5], ] ) priors = np.array([0.08, 0.14, 0.03, 0.16, 0.11, 0.16, 0.07, 0.14, 0.11, 0.0]) Y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) clf = GaussianNB(priors=priors) # smoke test for issue #9633 clf.fit(X, Y) def test_gnb_wrong_nb_priors(): """Test whether an error is raised if the number of prior is different from the number of class""" clf = GaussianNB(priors=np.array([0.25, 0.25, 0.25, 0.25])) msg = "Number of priors must match number of classes" with pytest.raises(ValueError, match=msg): clf.fit(X, y) def test_gnb_prior_greater_one(): """Test if an error is raised if the sum of prior greater than one""" clf = GaussianNB(priors=np.array([2.0, 1.0])) msg = "The sum of the priors should be 1" with pytest.raises(ValueError, match=msg): clf.fit(X, y) def test_gnb_prior_large_bias(): """Test if good prediction when class prior favor largely one class""" clf = GaussianNB(priors=np.array([0.01, 0.99])) clf.fit(X, y) 
assert clf.predict([[-0.1, -0.1]]) == np.array([2]) def test_gnb_check_update_with_no_data(): """Test when the partial fit is called without any data""" # Create an empty array prev_points = 100 mean = 0.0 var = 1.0 x_empty = np.empty((0, X.shape[1])) tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean, var, x_empty) assert tmean == mean assert tvar == var def test_gnb_partial_fit(global_dtype): X_ = X.astype(global_dtype) clf = GaussianNB().fit(X_, y) clf_pf = GaussianNB().partial_fit(X_, y, np.unique(y)) for fitted_attr in ("class_prior_", "theta_", "var_"): clf_attr = getattr(clf, fitted_attr) clf_pf_attr = getattr(clf_pf, fitted_attr) assert clf_attr.dtype == clf_pf_attr.dtype == X_.dtype assert_array_almost_equal(clf_attr, clf_pf_attr) clf_pf2 = GaussianNB().partial_fit(X_[0::2, :], y[0::2], np.unique(y)) clf_pf2.partial_fit(X_[1::2], y[1::2]) for fitted_attr in ("class_prior_", "theta_", "var_"): clf_attr = getattr(clf, fitted_attr) clf_pf2_attr = getattr(clf_pf2, fitted_attr) assert clf_attr.dtype == clf_pf2_attr.dtype == X_.dtype assert_array_almost_equal(clf_attr, clf_pf2_attr) def test_gnb_naive_bayes_scale_invariance(): # Scaling the data should not change the prediction results iris = load_iris() X, y = iris.data, iris.target labels = [GaussianNB().fit(f * X, y).predict(f * X) for f in [1e-10, 1, 1e10]] assert_array_equal(labels[0], labels[1]) assert_array_equal(labels[1], labels[2]) @pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) def test_discretenb_prior(DiscreteNaiveBayes, global_random_seed): # Test whether class priors are properly set. 
X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) clf = DiscreteNaiveBayes().fit(X2, y2) assert_array_almost_equal( np.log(np.array([2, 2, 2]) / 6.0), clf.class_log_prior_, 8 ) @pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) def test_discretenb_partial_fit(DiscreteNaiveBayes): clf1 = DiscreteNaiveBayes() clf1.fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1]) clf2 = DiscreteNaiveBayes() clf2.partial_fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1], classes=[0, 1]) assert_array_equal(clf1.class_count_, clf2.class_count_) if DiscreteNaiveBayes is CategoricalNB: for i in range(len(clf1.category_count_)): assert_array_equal(clf1.category_count_[i], clf2.category_count_[i]) else: assert_array_equal(clf1.feature_count_, clf2.feature_count_) clf3 = DiscreteNaiveBayes() # all categories have to appear in the first partial fit clf3.partial_fit([[0, 1]], [0], classes=[0, 1]) clf3.partial_fit([[1, 0]], [1]) clf3.partial_fit([[1, 1]], [1]) assert_array_equal(clf1.class_count_, clf3.class_count_) if DiscreteNaiveBayes is CategoricalNB: # the categories for each feature of CategoricalNB are mapped to an # index chronologically with each call of partial fit and therefore # the category_count matrices cannot be compared for equality for i in range(len(clf1.category_count_)): assert_array_equal( clf1.category_count_[i].shape, clf3.category_count_[i].shape ) assert_array_equal( np.sum(clf1.category_count_[i], axis=1), np.sum(clf3.category_count_[i], axis=1), ) # assert category 0 occurs 1x in the first class and 0x in the 2nd # class assert_array_equal(clf1.category_count_[0][0], np.array([1, 0])) # assert category 1 occurs 0x in the first class and 2x in the 2nd # class assert_array_equal(clf1.category_count_[0][1], np.array([0, 2])) # assert category 0 occurs 0x in the first class and 1x in the 2nd # class assert_array_equal(clf1.category_count_[1][0], np.array([0, 1])) # assert category 1 occurs 1x in the first class and 1x in the 2nd # class 
assert_array_equal(clf1.category_count_[1][1], np.array([1, 1])) else: assert_array_equal(clf1.feature_count_, clf3.feature_count_) @pytest.mark.parametrize("NaiveBayes", ALL_NAIVE_BAYES_CLASSES) def test_NB_partial_fit_no_first_classes(NaiveBayes, global_random_seed): # classes is required for first call to partial fit X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) with pytest.raises( ValueError, match="classes must be passed on the first call to partial_fit." ): NaiveBayes().partial_fit(X2, y2) # check consistency of consecutive classes values clf = NaiveBayes() clf.partial_fit(X2, y2, classes=np.unique(y2)) with pytest.raises( ValueError, match="is not the same as on last call to partial_fit" ): clf.partial_fit(X2, y2, classes=np.arange(42)) def test_discretenb_predict_proba(): # Test discrete NB classes' probability scores # The 100s below distinguish Bernoulli from multinomial. # FIXME: write a test to show this. X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]] X_multinomial = [[0, 1], [1, 3], [4, 0]] # test binary case (1-d output) y = [0, 0, 2] # 2 is regression test for binary case, 02e673 for DiscreteNaiveBayes, X in zip( [BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial] ): clf = DiscreteNaiveBayes().fit(X, y) assert clf.predict(X[-1:]) == 2 assert clf.predict_proba([X[0]]).shape == (1, 2) assert_array_almost_equal( clf.predict_proba(X[:2]).sum(axis=1), np.array([1.0, 1.0]), 6 ) # test multiclass case (2-d output, must sum to one) y = [0, 1, 2] for DiscreteNaiveBayes, X in zip( [BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial] ): clf = DiscreteNaiveBayes().fit(X, y) assert clf.predict_proba(X[0:1]).shape == (1, 3) assert clf.predict_proba(X[:2]).shape == (2, 3) assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1) assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1) assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1) @pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) def 
test_discretenb_uniform_prior(DiscreteNaiveBayes): # Test whether discrete NB classes fit a uniform prior # when fit_prior=False and class_prior=None clf = DiscreteNaiveBayes() clf.set_params(fit_prior=False) clf.fit([[0], [0], [1]], [0, 0, 1]) prior = np.exp(clf.class_log_prior_) assert_array_almost_equal(prior, np.array([0.5, 0.5])) @pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) def test_discretenb_provide_prior(DiscreteNaiveBayes): # Test whether discrete NB classes use provided prior clf = DiscreteNaiveBayes(class_prior=[0.5, 0.5]) clf.fit([[0], [0], [1]], [0, 0, 1]) prior = np.exp(clf.class_log_prior_) assert_array_almost_equal(prior, np.array([0.5, 0.5])) # Inconsistent number of classes with prior msg = "Number of priors must match number of classes" with pytest.raises(ValueError, match=msg): clf.fit([[0], [1], [2]], [0, 1, 2]) msg = "is not the same as on last call to partial_fit" with pytest.raises(ValueError, match=msg): clf.partial_fit([[0], [1]], [0, 1], classes=[0, 1, 1]) @pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) def test_discretenb_provide_prior_with_partial_fit(DiscreteNaiveBayes): # Test whether discrete NB classes use provided prior # when using partial_fit iris = load_iris() iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split( iris.data, iris.target, test_size=0.4, random_state=415 ) for prior in [None, [0.3, 0.3, 0.4]]: clf_full = DiscreteNaiveBayes(class_prior=prior) clf_full.fit(iris.data, iris.target) clf_partial = DiscreteNaiveBayes(class_prior=prior) clf_partial.partial_fit(iris_data1, iris_target1, classes=[0, 1, 2]) clf_partial.partial_fit(iris_data2, iris_target2) assert_array_almost_equal( clf_full.class_log_prior_, clf_partial.class_log_prior_ ) @pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) def test_discretenb_sample_weight_multiclass(DiscreteNaiveBayes): # check shape consistency for number of samples at fit time X = [ [0, 
0, 1], [0, 1, 1], [0, 1, 1], [1, 0, 0], ] y = [0, 0, 1, 2] sample_weight = np.array([1, 1, 2, 2], dtype=np.float64) sample_weight /= sample_weight.sum() clf = DiscreteNaiveBayes().fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), [0, 1, 1, 2]) # Check sample weight using the partial_fit method clf = DiscreteNaiveBayes() clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2], sample_weight=sample_weight[:2]) clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3]) clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:]) assert_array_equal(clf.predict(X), [0, 1, 1, 2]) @pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES) @pytest.mark.parametrize("use_partial_fit", [False, True]) @pytest.mark.parametrize("train_on_single_class_y", [False, True]) def test_discretenb_degenerate_one_class_case( DiscreteNaiveBayes, use_partial_fit, train_on_single_class_y, ): # Most array attributes of a discrete naive Bayes classifier should have a # first-axis length equal to the number of classes. Exceptions include: # ComplementNB.feature_all_, CategoricalNB.n_categories_. # Confirm that this is the case for binary problems and the degenerate # case of a single class in the training set, when fitting with `fit` or # `partial_fit`. 
# Non-regression test for handling degenerate one-class case: # https://github.com/scikit-learn/scikit-learn/issues/18974 X = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] y = [1, 1, 2] if train_on_single_class_y: X = X[:-1] y = y[:-1] classes = sorted(list(set(y))) num_classes = len(classes) clf = DiscreteNaiveBayes() if use_partial_fit: clf.partial_fit(X, y, classes=classes) else: clf.fit(X, y) assert clf.predict(X[:1]) == y[0] # Check that attributes have expected first-axis lengths attribute_names = [ "classes_", "class_count_", "class_log_prior_", "feature_count_", "feature_log_prob_", ] for attribute_name in attribute_names: attribute = getattr(clf, attribute_name, None) if attribute is None: # CategoricalNB has no feature_count_ attribute continue if isinstance(attribute, np.ndarray): assert attribute.shape[0] == num_classes else: # CategoricalNB.feature_log_prob_ is a list of arrays for element in attribute: assert element.shape[0] == num_classes @pytest.mark.parametrize("kind", ("dense", "sparse")) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_mnnb(kind, global_random_seed, csr_container): # Test Multinomial Naive Bayes classification. # This checks that MultinomialNB implements fit and predict and returns # correct values for a simple toy dataset. X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) if kind == "dense": X = X2 elif kind == "sparse": X = csr_container(X2) # Check the ability to predict the learning set. 
clf = MultinomialNB() msg = "Negative values in data passed to" with pytest.raises(ValueError, match=msg): clf.fit(-X, y2) y_pred = clf.fit(X, y2).predict(X) assert_array_equal(y_pred, y2) # Verify that np.log(clf.predict_proba(X)) gives the same results as # clf.predict_log_proba(X) y_pred_proba = clf.predict_proba(X) y_pred_log_proba = clf.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) # Check that incremental fitting yields the same results clf2 = MultinomialNB() clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2)) clf2.partial_fit(X[2:5], y2[2:5]) clf2.partial_fit(X[5:], y2[5:]) y_pred2 = clf2.predict(X) assert_array_equal(y_pred2, y2) y_pred_proba2 = clf2.predict_proba(X) y_pred_log_proba2 = clf2.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8) assert_array_almost_equal(y_pred_proba2, y_pred_proba) assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba) # Partial fit on the whole data at once should be the same as fit too clf3 = MultinomialNB() clf3.partial_fit(X, y2, classes=np.unique(y2)) y_pred3 = clf3.predict(X) assert_array_equal(y_pred3, y2) y_pred_proba3 = clf3.predict_proba(X) y_pred_log_proba3 = clf3.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8) assert_array_almost_equal(y_pred_proba3, y_pred_proba) assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba) def test_mnb_prior_unobserved_targets(): # test smoothing of prior for yet unobserved targets # Create toy training data X = np.array([[0, 1], [1, 0]]) y = np.array([0, 1]) clf = MultinomialNB() with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) clf.partial_fit(X, y, classes=[0, 1, 2]) assert clf.predict([[0, 1]]) == 0 assert clf.predict([[1, 0]]) == 1 assert clf.predict([[1, 1]]) == 0 # add a training example with previously unobserved class with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) 
clf.partial_fit([[1, 1]], [2]) assert clf.predict([[0, 1]]) == 0 assert clf.predict([[1, 0]]) == 1 assert clf.predict([[1, 1]]) == 2 def test_bnb(): # Tests that BernoulliNB when alpha=1.0 gives the same values as # those given for the toy example in Manning, Raghavan, and # Schuetze's "Introduction to Information Retrieval" book: # https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html # Training data points are: # Chinese Beijing Chinese (class: China) # Chinese Chinese Shanghai (class: China) # Chinese Macao (class: China) # Tokyo Japan Chinese (class: Japan) # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo X = np.array( [[1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1]] ) # Classes are China (0), Japan (1) Y = np.array([0, 0, 0, 1]) # Fit BernoulliBN w/ alpha = 1.0 clf = BernoulliNB(alpha=1.0) clf.fit(X, Y) # Check the class prior is correct class_prior = np.array([0.75, 0.25]) assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior) # Check the feature probabilities are correct feature_prob = np.array( [ [0.4, 0.8, 0.2, 0.4, 0.4, 0.2], [1 / 3.0, 2 / 3.0, 2 / 3.0, 1 / 3.0, 1 / 3.0, 2 / 3.0], ] ) assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob) # Testing data point is: # Chinese Chinese Chinese Tokyo Japan X_test = np.array([[0, 1, 1, 0, 0, 1]]) # Check the predictive probabilities are correct unnorm_predict_proba = np.array([[0.005183999999999999, 0.02194787379972565]]) predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba) assert_array_almost_equal(clf.predict_proba(X_test), predict_proba) def test_bnb_feature_log_prob(): # Test for issue #4268. 
# Tests that the feature log prob value computed by BernoulliNB when # alpha=1.0 is equal to the expression given in Manning, Raghavan, # and Schuetze's "Introduction to Information Retrieval" book: # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]]) Y = np.array([0, 0, 1, 2, 2]) # Fit Bernoulli NB w/ alpha = 1.0 clf = BernoulliNB(alpha=1.0) clf.fit(X, Y) # Manually form the (log) numerator and denominator that # constitute P(feature presence | class) num = np.log(clf.feature_count_ + 1.0) denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T # Check manual estimate matches assert_array_almost_equal(clf.feature_log_prob_, (num - denom)) def test_cnb(): # Tests ComplementNB when alpha=1.0 for the toy example in Manning, # Raghavan, and Schuetze's "Introduction to Information Retrieval" book: # https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html # Training data points are: # Chinese Beijing Chinese (class: China) # Chinese Chinese Shanghai (class: China) # Chinese Macao (class: China) # Tokyo Japan Chinese (class: Japan) # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo. X = np.array( [[1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1]] ) # Classes are China (0), Japan (1). Y = np.array([0, 0, 0, 1]) # Check that weights are correct. See steps 4-6 in Table 4 of # Rennie et al. (2003). theta = np.array( [ [ (0 + 1) / (3 + 6), (1 + 1) / (3 + 6), (1 + 1) / (3 + 6), (0 + 1) / (3 + 6), (0 + 1) / (3 + 6), (1 + 1) / (3 + 6), ], [ (1 + 1) / (6 + 6), (3 + 1) / (6 + 6), (0 + 1) / (6 + 6), (1 + 1) / (6 + 6), (1 + 1) / (6 + 6), (0 + 1) / (6 + 6), ], ] ) weights = np.zeros(theta.shape) normed_weights = np.zeros(theta.shape) for i in range(2): weights[i] = -np.log(theta[i]) normed_weights[i] = weights[i] / weights[i].sum() # Verify inputs are nonnegative. 
clf = ComplementNB(alpha=1.0) msg = re.escape("Negative values in data passed to ComplementNB (input X)") with pytest.raises(ValueError, match=msg): clf.fit(-X, Y) clf.fit(X, Y) # Check that counts/weights are correct. feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]]) assert_array_equal(clf.feature_count_, feature_count) class_count = np.array([3, 1]) assert_array_equal(clf.class_count_, class_count) feature_all = np.array([1, 4, 1, 1, 1, 1]) assert_array_equal(clf.feature_all_, feature_all) assert_array_almost_equal(clf.feature_log_prob_, weights) clf = ComplementNB(alpha=1.0, norm=True) clf.fit(X, Y) assert_array_almost_equal(clf.feature_log_prob_, normed_weights) def test_categoricalnb(global_random_seed): # Check the ability to predict the training set. clf = CategoricalNB() X2, y2 = get_random_integer_x_three_classes_y(global_random_seed) y_pred = clf.fit(X2, y2).predict(X2) assert_array_equal(y_pred, y2) X3 = np.array([[1, 4], [2, 5]]) y3 = np.array([1, 2]) clf = CategoricalNB(alpha=1, fit_prior=False) clf.fit(X3, y3) assert_array_equal(clf.n_categories_, np.array([3, 6])) # Check error is raised for X with negative entries X = np.array([[0, -1]]) y = np.array([1]) error_msg = re.escape("Negative values in data passed to CategoricalNB (input X)") with pytest.raises(ValueError, match=error_msg): clf.predict(X) with pytest.raises(ValueError, match=error_msg): clf.fit(X, y) # Test alpha X3_test = np.array([[2, 5]]) # alpha=1 increases the count of all categories by one so the final # probability for each category is not 50/50 but 1/3 to 2/3 bayes_numerator = np.array([[1 / 3 * 1 / 3, 2 / 3 * 2 / 3]]) bayes_denominator = bayes_numerator.sum() assert_array_almost_equal( clf.predict_proba(X3_test), bayes_numerator / bayes_denominator ) # Assert category_count has counted all features assert len(clf.category_count_) == X3.shape[1] # Check sample_weight X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]]) y = np.array([1, 1, 2, 2]) clf = 
CategoricalNB(alpha=1, fit_prior=False) clf.fit(X, y) assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([1])) assert_array_equal(clf.n_categories_, np.array([2, 2])) for factor in [1.0, 0.3, 5, 0.0001]: X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]]) y = np.array([1, 1, 2, 2]) sample_weight = np.array([1, 1, 10, 0.1]) * factor clf = CategoricalNB(alpha=1, fit_prior=False) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([2])) assert_array_equal(clf.n_categories_, np.array([2, 2])) @pytest.mark.parametrize( "min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_", [ # check min_categories with int > observed categories ( 3, np.array([[2, 0, 0], [1, 1, 0]]), np.array([[1, 1, 0], [1, 1, 0]]), np.array([[0, 2]]), np.array([3, 3]), ), # check with list input ( [3, 4], np.array([[2, 0, 0], [1, 1, 0]]), np.array([[1, 1, 0, 0], [1, 1, 0, 0]]), np.array([[0, 3]]), np.array([3, 4]), ), # check min_categories with min less than actual ( [ 1, np.array([[2, 0], [1, 1]]), np.array([[1, 1], [1, 1]]), np.array([[0, 1]]), np.array([2, 2]), ] ), ], ) def test_categoricalnb_with_min_categories( min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_ ): X_n_categories = np.array([[0, 0], [0, 1], [0, 0], [1, 1]]) y_n_categories = np.array([1, 1, 2, 2]) expected_prediction = np.array([1]) clf = CategoricalNB(alpha=1, fit_prior=False, min_categories=min_categories) clf.fit(X_n_categories, y_n_categories) X1_count, X2_count = clf.category_count_ assert_array_equal(X1_count, exp_X1_count) assert_array_equal(X2_count, exp_X2_count) predictions = clf.predict(new_X) assert_array_equal(predictions, expected_prediction) assert_array_equal(clf.n_categories_, exp_n_categories_) @pytest.mark.parametrize( "min_categories, error_msg", [ ([[3, 2], [2, 4]], "'min_categories' should have shape"), ], ) def test_categoricalnb_min_categories_errors(min_categories, error_msg): X = np.array([[0, 0], [0, 1], [0, 0], 
[1, 1]]) y = np.array([1, 1, 2, 2]) clf = CategoricalNB(alpha=1, fit_prior=False, min_categories=min_categories) with pytest.raises(ValueError, match=error_msg): clf.fit(X, y) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_alpha(csr_container): # Setting alpha=0 should not output nan results when p(x_i|y_j)=0 is a case X = np.array([[1, 0], [1, 1]]) y = np.array([0, 1]) nb = BernoulliNB(alpha=0.0, force_alpha=False) msg = "alpha too small will result in numeric errors, setting alpha = 1.0e-10" with pytest.warns(UserWarning, match=msg): nb.partial_fit(X, y, classes=[0, 1]) with pytest.warns(UserWarning, match=msg): nb.fit(X, y) prob = np.array([[1, 0], [0, 1]]) assert_array_almost_equal(nb.predict_proba(X), prob) nb = MultinomialNB(alpha=0.0, force_alpha=False) with pytest.warns(UserWarning, match=msg): nb.partial_fit(X, y, classes=[0, 1]) with pytest.warns(UserWarning, match=msg): nb.fit(X, y) prob = np.array([[2.0 / 3, 1.0 / 3], [0, 1]]) assert_array_almost_equal(nb.predict_proba(X), prob) nb = CategoricalNB(alpha=0.0, force_alpha=False) with pytest.warns(UserWarning, match=msg): nb.fit(X, y) prob = np.array([[1.0, 0.0], [0.0, 1.0]]) assert_array_almost_equal(nb.predict_proba(X), prob) # Test sparse X X = csr_container(X) nb = BernoulliNB(alpha=0.0, force_alpha=False) with pytest.warns(UserWarning, match=msg): nb.fit(X, y) prob = np.array([[1, 0], [0, 1]]) assert_array_almost_equal(nb.predict_proba(X), prob) nb = MultinomialNB(alpha=0.0, force_alpha=False) with pytest.warns(UserWarning, match=msg): nb.fit(X, y) prob = np.array([[2.0 / 3, 1.0 / 3], [0, 1]]) assert_array_almost_equal(nb.predict_proba(X), prob) def test_alpha_vector(): X = np.array([[1, 0], [1, 1]]) y = np.array([0, 1]) # Setting alpha=np.array with same length # as number of features should be fine alpha = np.array([1, 2]) nb = MultinomialNB(alpha=alpha, force_alpha=False) nb.partial_fit(X, y, classes=[0, 1]) # Test feature probabilities uses pseudo-counts (alpha) feature_prob = 
np.array([[1 / 2, 1 / 2], [2 / 5, 3 / 5]]) assert_array_almost_equal(nb.feature_log_prob_, np.log(feature_prob)) # Test predictions prob = np.array([[5 / 9, 4 / 9], [25 / 49, 24 / 49]]) assert_array_almost_equal(nb.predict_proba(X), prob) # Test alpha non-negative alpha = np.array([1.0, -0.1]) m_nb = MultinomialNB(alpha=alpha, force_alpha=False) expected_msg = "All values in alpha must be greater than 0." with pytest.raises(ValueError, match=expected_msg): m_nb.fit(X, y) # Test that too small pseudo-counts are replaced ALPHA_MIN = 1e-10 alpha = np.array([ALPHA_MIN / 2, 0.5]) m_nb = MultinomialNB(alpha=alpha, force_alpha=False) m_nb.partial_fit(X, y, classes=[0, 1]) assert_array_almost_equal(m_nb._check_alpha(), [ALPHA_MIN, 0.5], decimal=12) # Test correct dimensions alpha = np.array([1.0, 2.0, 3.0]) m_nb = MultinomialNB(alpha=alpha, force_alpha=False) expected_msg = "When alpha is an array, it should contains `n_features`" with pytest.raises(ValueError, match=expected_msg): m_nb.fit(X, y) def test_check_accuracy_on_digits(): # Non regression test to make sure that any further refactoring / optim # of the NB models do not harm the performance on a slightly non-linearly
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_check_build.py
sklearn/tests/test_check_build.py
""" Smoke Test the check_build module """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import pytest from sklearn.__check_build import raise_build_error def test_raise_build_error(): with pytest.raises(ImportError): raise_build_error(ImportError())
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_metaestimators_metadata_routing.py
sklearn/tests/test_metaestimators_metadata_routing.py
import copy import re import numpy as np import pytest from sklearn import config_context from sklearn.base import BaseEstimator, is_classifier from sklearn.calibration import CalibratedClassifierCV from sklearn.compose import TransformedTargetRegressor from sklearn.covariance import GraphicalLassoCV from sklearn.ensemble import ( AdaBoostClassifier, AdaBoostRegressor, BaggingClassifier, BaggingRegressor, ) from sklearn.exceptions import UnsetMetadataPassedError from sklearn.experimental import ( enable_halving_search_cv, # noqa: F401 enable_iterative_imputer, # noqa: F401 ) from sklearn.feature_selection import ( RFE, RFECV, SelectFromModel, SequentialFeatureSelector, ) from sklearn.impute import IterativeImputer from sklearn.linear_model import ( ElasticNetCV, LarsCV, LassoCV, LassoLarsCV, LogisticRegressionCV, MultiTaskElasticNetCV, MultiTaskLassoCV, OrthogonalMatchingPursuitCV, RANSACRegressor, RidgeClassifierCV, RidgeCV, ) from sklearn.metrics._regression import mean_squared_error from sklearn.metrics._scorer import make_scorer from sklearn.model_selection import ( FixedThresholdClassifier, GridSearchCV, GroupKFold, HalvingGridSearchCV, HalvingRandomSearchCV, RandomizedSearchCV, TunedThresholdClassifierCV, cross_validate, ) from sklearn.multiclass import ( OneVsOneClassifier, OneVsRestClassifier, OutputCodeClassifier, ) from sklearn.multioutput import ( ClassifierChain, MultiOutputClassifier, MultiOutputRegressor, RegressorChain, ) from sklearn.semi_supervised import SelfTrainingClassifier from sklearn.tests.metadata_routing_common import ( ConsumingClassifier, ConsumingRegressor, ConsumingScorer, ConsumingSplitter, NonConsumingClassifier, NonConsumingRegressor, _Registry, assert_request_is_empty, check_recorded_metadata, ) from sklearn.utils.metadata_routing import MetadataRouter rng = np.random.RandomState(42) N, M = 100, 4 X = rng.rand(N, M) y = rng.randint(0, 3, size=N) y_binary = (y >= 1).astype(int) classes = np.unique(y) y_multi = rng.randint(0, 3, 
size=(N, 3)) classes_multi = [np.unique(y_multi[:, i]) for i in range(y_multi.shape[1])] metadata = rng.randint(0, 10, size=N) sample_weight = rng.rand(N) groups = rng.randint(0, 10, size=len(y)) METAESTIMATORS: list = [ { "metaestimator": MultiOutputRegressor, "estimator_name": "estimator", "estimator": "regressor", "X": X, "y": y_multi, "estimator_routing_methods": ["fit", "partial_fit"], }, { "metaestimator": MultiOutputClassifier, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y_multi, "estimator_routing_methods": ["fit", "partial_fit"], "method_args": {"partial_fit": {"classes": classes_multi}}, }, { "metaestimator": CalibratedClassifierCV, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y, "estimator_routing_methods": ["fit"], "preserves_metadata": "subset", }, { "metaestimator": ClassifierChain, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y_multi, "estimator_routing_methods": ["fit"], }, { "metaestimator": RegressorChain, "estimator_name": "estimator", "estimator": "regressor", "X": X, "y": y_multi, "estimator_routing_methods": ["fit"], }, { "metaestimator": LogisticRegressionCV, "init_args": {"use_legacy_attributes": False, "l1_ratios": (0,)}, "X": X, "y": y, "scorer_name": "scoring", "scorer_routing_methods": ["fit", "score"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": GridSearchCV, "estimator_name": "estimator", "estimator": "classifier", "init_args": {"param_grid": {"alpha": [0.1, 0.2]}}, "X": X, "y": y, "estimator_routing_methods": ["fit"], "preserves_metadata": "subset", "scorer_name": "scoring", "scorer_routing_methods": ["fit", "score"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": RandomizedSearchCV, "estimator_name": "estimator", "estimator": "classifier", "init_args": {"param_distributions": {"alpha": [0.1, 0.2]}}, "X": X, "y": y, "estimator_routing_methods": ["fit"], "preserves_metadata": "subset", "scorer_name": 
"scoring", "scorer_routing_methods": ["fit", "score"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": HalvingGridSearchCV, "estimator_name": "estimator", "estimator": "classifier", "init_args": {"param_grid": {"alpha": [0.1, 0.2]}}, "X": X, "y": y, "estimator_routing_methods": ["fit"], "preserves_metadata": "subset", "scorer_name": "scoring", "scorer_routing_methods": ["fit", "score"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": HalvingRandomSearchCV, "estimator_name": "estimator", "estimator": "classifier", "init_args": {"param_distributions": {"alpha": [0.1, 0.2]}}, "X": X, "y": y, "estimator_routing_methods": ["fit"], "preserves_metadata": "subset", "scorer_name": "scoring", "scorer_routing_methods": ["fit", "score"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": FixedThresholdClassifier, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y_binary, "estimator_routing_methods": ["fit"], "preserves_metadata": "subset", }, { "metaestimator": TunedThresholdClassifierCV, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y_binary, "estimator_routing_methods": ["fit"], "preserves_metadata": "subset", }, { "metaestimator": OneVsRestClassifier, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y, "estimator_routing_methods": ["fit", "partial_fit"], "method_args": {"partial_fit": {"classes": classes}}, }, { "metaestimator": OneVsOneClassifier, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y, "estimator_routing_methods": ["fit", "partial_fit"], "preserves_metadata": "subset", "method_args": {"partial_fit": {"classes": classes}}, }, { "metaestimator": OutputCodeClassifier, "estimator_name": "estimator", "estimator": "classifier", "init_args": {"random_state": 42}, "X": X, "y": y, "estimator_routing_methods": ["fit"], }, { "metaestimator": SelectFromModel, "estimator_name": "estimator", "estimator": "classifier", "X": 
X, "y": y, "estimator_routing_methods": ["fit", "partial_fit"], "method_args": {"partial_fit": {"classes": classes}}, }, { "metaestimator": OrthogonalMatchingPursuitCV, "X": X, "y": y, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": ElasticNetCV, "X": X, "y": y, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": LassoCV, "X": X, "y": y, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": MultiTaskElasticNetCV, "X": X, "y": y_multi, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": MultiTaskLassoCV, "X": X, "y": y_multi, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": LarsCV, "X": X, "y": y, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": LassoLarsCV, "X": X, "y": y, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": RANSACRegressor, "estimator_name": "estimator", "estimator": "regressor", "init_args": {"min_samples": 0.5, "max_trials": 10}, "X": X, "y": y, "preserves_metadata": "subset", "estimator_routing_methods": ["fit", "predict", "score"], "method_mapping": {"fit": ["fit", "score"]}, }, { "metaestimator": IterativeImputer, "estimator_name": "estimator", "estimator": "regressor", "init_args": {"skip_complete": False}, "X": X, "y": y, "estimator_routing_methods": ["fit"], }, { "metaestimator": BaggingClassifier, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y, "preserves_metadata": False, "estimator_routing_methods": [ ("fit", ["metadata"]), "predict", "predict_proba", "predict_log_proba", "decision_function", ], "method_mapping": { "predict": ["predict", "predict_proba"], "predict_proba": ["predict", "predict_proba"], "predict_log_proba": ["predict", "predict_proba", "predict_log_proba"], }, }, { "metaestimator": BaggingRegressor, "estimator_name": "estimator", "estimator": "regressor", "X": X, "y": y, "preserves_metadata": False, "estimator_routing_methods": [("fit", ["metadata"]), "predict"], }, 
{ "metaestimator": RidgeCV, "X": X, "y": y, "scorer_name": "scoring", "scorer_routing_methods": ["fit"], }, { "metaestimator": RidgeClassifierCV, "X": X, "y": y, "scorer_name": "scoring", "scorer_routing_methods": ["fit"], }, { "metaestimator": RidgeCV, "X": X, "y": y, "scorer_name": "scoring", "scorer_routing_methods": ["fit"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": RidgeClassifierCV, "X": X, "y": y, "scorer_name": "scoring", "scorer_routing_methods": ["fit"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": GraphicalLassoCV, "X": X, "y": y, "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": TransformedTargetRegressor, "estimator": "regressor", "estimator_name": "regressor", "X": X, "y": y, "estimator_routing_methods": ["fit", "predict"], }, { "metaestimator": SelfTrainingClassifier, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y, "preserves_metadata": True, "estimator_routing_methods": [ "fit", "predict", "predict_proba", "predict_log_proba", "decision_function", "score", ], "method_mapping": {"fit": ["fit", "score"]}, }, { "metaestimator": SequentialFeatureSelector, "estimator_name": "estimator", "estimator": "classifier", "X": X, "y": y, "estimator_routing_methods": ["fit"], "scorer_name": "scoring", "scorer_routing_methods": ["fit"], "cv_name": "cv", "cv_routing_methods": ["fit"], }, { "metaestimator": RFE, "estimator": "classifier", "estimator_name": "estimator", "X": X, "y": y, "estimator_routing_methods": ["fit", "predict", "score"], }, { "metaestimator": RFECV, "estimator": "classifier", "estimator_name": "estimator", "estimator_routing_methods": ["fit"], "cv_name": "cv", "cv_routing_methods": ["fit"], "scorer_name": "scoring", "scorer_routing_methods": ["fit", "score"], "X": X, "y": y, }, ] """List containing all metaestimators to be tested and their settings The keys are as follows: - metaestimator: The metaestimator to be tested - estimator_name: The name of 
the argument for the sub-estimator - estimator: The sub-estimator type, either "regressor" or "classifier" - init_args: The arguments to be passed to the metaestimator's constructor - X: X-data to fit and predict - y: y-data to fit - estimator_routing_methods: list of all methods to check for routing metadata to the sub-estimator. Each value is either a str or a tuple: - str: the name of the method, all metadata in this method must be routed to the sub-estimator - tuple: the name of the method, the second element is a list of metadata keys to be passed to the sub-estimator. This is useful if certain metadata such as `sample_weight` are never routed and only consumed, such as in `BaggingClassifier` and `BaggingRegressor`. - preserves_metadata: - True (default): the metaestimator passes the metadata to the sub-estimator without modification. We check that the values recorded by the sub-estimator are identical to what we've passed to the metaestimator. - False: no check is performed regarding values, we only check that a metadata with the expected names/keys are passed. - "subset": we check that the recorded metadata by the sub-estimator is a subset of what is passed to the metaestimator. - scorer_name: The name of the argument for the scorer - scorer_routing_methods: list of all methods to check for routing metadata to the scorer - cv_name: The name of the argument for the CV splitter - cv_routing_methods: list of all methods to check for routing metadata to the splitter - method_args: a dict of dicts, defining extra arguments needed to be passed to methods, such as passing `classes` to `partial_fit`. - method_mapping: a dict of the form `{caller: [callee1, ...]}` which signals which `.set_{method}_request` methods should be called to set request values. If not present, a one-to-one mapping is assumed. 
""" # IDs used by pytest to get meaningful verbose messages when running the tests METAESTIMATOR_IDS = [str(row["metaestimator"].__name__) for row in METAESTIMATORS] UNSUPPORTED_ESTIMATORS = [ AdaBoostClassifier(), AdaBoostRegressor(), ] def get_init_args(metaestimator_info, sub_estimator_consumes): """Get the init args for a metaestimator This is a helper function to get the init args for a metaestimator from the METAESTIMATORS list. It returns an empty dict if no init args are required. Parameters ---------- metaestimator_info : dict The metaestimator info from METAESTIMATORS sub_estimator_consumes : bool Whether the sub-estimator consumes metadata or not. Returns ------- kwargs : dict The init args for the metaestimator. (estimator, estimator_registry) : (estimator, registry) The sub-estimator and the corresponding registry. (scorer, scorer_registry) : (scorer, registry) The scorer and the corresponding registry. (cv, cv_registry) : (CV splitter, registry) The CV splitter and the corresponding registry. """ # Avoid mutating the original init_args dict to keep the test execution # thread-safe. 
kwargs = metaestimator_info.get("init_args", {}).copy() estimator, estimator_registry = None, None scorer, scorer_registry = None, None cv, cv_registry = None, None if "estimator" in metaestimator_info: estimator_name = metaestimator_info["estimator_name"] estimator_registry = _Registry() sub_estimator_type = metaestimator_info["estimator"] if sub_estimator_consumes: if sub_estimator_type == "regressor": estimator = ConsumingRegressor(estimator_registry) elif sub_estimator_type == "classifier": estimator = ConsumingClassifier(estimator_registry) else: raise ValueError("Unpermitted `sub_estimator_type`.") # pragma: nocover else: if sub_estimator_type == "regressor": estimator = NonConsumingRegressor() elif sub_estimator_type == "classifier": estimator = NonConsumingClassifier() else: raise ValueError("Unpermitted `sub_estimator_type`.") # pragma: nocover kwargs[estimator_name] = estimator if "scorer_name" in metaestimator_info: scorer_name = metaestimator_info["scorer_name"] scorer_registry = _Registry() scorer = ConsumingScorer(registry=scorer_registry) kwargs[scorer_name] = scorer if "cv_name" in metaestimator_info: cv_name = metaestimator_info["cv_name"] cv_registry = _Registry() cv = ConsumingSplitter(registry=cv_registry) kwargs[cv_name] = cv return ( kwargs, (estimator, estimator_registry), (scorer, scorer_registry), (cv, cv_registry), ) def filter_metadata_in_routing_methods(estimator_routing_methods): """Process estimator_routing_methods and return a dict. Parameters ---------- estimator_routing_methods : list of str or tuple The estimator_routing_methods info from METAESTIMATORS. Returns ------- routing_methods : dict The dictionary is of the form {"method": ["metadata", ...]}. It specifies the list of metadata keys for each routing method. By default the list includes `sample_weight` and `metadata`. 
""" res = dict() for method_spec in estimator_routing_methods: if isinstance(method_spec, str): method = method_spec metadata = ["sample_weight", "metadata"] else: method, metadata = method_spec res[method] = metadata return res def set_requests(obj, *, method_mapping, methods, metadata_name, value=True): """Call `set_{method}_request` on a list of methods from the sub-estimator. Parameters ---------- obj : BaseEstimator The object for which `set_{method}_request` methods are called. method_mapping : dict The method mapping in the form of `{caller: [callee, ...]}`. If a "caller" is not present in the method mapping, a one-to-one mapping is assumed. methods : list of str The list of methods as "caller"s for which the request for the child should be set. metadata_name : str The name of the metadata to be routed, usually either `"metadata"` or `"sample_weight"` in our tests. value : None, bool, or str The request value to be set, by default it's `True` """ for caller in methods: for callee in method_mapping.get(caller, [caller]): set_request_for_method = getattr(obj, f"set_{callee}_request") set_request_for_method(**{metadata_name: value}) if ( isinstance(obj, BaseEstimator) and is_classifier(obj) and callee == "partial_fit" ): set_request_for_method(classes=True) @pytest.mark.parametrize("estimator", UNSUPPORTED_ESTIMATORS) @config_context(enable_metadata_routing=True) def test_unsupported_estimators_get_metadata_routing(estimator): """Test that get_metadata_routing is not implemented on meta-estimators for which we haven't implemented routing yet.""" with pytest.raises(NotImplementedError): estimator.get_metadata_routing() @pytest.mark.parametrize("estimator", UNSUPPORTED_ESTIMATORS) @config_context(enable_metadata_routing=True) def test_unsupported_estimators_fit_with_metadata(estimator): """Test that fit raises NotImplementedError when metadata routing is enabled and a metadata is passed on meta-estimators for which we haven't implemented routing yet.""" with 
pytest.raises(NotImplementedError): try: estimator.fit([[1]], [1], sample_weight=[1]) except TypeError: # not all meta-estimators in the list support sample_weight, # and for those we skip this test. raise NotImplementedError @config_context(enable_metadata_routing=True) def test_registry_copy(): # test that _Registry is not copied into a new instance. a = _Registry() b = _Registry() assert a is not b assert a is copy.copy(a) assert a is copy.deepcopy(a) @pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) @config_context(enable_metadata_routing=True) def test_default_request(metaestimator): # Check that by default request is empty and the right type metaestimator_class = metaestimator["metaestimator"] kwargs, *_ = get_init_args(metaestimator, sub_estimator_consumes=True) instance = metaestimator_class(**kwargs) if "cv_name" in metaestimator: # Our GroupCV splitters request groups by default, which we should # ignore in this test. exclude = {"splitter": ["split"]} else: exclude = None assert_request_is_empty(instance.get_metadata_routing(), exclude=exclude) assert isinstance(instance.get_metadata_routing(), MetadataRouter) @pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) @config_context(enable_metadata_routing=True) def test_error_on_missing_requests_for_sub_estimator(metaestimator): # Test that a UnsetMetadataPassedError is raised when the sub-estimator's # requests are not set if "estimator" not in metaestimator: # This test only makes sense for metaestimators which have a # sub-estimator, e.g. 
MyMetaEstimator(estimator=MySubEstimator()) return metaestimator_class = metaestimator["metaestimator"] X = metaestimator["X"] y = metaestimator["y"] routing_methods = filter_metadata_in_routing_methods( metaestimator["estimator_routing_methods"] ) for method_name, metadata_keys in routing_methods.items(): for key in metadata_keys: kwargs, (estimator, _), (scorer, _), *_ = get_init_args( metaestimator, sub_estimator_consumes=True ) if scorer: scorer.set_score_request(**{key: True}) val = {"sample_weight": sample_weight, "metadata": metadata}[key] method_kwargs = {key: val} instance = metaestimator_class(**kwargs) msg = ( f"[{key}] are passed but are not explicitly set as requested or not" f" requested for {estimator.__class__.__name__}.{method_name}" ) with pytest.raises(UnsetMetadataPassedError, match=re.escape(msg)): method = getattr(instance, method_name) if "fit" not in method_name: # set request on fit set_requests( estimator, method_mapping=metaestimator.get("method_mapping", {}), methods=["fit"], metadata_name=key, ) instance.fit(X, y, **method_kwargs) # making sure the requests are unset, in case they were set as a # side effect of setting them for fit. For instance, if method # mapping for fit is: `"fit": ["fit", "score"]`, that would mean # calling `.score` here would not raise, because we have already # set request value for child estimator's `score`. set_requests( estimator, method_mapping=metaestimator.get("method_mapping", {}), methods=["fit"], metadata_name=key, value=None, ) try: # `fit`, `partial_fit`, 'score' accept y, others don't. method(X, y, **method_kwargs) except TypeError: method(X, **method_kwargs) @pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) @config_context(enable_metadata_routing=True) def test_setting_request_on_sub_estimator_removes_error(metaestimator): # When the metadata is explicitly requested on the sub-estimator, there # should be no errors. 
if "estimator" not in metaestimator: # This test only makes sense for metaestimators which have a # sub-estimator, e.g. MyMetaEstimator(estimator=MySubEstimator()) return metaestimator_class = metaestimator["metaestimator"] X = metaestimator["X"] y = metaestimator["y"] routing_methods = filter_metadata_in_routing_methods( metaestimator["estimator_routing_methods"] ) method_mapping = metaestimator.get("method_mapping", {}) preserves_metadata = metaestimator.get("preserves_metadata", True) for method_name, metadata_keys in routing_methods.items(): for key in metadata_keys: val = {"sample_weight": sample_weight, "metadata": metadata}[key] method_kwargs = {key: val} kwargs, (estimator, registry), (scorer, _), (cv, _) = get_init_args( metaestimator, sub_estimator_consumes=True ) if scorer: set_requests( scorer, method_mapping={}, methods=["score"], metadata_name=key ) if cv: cv.set_split_request(groups=True, metadata=True) # `set_{method}_request({metadata}==True)` on the underlying objects set_requests( estimator, method_mapping=method_mapping, methods=[method_name], metadata_name=key, ) instance = metaestimator_class(**kwargs) method = getattr(instance, method_name) extra_method_args = metaestimator.get("method_args", {}).get( method_name, {} ) if "fit" not in method_name: # fit before calling method instance.fit(X, y) try: # `fit` and `partial_fit` accept y, others don't. 
method(X, y, **method_kwargs, **extra_method_args) except TypeError: method(X, **method_kwargs, **extra_method_args) # sanity check that registry is not empty, or else the test passes # trivially assert registry split_params = ( method_kwargs.keys() if preserves_metadata == "subset" else () ) for estimator in registry: check_recorded_metadata( estimator, method=method_name, parent=method_name, split_params=split_params, **method_kwargs, ) @pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) @config_context(enable_metadata_routing=True) def test_non_consuming_estimator_works(metaestimator): # Test that when a non-consuming estimator is given, the meta-estimator # works w/o setting any requests. # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28239 if "estimator" not in metaestimator: # This test only makes sense for metaestimators which have a # sub-estimator, e.g. MyMetaEstimator(estimator=MySubEstimator()) return def set_request(estimator, method_name): # e.g. call set_fit_request on estimator if is_classifier(estimator) and method_name == "partial_fit": estimator.set_partial_fit_request(classes=True) metaestimator_class = metaestimator["metaestimator"] X = metaestimator["X"] y = metaestimator["y"] routing_methods = filter_metadata_in_routing_methods( metaestimator["estimator_routing_methods"] ) for method_name in routing_methods: kwargs, (estimator, _), (_, _), (_, _) = get_init_args( metaestimator, sub_estimator_consumes=False ) instance = metaestimator_class(**kwargs) set_request(estimator, method_name) method = getattr(instance, method_name) extra_method_args = metaestimator.get("method_args", {}).get(method_name, {}) if "fit" not in method_name: instance.fit(X, y, **extra_method_args) # The following should pass w/o raising a routing error. try: # `fit` and `partial_fit` accept y, others don't. 
method(X, y, **extra_method_args) except TypeError: method(X, **extra_method_args) @pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) @config_context(enable_metadata_routing=True) def test_metadata_is_routed_correctly_to_scorer(metaestimator): """Test that any requested metadata is correctly routed to the underlying scorers in CV estimators. """ if "scorer_name" not in metaestimator: # This test only makes sense for CV estimators return metaestimator_class = metaestimator["metaestimator"] routing_methods = metaestimator["scorer_routing_methods"] method_mapping = metaestimator.get("method_mapping", {}) for method_name in routing_methods: kwargs, (estimator, _), (scorer, registry), (cv, _) = get_init_args( metaestimator, sub_estimator_consumes=True ) scorer.set_score_request(sample_weight=True) if cv: cv.set_split_request(groups=True, metadata=True) if estimator is not None: set_requests( estimator, method_mapping=method_mapping, methods=[method_name], metadata_name="sample_weight", ) instance = metaestimator_class(**kwargs) method = getattr(instance, method_name) method_kwargs = {"sample_weight": sample_weight} if "fit" not in method_name: instance.fit(X, y) method(X, y, **method_kwargs) assert registry for _scorer in registry: check_recorded_metadata( obj=_scorer, method="score", parent=method_name, split_params=("sample_weight",), **method_kwargs, ) @pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) @config_context(enable_metadata_routing=True) def test_metadata_is_routed_correctly_to_splitter(metaestimator): """Test that any requested metadata is correctly routed to the underlying splitters in CV estimators. 
""" if "cv_routing_methods" not in metaestimator: # This test is only for metaestimators accepting a CV splitter return metaestimator_class = metaestimator["metaestimator"] routing_methods = metaestimator["cv_routing_methods"] X_ = metaestimator["X"] y_ = metaestimator["y"] for method_name in routing_methods: kwargs, (estimator, _), (scorer, _), (cv, registry) = get_init_args( metaestimator, sub_estimator_consumes=True ) if estimator: estimator.set_fit_request(sample_weight=False, metadata=False) if scorer: scorer.set_score_request(sample_weight=False, metadata=False) cv.set_split_request(groups=True, metadata=True) instance = metaestimator_class(**kwargs) method_kwargs = {"groups": groups, "metadata": metadata} method = getattr(instance, method_name) method(X_, y_, **method_kwargs) assert registry for _splitter in registry: check_recorded_metadata( obj=_splitter, method="split", parent=method_name, **method_kwargs ) @pytest.mark.parametrize("metaestimator", METAESTIMATORS, ids=METAESTIMATOR_IDS) @config_context(enable_metadata_routing=True) def test_metadata_routed_to_group_splitter(metaestimator):
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_multioutput.py
sklearn/tests/test_multioutput.py
import re import numpy as np import pytest from joblib import cpu_count from sklearn import datasets from sklearn.base import ClassifierMixin, clone from sklearn.datasets import ( load_linnerud, make_classification, make_multilabel_classification, make_regression, ) from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.ensemble import ( GradientBoostingRegressor, RandomForestClassifier, StackingRegressor, ) from sklearn.exceptions import NotFittedError from sklearn.impute import SimpleImputer from sklearn.linear_model import ( Lasso, LinearRegression, LogisticRegression, OrthogonalMatchingPursuit, Ridge, SGDClassifier, SGDRegressor, ) from sklearn.metrics import jaccard_score, mean_squared_error from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.multiclass import OneVsRestClassifier from sklearn.multioutput import ( ClassifierChain, MultiOutputClassifier, MultiOutputRegressor, RegressorChain, ) from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier from sklearn.utils import shuffle from sklearn.utils._testing import ( assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.fixes import ( BSR_CONTAINERS, COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS, DOK_CONTAINERS, LIL_CONTAINERS, ) def test_multi_target_regression(): X, y = datasets.make_regression(n_targets=3, random_state=0) X_train, y_train = X[:50], y[:50] X_test, y_test = X[50:], y[50:] references = np.zeros_like(y_test) for n in range(3): rgr = GradientBoostingRegressor(random_state=0) rgr.fit(X_train, y_train[:, n]) references[:, n] = rgr.predict(X_test) rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr.fit(X_train, y_train) y_pred = rgr.predict(X_test) assert_almost_equal(references, y_pred) def test_multi_target_regression_partial_fit(): X, y = datasets.make_regression(n_targets=3, random_state=0) X_train, y_train = X[:50], y[:50] 
X_test, y_test = X[50:], y[50:] references = np.zeros_like(y_test) half_index = 25 for n in range(3): sgr = SGDRegressor(random_state=0, max_iter=5) sgr.partial_fit(X_train[:half_index], y_train[:half_index, n]) sgr.partial_fit(X_train[half_index:], y_train[half_index:, n]) references[:, n] = sgr.predict(X_test) sgr = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5)) sgr.partial_fit(X_train[:half_index], y_train[:half_index]) sgr.partial_fit(X_train[half_index:], y_train[half_index:]) y_pred = sgr.predict(X_test) assert_almost_equal(references, y_pred) assert not hasattr(MultiOutputRegressor(Lasso), "partial_fit") def test_multi_target_regression_one_target(): # Test multi target regression raises X, y = datasets.make_regression(n_targets=1, random_state=0) rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) msg = "at least two dimensions" with pytest.raises(ValueError, match=msg): rgr.fit(X, y) @pytest.mark.parametrize( "sparse_container", CSR_CONTAINERS + CSC_CONTAINERS + COO_CONTAINERS + LIL_CONTAINERS + DOK_CONTAINERS + BSR_CONTAINERS, ) def test_multi_target_sparse_regression(sparse_container): X, y = datasets.make_regression(n_targets=3, random_state=0) X_train, y_train = X[:50], y[:50] X_test = X[50:] rgr = MultiOutputRegressor(Lasso(random_state=0)) rgr_sparse = MultiOutputRegressor(Lasso(random_state=0)) rgr.fit(X_train, y_train) rgr_sparse.fit(sparse_container(X_train), y_train) assert_almost_equal( rgr.predict(X_test), rgr_sparse.predict(sparse_container(X_test)) ) def test_multi_target_sample_weights_api(): X = [[1, 2, 3], [4, 5, 6]] y = [[3.141, 2.718], [2.718, 3.141]] w = [0.8, 0.6] rgr = MultiOutputRegressor(OrthogonalMatchingPursuit()) msg = "does not support sample weights" with pytest.raises(ValueError, match=msg): rgr.fit(X, y, w) # no exception should be raised if the base estimator supports weights rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr.fit(X, y, w) def 
test_multi_target_sample_weight_partial_fit(): # weighted regressor X = [[1, 2, 3], [4, 5, 6]] y = [[3.141, 2.718], [2.718, 3.141]] w = [2.0, 1.0] rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5)) rgr_w.partial_fit(X, y, w) # weighted with different weights w = [2.0, 2.0] rgr = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5)) rgr.partial_fit(X, y, w) assert rgr.predict(X)[0][0] != rgr_w.predict(X)[0][0] def test_multi_target_sample_weights(): # weighted regressor Xw = [[1, 2, 3], [4, 5, 6]] yw = [[3.141, 2.718], [2.718, 3.141]] w = [2.0, 1.0] rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr_w.fit(Xw, yw, w) # unweighted, but with repeated samples X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]] y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]] rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0)) rgr.fit(X, y) X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test)) # Import the data iris = datasets.load_iris() # create a multiple targets by randomized shuffling and concatenating y. 
X = iris.data y1 = iris.target y2 = shuffle(y1, random_state=1) y3 = shuffle(y1, random_state=2) y = np.column_stack((y1, y2, y3)) n_samples, n_features = X.shape n_outputs = y.shape[1] n_classes = len(np.unique(y1)) classes = list(map(np.unique, (y1, y2, y3))) # TODO: remove mark once loky bug is fixed: # https://github.com/joblib/loky/issues/458 @pytest.mark.thread_unsafe def test_multi_output_classification_partial_fit_parallelism(): sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=4) mor.partial_fit(X, y, classes) est1 = mor.estimators_[0] mor.partial_fit(X, y) est2 = mor.estimators_[0] if cpu_count() > 1: # parallelism requires this to be the case for a sane implementation assert est1 is not est2 # check multioutput has predict_proba def test_hasattr_multi_output_predict_proba(): # default SGDClassifier has loss='hinge' # which does not expose a predict_proba method sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5) multi_target_linear = MultiOutputClassifier(sgd_linear_clf) multi_target_linear.fit(X, y) assert not hasattr(multi_target_linear, "predict_proba") # case where predict_proba attribute exists sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) multi_target_linear = MultiOutputClassifier(sgd_linear_clf) multi_target_linear.fit(X, y) assert hasattr(multi_target_linear, "predict_proba") # check predict_proba passes def test_multi_output_predict_proba(): sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5) param = {"loss": ("hinge", "log_loss", "modified_huber")} # inner function for custom scoring def custom_scorer(estimator, X, y): if hasattr(estimator, "predict_proba"): return 1.0 else: return 0.0 grid_clf = GridSearchCV( sgd_linear_clf, param_grid=param, scoring=custom_scorer, cv=3, error_score="raise", ) multi_target_linear = MultiOutputClassifier(grid_clf) multi_target_linear.fit(X, y) multi_target_linear.predict_proba(X) # 
SGDClassifier defaults to loss='hinge' which is not a probabilistic # loss function; therefore it does not expose a predict_proba method sgd_linear_clf = SGDClassifier(random_state=1, max_iter=5) multi_target_linear = MultiOutputClassifier(sgd_linear_clf) multi_target_linear.fit(X, y) inner2_msg = "probability estimates are not available for loss='hinge'" inner1_msg = "'SGDClassifier' has no attribute 'predict_proba'" outer_msg = "'MultiOutputClassifier' has no attribute 'predict_proba'" with pytest.raises(AttributeError, match=outer_msg) as exec_info: multi_target_linear.predict_proba(X) assert isinstance(exec_info.value.__cause__, AttributeError) assert inner1_msg in str(exec_info.value.__cause__) assert isinstance(exec_info.value.__cause__.__cause__, AttributeError) assert inner2_msg in str(exec_info.value.__cause__.__cause__) def test_multi_output_classification_partial_fit(): # test if multi_target initializes correctly with base estimator and fit # assert predictions work as expected for predict sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) multi_target_linear = MultiOutputClassifier(sgd_linear_clf) # train the multi_target_linear and also get the predictions. 
half_index = X.shape[0] // 2 multi_target_linear.partial_fit(X[:half_index], y[:half_index], classes=classes) first_predictions = multi_target_linear.predict(X) assert (n_samples, n_outputs) == first_predictions.shape multi_target_linear.partial_fit(X[half_index:], y[half_index:]) second_predictions = multi_target_linear.predict(X) assert (n_samples, n_outputs) == second_predictions.shape # train the linear classification with each column and assert that # predictions are equal after first partial_fit and second partial_fit for i in range(3): # create a clone with the same state sgd_linear_clf = clone(sgd_linear_clf) sgd_linear_clf.partial_fit( X[:half_index], y[:half_index, i], classes=classes[i] ) assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i]) sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i]) assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i]) def test_multi_output_classification_partial_fit_no_first_classes_exception(): sgd_linear_clf = SGDClassifier(loss="log_loss", random_state=1, max_iter=5) multi_target_linear = MultiOutputClassifier(sgd_linear_clf) msg = "classes must be passed on the first call to partial_fit." with pytest.raises(ValueError, match=msg): multi_target_linear.partial_fit(X, y) def test_multi_output_classification(): # test if multi_target initializes correctly with base estimator and fit # assert predictions work as expected for predict, prodict_proba and score forest = RandomForestClassifier(n_estimators=10, random_state=1) multi_target_forest = MultiOutputClassifier(forest) # train the multi_target_forest and also get the predictions. 
multi_target_forest.fit(X, y) predictions = multi_target_forest.predict(X) assert (n_samples, n_outputs) == predictions.shape predict_proba = multi_target_forest.predict_proba(X) assert len(predict_proba) == n_outputs for class_probabilities in predict_proba: assert (n_samples, n_classes) == class_probabilities.shape assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1), predictions) # train the forest with each column and assert that predictions are equal for i in range(3): forest_ = clone(forest) # create a clone with the same state forest_.fit(X, y[:, i]) assert list(forest_.predict(X)) == list(predictions[:, i]) assert_array_equal(list(forest_.predict_proba(X)), list(predict_proba[i])) def test_multiclass_multioutput_estimator(): # test to check meta of meta estimators svc = LinearSVC(random_state=0) multi_class_svc = OneVsRestClassifier(svc) multi_target_svc = MultiOutputClassifier(multi_class_svc) multi_target_svc.fit(X, y) predictions = multi_target_svc.predict(X) assert (n_samples, n_outputs) == predictions.shape # train the forest with each column and assert that predictions are equal for i in range(3): multi_class_svc_ = clone(multi_class_svc) # create a clone multi_class_svc_.fit(X, y[:, i]) assert list(multi_class_svc_.predict(X)) == list(predictions[:, i]) def test_multiclass_multioutput_estimator_predict_proba(): seed = 542 # make test deterministic rng = np.random.RandomState(seed) # random features X = rng.normal(size=(5, 5)) # random labels y1 = np.array(["b", "a", "a", "b", "a"]).reshape(5, 1) # 2 classes y2 = np.array(["d", "e", "f", "e", "d"]).reshape(5, 1) # 3 classes Y = np.concatenate([y1, y2], axis=1) clf = MultiOutputClassifier(LogisticRegression(random_state=seed)) clf.fit(X, Y) y_result = clf.predict_proba(X) y_actual = [ np.array( [ [0.31525135, 0.68474865], [0.81004803, 0.18995197], [0.65664086, 0.34335914], [0.38584929, 0.61415071], [0.83234285, 0.16765715], ] ), np.array( [ [0.65759215, 0.20976588, 0.13264197], [0.14996984, 
0.82591444, 0.02411571], [0.13111876, 0.13294966, 0.73593158], [0.24663053, 0.65860244, 0.09476703], [0.81458885, 0.1728158, 0.01259535], ] ), ] for i in range(len(y_actual)): assert_almost_equal(y_result[i], y_actual[i]) def test_multi_output_classification_sample_weights(): # weighted classifier Xw = [[1, 2, 3], [4, 5, 6]] yw = [[3, 2], [2, 3]] w = np.asarray([2.0, 1.0]) forest = RandomForestClassifier(n_estimators=10, random_state=1) clf_w = MultiOutputClassifier(forest) clf_w.fit(Xw, yw, w) # unweighted, but with repeated samples X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]] y = [[3, 2], [3, 2], [2, 3]] forest = RandomForestClassifier(n_estimators=10, random_state=1) clf = MultiOutputClassifier(forest) clf.fit(X, y) X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]] assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test)) def test_multi_output_classification_partial_fit_sample_weights(): # weighted classifier Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] yw = [[3, 2], [2, 3], [3, 2]] w = np.asarray([2.0, 1.0, 1.0]) sgd_linear_clf = SGDClassifier(random_state=1, max_iter=20, tol=None) clf_w = MultiOutputClassifier(sgd_linear_clf) clf_w.fit(Xw, yw, w) # unweighted, but with repeated samples X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] y = [[3, 2], [3, 2], [2, 3], [3, 2]] sgd_linear_clf = SGDClassifier(random_state=1, max_iter=20, tol=None) clf = MultiOutputClassifier(sgd_linear_clf) clf.fit(X, y) X_test = [[1.5, 2.5, 3.5]] assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test)) def test_multi_output_exceptions(): # NotFittedError when fit is not done but score, predict and # and predict_proba are called moc = MultiOutputClassifier(LinearSVC(random_state=0)) with pytest.raises(NotFittedError): moc.score(X, y) # ValueError when number of outputs is different # for fit and score y_new = np.column_stack((y1, y2)) moc.fit(X, y) with pytest.raises(ValueError): moc.score(X, y_new) # ValueError when y is continuous msg = "Unknown label type" with 
pytest.raises(ValueError, match=msg): moc.fit(X, X[:, 1]) @pytest.mark.parametrize("response_method", ["predict_proba", "predict"]) def test_multi_output_not_fitted_error(response_method): """Check that we raise the proper error when the estimator is not fitted""" moc = MultiOutputClassifier(LogisticRegression()) with pytest.raises(NotFittedError): getattr(moc, response_method)(X) def test_multi_output_delegate_predict_proba(): """Check the behavior for the delegation of predict_proba to the underlying estimator""" # A base estimator with `predict_proba`should expose the method even before fit moc = MultiOutputClassifier(LogisticRegression()) assert hasattr(moc, "predict_proba") moc.fit(X, y) assert hasattr(moc, "predict_proba") # A base estimator without `predict_proba` should raise an AttributeError moc = MultiOutputClassifier(LinearSVC()) assert not hasattr(moc, "predict_proba") outer_msg = "'MultiOutputClassifier' has no attribute 'predict_proba'" inner_msg = "'LinearSVC' object has no attribute 'predict_proba'" with pytest.raises(AttributeError, match=outer_msg) as exec_info: moc.predict_proba(X) assert isinstance(exec_info.value.__cause__, AttributeError) assert inner_msg == str(exec_info.value.__cause__) moc.fit(X, y) assert not hasattr(moc, "predict_proba") with pytest.raises(AttributeError, match=outer_msg) as exec_info: moc.predict_proba(X) assert isinstance(exec_info.value.__cause__, AttributeError) assert inner_msg == str(exec_info.value.__cause__) def generate_multilabel_dataset_with_correlations(): # Generate a multilabel data set from a multiclass dataset as a way of # by representing the integer number of the original class using a binary # encoding. 
X, y = make_classification( n_samples=1000, n_features=100, n_classes=16, n_informative=10, random_state=0 ) Y_multi = np.array([[int(yyy) for yyy in format(yy, "#06b")[2:]] for yy in y]) return X, Y_multi @pytest.mark.parametrize("chain_method", ["predict", "decision_function"]) def test_classifier_chain_fit_and_predict_with_linear_svc(chain_method): # Fit classifier chain and verify predict performance using LinearSVC X, Y = generate_multilabel_dataset_with_correlations() classifier_chain = ClassifierChain( LinearSVC(), chain_method=chain_method, ).fit(X, Y) Y_pred = classifier_chain.predict(X) assert Y_pred.shape == Y.shape Y_decision = classifier_chain.decision_function(X) Y_binary = Y_decision >= 0 assert_array_equal(Y_binary, Y_pred) assert not hasattr(classifier_chain, "predict_proba") @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_classifier_chain_fit_and_predict_with_sparse_data(csr_container): # Fit classifier chain with sparse data X, Y = generate_multilabel_dataset_with_correlations() X_sparse = csr_container(X) classifier_chain = ClassifierChain(LogisticRegression()).fit(X_sparse, Y) Y_pred_sparse = classifier_chain.predict(X_sparse) classifier_chain = ClassifierChain(LogisticRegression()).fit(X, Y) Y_pred_dense = classifier_chain.predict(X) assert_array_equal(Y_pred_sparse, Y_pred_dense) def test_classifier_chain_vs_independent_models(): # Verify that an ensemble of classifier chains (each of length # N) can achieve a higher Jaccard similarity score than N independent # models X, Y = generate_multilabel_dataset_with_correlations() X_train = X[:600, :] X_test = X[600:, :] Y_train = Y[:600, :] Y_test = Y[600:, :] ovr = OneVsRestClassifier(LogisticRegression()) ovr.fit(X_train, Y_train) Y_pred_ovr = ovr.predict(X_test) chain = ClassifierChain(LogisticRegression()) chain.fit(X_train, Y_train) Y_pred_chain = chain.predict(X_test) assert jaccard_score(Y_test, Y_pred_chain, average="samples") > jaccard_score( Y_test, Y_pred_ovr, 
average="samples" ) @pytest.mark.parametrize( "chain_method", ["predict", "predict_proba", "predict_log_proba", "decision_function"], ) @pytest.mark.parametrize("response_method", ["predict_proba", "predict_log_proba"]) def test_classifier_chain_fit_and_predict(chain_method, response_method): # Fit classifier chain and verify predict performance X, Y = generate_multilabel_dataset_with_correlations() chain = ClassifierChain(LogisticRegression(), chain_method=chain_method) chain.fit(X, Y) Y_pred = chain.predict(X) assert Y_pred.shape == Y.shape assert [c.coef_.size for c in chain.estimators_] == list( range(X.shape[1], X.shape[1] + Y.shape[1]) ) Y_prob = getattr(chain, response_method)(X) if response_method == "predict_log_proba": Y_prob = np.exp(Y_prob) Y_binary = Y_prob >= 0.5 assert_array_equal(Y_binary, Y_pred) assert isinstance(chain, ClassifierMixin) def test_regressor_chain_fit_and_predict(): # Fit regressor chain and verify Y and estimator coefficients shape X, Y = generate_multilabel_dataset_with_correlations() chain = RegressorChain(Ridge()) chain.fit(X, Y) Y_pred = chain.predict(X) assert Y_pred.shape == Y.shape assert [c.coef_.size for c in chain.estimators_] == list( range(X.shape[1], X.shape[1] + Y.shape[1]) ) @pytest.mark.parametrize("csr_container", CSR_CONTAINERS) def test_base_chain_fit_and_predict_with_sparse_data_and_cv(csr_container): # Fit base chain with sparse data cross_val_predict X, Y = generate_multilabel_dataset_with_correlations() X_sparse = csr_container(X) base_chains = [ ClassifierChain(LogisticRegression(), cv=3), RegressorChain(Ridge(), cv=3), ] for chain in base_chains: chain.fit(X_sparse, Y) Y_pred = chain.predict(X_sparse) assert Y_pred.shape == Y.shape def test_base_chain_random_order(): # Fit base chain with random order X, Y = generate_multilabel_dataset_with_correlations() for chain in [ClassifierChain(LogisticRegression()), RegressorChain(Ridge())]: chain_random = clone(chain).set_params(order="random", random_state=42) 
chain_random.fit(X, Y) chain_fixed = clone(chain).set_params(order=chain_random.order_) chain_fixed.fit(X, Y) assert_array_equal(chain_fixed.order_, chain_random.order_) assert list(chain_random.order) != list(range(4)) assert len(chain_random.order_) == 4 assert len(set(chain_random.order_)) == 4 # Randomly ordered chain should behave identically to a fixed order # chain with the same order. for est1, est2 in zip(chain_random.estimators_, chain_fixed.estimators_): assert_array_almost_equal(est1.coef_, est2.coef_) @pytest.mark.parametrize( "chain_type, chain_method", [ ("classifier", "predict"), ("classifier", "predict_proba"), ("classifier", "predict_log_proba"), ("classifier", "decision_function"), ("regressor", ""), ], ) def test_base_chain_crossval_fit_and_predict(chain_type, chain_method): # Fit chain with cross_val_predict and verify predict # performance X, Y = generate_multilabel_dataset_with_correlations() if chain_type == "classifier": chain = ClassifierChain(LogisticRegression(), chain_method=chain_method) else: chain = RegressorChain(Ridge()) chain.fit(X, Y) chain_cv = clone(chain).set_params(cv=3) chain_cv.fit(X, Y) Y_pred_cv = chain_cv.predict(X) Y_pred = chain.predict(X) assert Y_pred_cv.shape == Y_pred.shape assert not np.all(Y_pred == Y_pred_cv) if isinstance(chain, ClassifierChain): assert jaccard_score(Y, Y_pred_cv, average="samples") > 0.4 else: assert mean_squared_error(Y, Y_pred_cv) < 0.25 @pytest.mark.parametrize( "estimator", [ RandomForestClassifier(n_estimators=2), MultiOutputClassifier(RandomForestClassifier(n_estimators=2)), ClassifierChain(RandomForestClassifier(n_estimators=2)), ], ) def test_multi_output_classes_(estimator): # Tests classes_ attribute of multioutput classifiers # RandomForestClassifier supports multioutput out-of-the-box estimator = clone(estimator).fit(X, y) assert isinstance(estimator.classes_, list) assert len(estimator.classes_) == n_outputs for estimator_classes, expected_classes in zip(classes, 
estimator.classes_): assert_array_equal(estimator_classes, expected_classes) class DummyRegressorWithFitParams(DummyRegressor): def fit(self, X, y, sample_weight=None, **fit_params): self._fit_params = fit_params return super().fit(X, y, sample_weight) class DummyClassifierWithFitParams(DummyClassifier): def fit(self, X, y, sample_weight=None, **fit_params): self._fit_params = fit_params return super().fit(X, y, sample_weight) @pytest.mark.parametrize( "estimator, dataset", [ ( MultiOutputClassifier(DummyClassifierWithFitParams(strategy="prior")), datasets.make_multilabel_classification(), ), ( MultiOutputRegressor(DummyRegressorWithFitParams()), datasets.make_regression(n_targets=3, random_state=0), ), ], ) def test_multioutput_estimator_with_fit_params(estimator, dataset): estimator = clone(estimator) # Avoid side effects from shared instances X, y = dataset some_param = np.zeros_like(X) estimator.fit(X, y, some_param=some_param) for dummy_estimator in estimator.estimators_: assert "some_param" in dummy_estimator._fit_params def test_regressor_chain_w_fit_params(): # Make sure fit_params are properly propagated to the sub-estimators rng = np.random.RandomState(0) X, y = datasets.make_regression(n_targets=3, random_state=0) weight = rng.rand(y.shape[0]) class MySGD(SGDRegressor): def fit(self, X, y, **fit_params): self.sample_weight_ = fit_params["sample_weight"] super().fit(X, y, **fit_params) model = RegressorChain(MySGD()) # Fitting with params fit_param = {"sample_weight": weight} model.fit(X, y, **fit_param) for est in model.estimators_: assert est.sample_weight_ is weight @pytest.mark.parametrize( "MultiOutputEstimator, Estimator", [(MultiOutputClassifier, LogisticRegression), (MultiOutputRegressor, Ridge)], ) # FIXME: we should move this test in `estimator_checks` once we are able # to construct meta-estimator instances def test_support_missing_values(MultiOutputEstimator, Estimator): # smoke test to check that pipeline MultioutputEstimators are letting # 
the validation of missing values to # the underlying pipeline, regressor or classifier rng = np.random.RandomState(42) X, y = rng.randn(50, 2), rng.binomial(1, 0.5, (50, 3)) mask = rng.choice([1, 0], X.shape, p=[0.01, 0.99]).astype(bool) X[mask] = np.nan pipe = make_pipeline(SimpleImputer(), Estimator()) MultiOutputEstimator(pipe).fit(X, y).score(X, y) @pytest.mark.parametrize("order_type", [list, np.array, tuple]) def test_classifier_chain_tuple_order(order_type): X = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] y = [[3, 2], [2, 3], [3, 2]] order = order_type([1, 0]) chain = ClassifierChain( RandomForestClassifier(n_estimators=2, random_state=0), order=order ) chain.fit(X, y) X_test = [[1.5, 2.5, 3.5]] y_test = [[3, 2]] assert_array_almost_equal(chain.predict(X_test), y_test) def test_classifier_chain_tuple_invalid_order(): X = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]] y = [[3, 2], [2, 3], [3, 2]] order = tuple([1, 2]) chain = ClassifierChain(RandomForestClassifier(), order=order) with pytest.raises(ValueError, match="invalid order"): chain.fit(X, y) def test_classifier_chain_verbose(capsys): X, y = make_multilabel_classification( n_samples=100, n_features=5, n_classes=3, n_labels=3, random_state=0 ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) pattern = ( r"\[Chain\].*\(1 of 3\) Processing order 0, total=.*\n" r"\[Chain\].*\(2 of 3\) Processing order 1, total=.*\n" r"\[Chain\].*\(3 of 3\) Processing order 2, total=.*\n$" ) classifier = ClassifierChain( DecisionTreeClassifier(), order=[0, 1, 2], random_state=0, verbose=True, ) classifier.fit(X_train, y_train) assert re.match(pattern, capsys.readouterr()[0]) def test_regressor_chain_verbose(capsys): X, y = make_regression(n_samples=125, n_targets=3, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) pattern = ( r"\[Chain\].*\(1 of 3\) Processing order 1, total=.*\n" r"\[Chain\].*\(2 of 3\) Processing order 0, total=.*\n" r"\[Chain\].*\(3 of 3\) Processing 
order 2, total=.*\n$" ) regressor = RegressorChain( LinearRegression(), order=[1, 0, 2], random_state=0, verbose=True, ) regressor.fit(X_train, y_train) assert re.match(pattern, capsys.readouterr()[0]) def test_multioutputregressor_ducktypes_fitted_estimator(): """Test that MultiOutputRegressor checks the fitted estimator for predict. Non-regression test for #16549.""" X, y = load_linnerud(return_X_y=True) stacker = StackingRegressor( estimators=[("sgd", SGDRegressor(random_state=1))], final_estimator=Ridge(), cv=2, ) reg = MultiOutputRegressor(estimator=stacker).fit(X, y) # Does not raise reg.predict(X) @pytest.mark.parametrize( "Cls, method", [(ClassifierChain, "fit"), (MultiOutputClassifier, "partial_fit")] ) def test_fit_params_no_routing(Cls, method): """Check that we raise an error when passing metadata not requested by the underlying classifier. """ X, y = make_classification(n_samples=50) clf = Cls(SGDClassifier()) with pytest.raises(ValueError, match="is only supported if"): getattr(clf, method)(X, y, test=1) def test_multioutput_regressor_has_partial_fit(): # Test that an unfitted MultiOutputRegressor handles available_if for # partial_fit correctly est = MultiOutputRegressor(LinearRegression()) msg = "This 'MultiOutputRegressor' has no attribute 'partial_fit'" with pytest.raises(AttributeError, match=msg): getattr(est, "partial_fit") # TODO(1.9): remove when deprecated `base_estimator` is removed @pytest.mark.parametrize("Estimator", [ClassifierChain, RegressorChain]) def test_base_estimator_deprecation(Estimator): """Check that we warn about the deprecation of `base_estimator`.""" X = np.array([[1, 2], [3, 4]]) y = np.array([[1, 0], [0, 1]]) estimator = LogisticRegression() with pytest.warns(FutureWarning): Estimator(base_estimator=estimator).fit(X, y) with pytest.raises(ValueError): Estimator(base_estimator=estimator, estimator=estimator).fit(X, y)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/__init__.py
sklearn/tests/__init__.py
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_isotonic.py
sklearn/tests/test_isotonic.py
import copy import pickle import warnings import numpy as np import pytest from scipy.special import expit import sklearn from sklearn.datasets import make_regression from sklearn.isotonic import ( IsotonicRegression, _make_unique, check_increasing, isotonic_regression, ) from sklearn.utils import shuffle from sklearn.utils._testing import ( assert_allclose, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.validation import check_array def test_permutation_invariance(): # check that fit is permutation invariant. # regression test of missing sorting of sample-weights ir = IsotonicRegression() x = [1, 2, 3, 4, 5, 6, 7] y = [1, 41, 51, 1, 2, 5, 24] sample_weight = [1, 2, 3, 4, 5, 6, 7] x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0) y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight) y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x) assert_array_equal(y_transformed, y_transformed_s) def test_check_increasing_small_number_of_samples(): x = [0, 1, 2] y = [1, 1.1, 1.05] with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) is_increasing = check_increasing(x, y) assert is_increasing def test_check_increasing_up(): x = [0, 1, 2, 3, 4, 5] y = [0, 1.5, 2.77, 8.99, 8.99, 50] # Check that we got increasing=True and no warnings with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) is_increasing = check_increasing(x, y) assert is_increasing def test_check_increasing_up_extreme(): x = [0, 1, 2, 3, 4, 5] y = [0, 1, 2, 3, 4, 5] # Check that we got increasing=True and no warnings with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) is_increasing = check_increasing(x, y) assert is_increasing def test_check_increasing_down(): x = [0, 1, 2, 3, 4, 5] y = [0, -1.5, -2.77, -8.99, -8.99, -50] # Check that we got increasing=False and no warnings with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) is_increasing = 
check_increasing(x, y) assert not is_increasing def test_check_increasing_down_extreme(): x = [0, 1, 2, 3, 4, 5] y = [0, -1, -2, -3, -4, -5] # Check that we got increasing=False and no warnings with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) is_increasing = check_increasing(x, y) assert not is_increasing def test_check_ci_warn(): x = [0, 1, 2, 3, 4, 5] y = [0, -1, 2, -3, 4, -5] # Check that we got increasing=False and CI interval warning msg = "interval" with pytest.warns(UserWarning, match=msg): is_increasing = check_increasing(x, y) assert not is_increasing def test_isotonic_regression(): y = np.array([3, 7, 5, 9, 8, 7, 10]) y_ = np.array([3, 6, 6, 8, 8, 8, 10]) assert_array_equal(y_, isotonic_regression(y)) y = np.array([10, 0, 2]) y_ = np.array([4, 4, 4]) assert_array_equal(y_, isotonic_regression(y)) x = np.arange(len(y)) ir = IsotonicRegression(y_min=0.0, y_max=1.0) ir.fit(x, y) assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y)) assert_array_equal(ir.transform(x), ir.predict(x)) # check that it is immune to permutation perm = np.random.permutation(len(y)) ir = IsotonicRegression(y_min=0.0, y_max=1.0) assert_array_equal(ir.fit_transform(x[perm], y[perm]), ir.fit_transform(x, y)[perm]) assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm]) # check we don't crash when all x are equal: ir = IsotonicRegression() assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y)) def test_isotonic_regression_ties_min(): # Setup examples with ties on minimum x = [1, 1, 2, 3, 4, 5] y = [1, 2, 3, 4, 5, 6] y_true = [1.5, 1.5, 3, 4, 5, 6] # Check that we get identical results for fit/transform and fit_transform ir = IsotonicRegression() ir.fit(x, y) assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y)) assert_array_equal(y_true, ir.fit_transform(x, y)) def test_isotonic_regression_ties_max(): # Setup examples with ties on maximum x = [1, 2, 3, 4, 5, 5] y = [1, 2, 3, 4, 5, 6] y_true = [1, 2, 3, 4, 
5.5, 5.5] # Check that we get identical results for fit/transform and fit_transform ir = IsotonicRegression() ir.fit(x, y) assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y)) assert_array_equal(y_true, ir.fit_transform(x, y)) def test_isotonic_regression_ties_secondary_(): """ Test isotonic regression fit, transform and fit_transform against the "secondary" ties method and "pituitary" data from R "isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair, Isotone Optimization in R: Pool-Adjacent-Violators Algorithm (PAVA) and Active Set Methods Set values based on pituitary example and the following R command detailed in the paper above: > library("isotone") > data("pituitary") > res1 <- gpava(pituitary$age, pituitary$size, ties="secondary") > res1$x `isotone` version: 1.0-2, 2014-09-07 R version: R version 3.1.1 (2014-07-10) """ x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14] y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25] y_true = [ 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 24.25, 24.25, ] # Check fit, transform and fit_transform ir = IsotonicRegression() ir.fit(x, y) assert_array_almost_equal(ir.transform(x), y_true, 4) assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4) def test_isotonic_regression_with_ties_in_differently_sized_groups(): """ Non-regression test to handle issue 9432: https://github.com/scikit-learn/scikit-learn/issues/9432 Compare against output in R: > library("isotone") > x <- c(0, 1, 1, 2, 3, 4) > y <- c(0, 0, 1, 0, 0, 1) > res1 <- gpava(x, y, ties="secondary") > res1$x `isotone` version: 1.1-0, 2015-07-24 R version: R version 3.3.2 (2016-10-31) """ x = np.array([0, 1, 1, 2, 3, 4]) y = np.array([0, 0, 1, 0, 0, 1]) y_true = np.array([0.0, 0.25, 0.25, 0.25, 0.25, 1.0]) ir = IsotonicRegression() ir.fit(x, y) assert_array_almost_equal(ir.transform(x), y_true) assert_array_almost_equal(ir.fit_transform(x, y), y_true) def test_isotonic_regression_reversed(): y 
= np.array([10, 9, 10, 7, 6, 6.1, 5]) y_result = np.array([10, 9.5, 9.5, 7, 6.05, 6.05, 5]) y_iso = isotonic_regression(y, increasing=False) assert_allclose(y_iso, y_result) y_ = IsotonicRegression(increasing=False).fit_transform(np.arange(len(y)), y) assert_allclose(y_, y_result) assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0)) def test_isotonic_regression_auto_decreasing(): # Set y and x for decreasing y = np.array([10, 9, 10, 7, 6, 6.1, 5]) x = np.arange(len(y)) # Create model and fit_transform ir = IsotonicRegression(increasing="auto") y_ = ir.fit_transform(x, y) # Check that relationship decreases is_increasing = y_[0] < y_[-1] assert not is_increasing def test_isotonic_regression_auto_increasing(): # Set y and x for decreasing y = np.array([5, 6.1, 6, 7, 10, 9, 10]) x = np.arange(len(y)) # Create model and fit_transform ir = IsotonicRegression(increasing="auto") y_ = ir.fit_transform(x, y) # Check that relationship increases is_increasing = y_[0] < y_[-1] assert is_increasing def test_assert_raises_exceptions(): ir = IsotonicRegression() rng = np.random.RandomState(42) msg = "Found input variables with inconsistent numbers of samples" with pytest.raises(ValueError, match=msg): ir.fit([0, 1, 2], [5, 7, 3], [0.1, 0.6]) with pytest.raises(ValueError, match=msg): ir.fit([0, 1, 2], [5, 7]) msg = "X should be a 1d array" with pytest.raises(ValueError, match=msg): ir.fit(rng.randn(3, 10), [0, 1, 2]) msg = "Isotonic regression input X should be a 1d array" with pytest.raises(ValueError, match=msg): ir.transform(rng.randn(3, 10)) def test_isotonic_sample_weight_parameter_default_value(): # check if default value of sample_weight parameter is one ir = IsotonicRegression() # random test data rng = np.random.RandomState(42) n = 100 x = np.arange(n) y = rng.randint(-50, 50, size=(n,)) + 50.0 * np.log(1 + np.arange(n)) # check if value is correctly used weights = np.ones(n) y_set_value = ir.fit_transform(x, y, sample_weight=weights) y_default_value = 
ir.fit_transform(x, y) assert_array_equal(y_set_value, y_default_value) def test_isotonic_min_max_boundaries(): # check if min value is used correctly ir = IsotonicRegression(y_min=2, y_max=4) n = 6 x = np.arange(n) y = np.arange(n) y_test = [2, 2, 2, 3, 4, 4] y_result = np.round(ir.fit_transform(x, y)) assert_array_equal(y_result, y_test) def test_isotonic_sample_weight(): ir = IsotonicRegression() x = [1, 2, 3, 4, 5, 6, 7] y = [1, 41, 51, 1, 2, 5, 24] sample_weight = [1, 2, 3, 4, 5, 6, 7] expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24] received_y = ir.fit_transform(x, y, sample_weight=sample_weight) assert_array_equal(expected_y, received_y) def test_isotonic_regression_oob_raise(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing="auto", out_of_bounds="raise") ir.fit(x, y) # Check that an exception is thrown msg = "in x_new is below the interpolation range" with pytest.raises(ValueError, match=msg): ir.predict([min(x) - 10, max(x) + 10]) def test_isotonic_regression_oob_clip(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing="auto", out_of_bounds="clip") ir.fit(x, y) # Predict from training and test x and check that min/max match. y1 = ir.predict([min(x) - 10, max(x) + 10]) y2 = ir.predict(x) assert max(y1) == max(y2) assert min(y1) == min(y2) def test_isotonic_regression_oob_nan(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing="auto", out_of_bounds="nan") ir.fit(x, y) # Predict from training and test x and check that we have two NaNs. 
y1 = ir.predict([min(x) - 10, max(x) + 10]) assert sum(np.isnan(y1)) == 2 def test_isotonic_regression_pickle(): y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing="auto", out_of_bounds="clip") ir.fit(x, y) ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL) ir2 = pickle.loads(ir_ser) np.testing.assert_array_equal(ir.predict(x), ir2.predict(x)) def test_isotonic_duplicate_min_entry(): x = [0, 0, 1] y = [0, 0, 1] ir = IsotonicRegression(increasing=True, out_of_bounds="clip") ir.fit(x, y) all_predictions_finite = np.all(np.isfinite(ir.predict(x))) assert all_predictions_finite def test_isotonic_ymin_ymax(): # Test from @NelleV's issue: # https://github.com/scikit-learn/scikit-learn/issues/6921 x = np.array( [ 1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059, 1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377, -0.896, -0.377, -1.327, 0.180, ] ) y = isotonic_regression(x, y_min=0.0, y_max=0.1) assert np.all(y >= 0) assert np.all(y <= 0.1) # Also test decreasing case since the logic there is different y = isotonic_regression(x, y_min=0.0, y_max=0.1, increasing=False) assert np.all(y >= 0) assert np.all(y <= 0.1) # Finally, test with only one bound y = isotonic_regression(x, y_min=0.0, increasing=False) assert np.all(y >= 0) def test_isotonic_zero_weight_loop(): # Test from @ogrisel's issue: # https://github.com/scikit-learn/scikit-learn/issues/4297 # Get deterministic RNG with seed rng = np.random.RandomState(42) # Create regression and samples regression = IsotonicRegression() n_samples = 50 x = np.linspace(-3, 3, n_samples) y = x + rng.uniform(size=n_samples) # Get some random weights and zero out w = rng.uniform(size=n_samples) w[5:8] = 0 regression.fit(x, y, sample_weight=w) # This will hang in failure case. 
regression.fit(x, y, sample_weight=w) def test_fast_predict(): # test that the faster prediction change doesn't # affect out-of-sample predictions: # https://github.com/scikit-learn/scikit-learn/pull/6206 rng = np.random.RandomState(123) n_samples = 10**3 # X values over the -10,10 range X_train = 20.0 * rng.rand(n_samples) - 10 y_train = ( np.less(rng.rand(n_samples), expit(X_train)).astype("int64").astype("float64") ) weights = rng.rand(n_samples) # we also want to test that everything still works when some weights are 0 weights[rng.rand(n_samples) < 0.1] = 0 slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip") fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip") # Build interpolation function with ALL input data, not just the # non-redundant subset. The following 2 lines are taken from the # .fit() method, without removing unnecessary points X_train_fit, y_train_fit = slow_model._build_y( X_train, y_train, sample_weight=weights, trim_duplicates=False ) slow_model._build_f(X_train_fit, y_train_fit) # fit with just the necessary data fast_model.fit(X_train, y_train, sample_weight=weights) X_test = 20.0 * rng.rand(n_samples) - 10 y_pred_slow = slow_model.predict(X_test) y_pred_fast = fast_model.predict(X_test) assert_array_equal(y_pred_slow, y_pred_fast) def test_isotonic_copy_before_fit(): # https://github.com/scikit-learn/scikit-learn/issues/6628 ir = IsotonicRegression() copy.copy(ir) @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) def test_isotonic_dtype(dtype): y = [2, 1, 4, 3, 5] weights = np.array([0.9, 0.9, 0.9, 0.9, 0.9], dtype=np.float64) reg = IsotonicRegression() for sample_weight in (None, weights.astype(np.float32), weights): y_np = np.array(y, dtype=dtype) expected_dtype = check_array( y_np, dtype=[np.float64, np.float32], ensure_2d=False ).dtype res = isotonic_regression(y_np, sample_weight=sample_weight) assert res.dtype == expected_dtype X = np.arange(len(y)).astype(dtype) 
reg.fit(X, y_np, sample_weight=sample_weight) res = reg.predict(X) assert res.dtype == expected_dtype @pytest.mark.parametrize("y_dtype", [np.int32, np.int64, np.float32, np.float64]) def test_isotonic_mismatched_dtype(y_dtype): # regression test for #15004 # check that data are converted when X and y dtype differ reg = IsotonicRegression() y = np.array([2, 1, 4, 3, 5], dtype=y_dtype) X = np.arange(len(y), dtype=np.float32) reg.fit(X, y) assert reg.predict(X).dtype == X.dtype def test_make_unique_dtype(): x_list = [2, 2, 2, 3, 5] for dtype in (np.float32, np.float64): x = np.array(x_list, dtype=dtype) y = x.copy() w = np.ones_like(x) x, y, w = _make_unique(x, y, w) assert_array_equal(x, [2, 3, 5]) @pytest.mark.parametrize("dtype", [np.float64, np.float32]) def test_make_unique_tolerance(dtype): # Check that equality takes account of np.finfo tolerance x = np.array([0, 1e-16, 1, 1 + 1e-14], dtype=dtype) y = x.copy() w = np.ones_like(x) x, y, w = _make_unique(x, y, w) if dtype == np.float64: x_out = np.array([0, 1, 1 + 1e-14]) else: x_out = np.array([0, 1]) assert_array_equal(x, x_out) def test_isotonic_make_unique_tolerance(): # Check that averaging of targets for duplicate X is done correctly, # taking into account tolerance X = np.array([0, 1, 1 + 1e-16, 2], dtype=np.float64) y = np.array([0, 1, 2, 3], dtype=np.float64) ireg = IsotonicRegression().fit(X, y) y_pred = ireg.predict([0, 0.5, 1, 1.5, 2]) assert_array_equal(y_pred, np.array([0, 0.75, 1.5, 2.25, 3])) assert_array_equal(ireg.X_thresholds_, np.array([0.0, 1.0, 2.0])) assert_array_equal(ireg.y_thresholds_, np.array([0.0, 1.5, 3.0])) def test_isotonic_non_regression_inf_slope(): # Non-regression test to ensure that inf values are not returned # see: https://github.com/scikit-learn/scikit-learn/issues/10903 X = np.array([0.0, 4.1e-320, 4.4e-314, 1.0]) y = np.array([0.42, 0.42, 0.44, 0.44]) ireg = IsotonicRegression().fit(X, y) y_pred = ireg.predict(np.array([0, 2.1e-319, 5.4e-316, 1e-10])) assert 
np.all(np.isfinite(y_pred)) @pytest.mark.parametrize("increasing", [True, False]) def test_isotonic_thresholds(increasing): rng = np.random.RandomState(42) n_samples = 30 X = rng.normal(size=n_samples) y = rng.normal(size=n_samples) ireg = IsotonicRegression(increasing=increasing).fit(X, y) X_thresholds, y_thresholds = ireg.X_thresholds_, ireg.y_thresholds_ assert X_thresholds.shape == y_thresholds.shape # Input thresholds are a strict subset of the training set (unless # the data is already strictly monotonic which is not the case with # this random data) assert X_thresholds.shape[0] < X.shape[0] assert np.isin(X_thresholds, X).all() # Output thresholds lie in the range of the training set: assert y_thresholds.max() <= y.max() assert y_thresholds.min() >= y.min() assert all(np.diff(X_thresholds) > 0) if increasing: assert all(np.diff(y_thresholds) >= 0) else: assert all(np.diff(y_thresholds) <= 0) def test_input_shape_validation(): # Test from #15012 # Check that IsotonicRegression can handle 2darray with only 1 feature X = np.arange(10) X_2d = X.reshape(-1, 1) y = np.arange(10) iso_reg = IsotonicRegression().fit(X, y) iso_reg_2d = IsotonicRegression().fit(X_2d, y) assert iso_reg.X_max_ == iso_reg_2d.X_max_ assert iso_reg.X_min_ == iso_reg_2d.X_min_ assert iso_reg.y_max == iso_reg_2d.y_max assert iso_reg.y_min == iso_reg_2d.y_min assert_array_equal(iso_reg.X_thresholds_, iso_reg_2d.X_thresholds_) assert_array_equal(iso_reg.y_thresholds_, iso_reg_2d.y_thresholds_) y_pred1 = iso_reg.predict(X) y_pred2 = iso_reg_2d.predict(X_2d) assert_allclose(y_pred1, y_pred2) def test_isotonic_2darray_more_than_1_feature(): # Ensure IsotonicRegression raises error if input has more than 1 feature X = np.arange(10) X_2d = np.c_[X, X] y = np.arange(10) msg = "should be a 1d array or 2d array with 1 feature" with pytest.raises(ValueError, match=msg): IsotonicRegression().fit(X_2d, y) iso_reg = IsotonicRegression().fit(X, y) with pytest.raises(ValueError, match=msg): 
iso_reg.predict(X_2d) with pytest.raises(ValueError, match=msg): iso_reg.transform(X_2d) def test_isotonic_regression_sample_weight_not_overwritten(): """Check that calling fitting function of isotonic regression will not overwrite `sample_weight`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/20508 """ X, y = make_regression(n_samples=10, n_features=1, random_state=41) sample_weight_original = np.ones_like(y) sample_weight_original[0] = 10 sample_weight_fit = sample_weight_original.copy() isotonic_regression(y, sample_weight=sample_weight_fit) assert_allclose(sample_weight_fit, sample_weight_original) IsotonicRegression().fit(X, y, sample_weight=sample_weight_fit) assert_allclose(sample_weight_fit, sample_weight_original) @pytest.mark.parametrize("shape", ["1d", "2d"]) def test_get_feature_names_out(shape): """Check `get_feature_names_out` for `IsotonicRegression`.""" X = np.arange(10) if shape == "2d": X = X.reshape(-1, 1) y = np.arange(10) iso = IsotonicRegression().fit(X, y) names = iso.get_feature_names_out() assert isinstance(names, np.ndarray) assert names.dtype == object assert_array_equal(["isotonicregression0"], names) def test_isotonic_regression_output_predict(): """Check that `predict` does return the expected output type. We need to check that `transform` will output a DataFrame and a NumPy array when we set `transform_output` to `pandas`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25499 """ pd = pytest.importorskip("pandas") X, y = make_regression(n_samples=10, n_features=1, random_state=42) regressor = IsotonicRegression() with sklearn.config_context(transform_output="pandas"): regressor.fit(X, y) X_trans = regressor.transform(X) y_pred = regressor.predict(X) assert isinstance(X_trans, pd.DataFrame) assert isinstance(y_pred, np.ndarray)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_metadata_routing.py
sklearn/tests/test_metadata_routing.py
""" Metadata Routing Utility Tests """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import re import numpy as np import pytest from sklearn import config_context from sklearn.base import ( BaseEstimator, clone, ) from sklearn.exceptions import UnsetMetadataPassedError from sklearn.linear_model import LinearRegression from sklearn.pipeline import Pipeline from sklearn.tests.metadata_routing_common import ( ConsumingClassifier, ConsumingRegressor, ConsumingTransformer, MetaRegressor, MetaTransformer, NonConsumingClassifier, WeightedMetaClassifier, WeightedMetaRegressor, _Registry, assert_request_equal, assert_request_is_empty, check_recorded_metadata, ) from sklearn.utils import metadata_routing from sklearn.utils._metadata_requests import ( COMPOSITE_METHODS, METHODS, SIMPLE_METHODS, MethodMetadataRequest, MethodPair, _MetadataRequester, request_is_alias, request_is_valid, ) from sklearn.utils.metadata_routing import ( MetadataRequest, MetadataRouter, MethodMapping, _RoutingNotSupportedMixin, get_routing_for_object, process_routing, ) from sklearn.utils.validation import check_is_fitted rng = np.random.RandomState(42) N, M = 100, 4 X = rng.rand(N, M) y = rng.randint(0, 2, size=N) my_groups = rng.randint(0, 10, size=N) my_weights = rng.rand(N) my_other_weights = rng.rand(N) class SimplePipeline(BaseEstimator): """A very simple pipeline, assuming the last step is always a predictor. Parameters ---------- steps : iterable of objects An iterable of transformers with the last step being a predictor. 
""" def __init__(self, steps): self.steps = steps def fit(self, X, y, **fit_params): self.steps_ = [] params = process_routing(self, "fit", **fit_params) X_transformed = X for i, step in enumerate(self.steps[:-1]): transformer = clone(step).fit( X_transformed, y, **params.get(f"step_{i}").fit ) self.steps_.append(transformer) X_transformed = transformer.transform( X_transformed, **params.get(f"step_{i}").transform ) self.steps_.append( clone(self.steps[-1]).fit(X_transformed, y, **params.predictor.fit) ) return self def predict(self, X, **predict_params): check_is_fitted(self) X_transformed = X params = process_routing(self, "predict", **predict_params) for i, step in enumerate(self.steps_[:-1]): X_transformed = step.transform(X, **params.get(f"step_{i}").transform) return self.steps_[-1].predict(X_transformed, **params.predictor.predict) def get_metadata_routing(self): router = MetadataRouter(owner=self) for i, step in enumerate(self.steps[:-1]): router.add( **{f"step_{i}": step}, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="fit", callee="transform") .add(caller="predict", callee="transform"), ) router.add( predictor=self.steps[-1], method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="predict", callee="predict"), ) return router @config_context(enable_metadata_routing=True) def test_assert_request_is_empty(): requests = MetadataRequest(owner="test") assert_request_is_empty(requests) requests.fit.add_request(param="foo", alias=None) # this should still work, since None is the default value assert_request_is_empty(requests) requests.fit.add_request(param="bar", alias="value") with pytest.raises(AssertionError): # now requests is no more empty assert_request_is_empty(requests) # but one can exclude a method assert_request_is_empty(requests, exclude="fit") requests.score.add_request(param="carrot", alias=True) with pytest.raises(AssertionError): # excluding `fit` is not enough assert_request_is_empty(requests, 
exclude="fit") # and excluding both fit and score would avoid an exception assert_request_is_empty(requests, exclude=["fit", "score"]) # test if a router is empty assert_request_is_empty( MetadataRouter(owner="test") .add_self_request(WeightedMetaRegressor(estimator=None)) .add( estimator=ConsumingRegressor(), method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) ) @pytest.mark.parametrize( "estimator", [ ConsumingClassifier(registry=_Registry()), ConsumingRegressor(registry=_Registry()), ConsumingTransformer(registry=_Registry()), WeightedMetaClassifier(estimator=ConsumingClassifier(), registry=_Registry()), WeightedMetaRegressor(estimator=ConsumingRegressor(), registry=_Registry()), ], ) @config_context(enable_metadata_routing=True) def test_estimator_puts_self_in_registry(estimator): """Check that an estimator puts itself in the registry upon fit.""" estimator.fit(X, y) assert estimator in estimator.registry @pytest.mark.parametrize( "val, res", [ (False, False), (True, False), (None, False), ("$UNUSED$", False), ("$WARN$", False), ("invalid-input", False), ("valid_arg", True), ], ) @config_context(enable_metadata_routing=True) def test_request_type_is_alias(val, res): # Test request_is_alias assert request_is_alias(val) == res @pytest.mark.parametrize( "val, res", [ (False, True), (True, True), (None, True), ("$UNUSED$", True), ("$WARN$", True), ("invalid-input", False), ("alias_arg", False), ], ) @config_context(enable_metadata_routing=True) def test_request_type_is_valid(val, res): # Test request_is_valid assert request_is_valid(val) == res @config_context(enable_metadata_routing=True) def test_default_requests(): class OddEstimator(BaseEstimator): __metadata_request__fit = { # set a different default request "sample_weight": True } # type: ignore[var-annotated] def fit(self, X, y=None): return self # pragma: no cover odd_request = get_routing_for_object(OddEstimator()) assert odd_request.fit.requests == {"sample_weight": True} # check other test 
estimators assert not len(get_routing_for_object(NonConsumingClassifier()).fit.requests) assert_request_is_empty(NonConsumingClassifier().get_metadata_routing()) trs_request = get_routing_for_object(ConsumingTransformer()) assert trs_request.fit.requests == { "sample_weight": None, "metadata": None, } assert trs_request.transform.requests == {"metadata": None, "sample_weight": None} assert_request_is_empty(trs_request) est_request = get_routing_for_object(ConsumingClassifier()) assert est_request.fit.requests == { "sample_weight": None, "metadata": None, } assert_request_is_empty(est_request) @config_context(enable_metadata_routing=True) def test_default_request_override(): """Test that default requests are correctly overridden regardless of the ASCII order of the class names, hence testing small and capital letter class name starts. Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28430 """ class Base(BaseEstimator): __metadata_request__split = {"groups": True} def split(self, X, y=None): pass # pragma: no cover class class_1(Base): __metadata_request__split = {"groups": "sample_domain"} def split(self, X, y=None): pass # pragma: no cover class Class_1(Base): __metadata_request__split = {"groups": "sample_domain"} def split(self, X, y=None): pass # pragma: no cover assert_request_equal( class_1()._get_metadata_request(), {"split": {"groups": "sample_domain"}} ) assert_request_equal( Class_1()._get_metadata_request(), {"split": {"groups": "sample_domain"}} ) @config_context(enable_metadata_routing=True) def test_process_routing_invalid_method(): with pytest.raises(TypeError, match="Can only route and process input"): process_routing(ConsumingClassifier(), "invalid_method", groups=my_groups) @config_context(enable_metadata_routing=True) def test_process_routing_invalid_object(): class InvalidObject: pass with pytest.raises(AttributeError, match="either implement the routing method"): process_routing(InvalidObject(), "fit", 
groups=my_groups) @pytest.mark.parametrize("method", METHODS) @pytest.mark.parametrize("default", [None, "default", []]) @config_context(enable_metadata_routing=True) def test_process_routing_empty_params_get_with_default(method, default): empty_params = {} routed_params = process_routing(ConsumingClassifier(), "fit", **empty_params) # Behaviour should be an empty dictionary returned for each method when retrieved. params_for_method = routed_params[method] assert isinstance(params_for_method, dict) assert set(params_for_method.keys()) == set(METHODS) # No default to `get` should be equivalent to the default default_params_for_method = routed_params.get(method, default=default) assert default_params_for_method == params_for_method @config_context(enable_metadata_routing=True) def test_simple_metadata_routing(): # Tests that metadata is properly routed # The underlying estimator doesn't accept or request metadata clf = WeightedMetaClassifier(estimator=NonConsumingClassifier()) clf.fit(X, y) # Meta-estimator consumes sample_weight, but doesn't forward it to the underlying # estimator clf = WeightedMetaClassifier(estimator=NonConsumingClassifier()) clf.fit(X, y, sample_weight=my_weights) # If the estimator accepts the metadata but doesn't explicitly say it doesn't # need it, there's an error clf = WeightedMetaClassifier(estimator=ConsumingClassifier()) err_message = ( "[sample_weight] are passed but are not explicitly set as requested or" " not requested for ConsumingClassifier.fit" ) with pytest.raises(ValueError, match=re.escape(err_message)): clf.fit(X, y, sample_weight=my_weights) # Explicitly saying the estimator doesn't need it, makes the error go away, # because in this case `WeightedMetaClassifier` consumes `sample_weight`. If # there was no consumer of sample_weight, passing it would result in an # error. 
clf = WeightedMetaClassifier( estimator=ConsumingClassifier().set_fit_request(sample_weight=False) ) # this doesn't raise since WeightedMetaClassifier itself is a consumer, # and passing metadata to the consumer directly is fine regardless of its # metadata_request values. clf.fit(X, y, sample_weight=my_weights) check_recorded_metadata(clf.estimator_, method="fit", parent="fit") # Requesting a metadata will make the meta-estimator forward it correctly clf = WeightedMetaClassifier( estimator=ConsumingClassifier().set_fit_request(sample_weight=True) ) clf.fit(X, y, sample_weight=my_weights) check_recorded_metadata( clf.estimator_, method="fit", parent="fit", sample_weight=my_weights ) # And requesting it with an alias clf = WeightedMetaClassifier( estimator=ConsumingClassifier().set_fit_request( sample_weight="alternative_weight" ) ) clf.fit(X, y, alternative_weight=my_weights) check_recorded_metadata( clf.estimator_, method="fit", parent="fit", sample_weight=my_weights ) @config_context(enable_metadata_routing=True) def test_nested_routing(): # check if metadata is routed in a nested routing situation. 
pipeline = SimplePipeline( [ MetaTransformer( transformer=ConsumingTransformer() .set_fit_request(metadata=True, sample_weight=False) .set_transform_request(sample_weight=True, metadata=False) ), WeightedMetaRegressor( estimator=ConsumingRegressor() .set_fit_request(sample_weight="inner_weights", metadata=False) .set_predict_request(sample_weight=False) ).set_fit_request(sample_weight="outer_weights"), ] ) w1, w2, w3 = [1], [2], [3] pipeline.fit( X, y, metadata=my_groups, sample_weight=w1, outer_weights=w2, inner_weights=w3 ) check_recorded_metadata( pipeline.steps_[0].transformer_, method="fit", parent="fit", metadata=my_groups, ) check_recorded_metadata( pipeline.steps_[0].transformer_, method="transform", parent="fit", sample_weight=w1, ) check_recorded_metadata( pipeline.steps_[1], method="fit", parent="fit", sample_weight=w2 ) check_recorded_metadata( pipeline.steps_[1].estimator_, method="fit", parent="fit", sample_weight=w3 ) pipeline.predict(X, sample_weight=w3) check_recorded_metadata( pipeline.steps_[0].transformer_, method="transform", parent="fit", sample_weight=w3, ) @config_context(enable_metadata_routing=True) def test_nested_routing_conflict(): # check if an error is raised if there's a conflict between keys pipeline = SimplePipeline( [ MetaTransformer( transformer=ConsumingTransformer() .set_fit_request(metadata=True, sample_weight=False) .set_transform_request(sample_weight=True) ), WeightedMetaRegressor( estimator=ConsumingRegressor().set_fit_request(sample_weight=True) ).set_fit_request(sample_weight="outer_weights"), ] ) w1, w2 = [1], [2] with pytest.raises( ValueError, match=( re.escape( "In WeightedMetaRegressor, there is a conflict on sample_weight between" " what is requested for this estimator and what is requested by its" " children. You can resolve this conflict by using an alias for the" " child estimators' requested metadata." 
) ), ): pipeline.fit(X, y, metadata=my_groups, sample_weight=w1, outer_weights=w2) @config_context(enable_metadata_routing=True) def test_invalid_metadata(): # check that passing wrong metadata raises an error trs = MetaTransformer( transformer=ConsumingTransformer().set_transform_request(sample_weight=True) ) with pytest.raises( TypeError, match=(re.escape("transform got unexpected argument(s) {'other_param'}")), ): trs.fit(X, y).transform(X, other_param=my_weights) # passing a metadata which is not requested by any estimator should also raise trs = MetaTransformer( transformer=ConsumingTransformer().set_transform_request(sample_weight=False) ) with pytest.raises( TypeError, match=(re.escape("transform got unexpected argument(s) {'sample_weight'}")), ): trs.fit(X, y).transform(X, sample_weight=my_weights) @config_context(enable_metadata_routing=True) def test_get_metadata_routing(): class TestDefaults(_MetadataRequester): __metadata_request__fit = { "sample_weight": None, "my_other_param": None, } __metadata_request__score = { "sample_weight": None, "my_param": True, "my_other_param": None, } __metadata_request__predict = {"my_param": True} def fit(self, X, y=None): return self # pragma: no cover def score(self, X, y=None): pass # pragma: no cover def predict(self, X): pass # pragma: no cover expected = { "score": { "my_param": True, "my_other_param": None, "sample_weight": None, }, "fit": { "my_other_param": None, "sample_weight": None, }, "predict": {"my_param": True}, } assert_request_equal(TestDefaults().get_metadata_routing(), expected) est = TestDefaults().set_score_request(my_param="other_param") expected = { "score": { "my_param": "other_param", "my_other_param": None, "sample_weight": None, }, "fit": { "my_other_param": None, "sample_weight": None, }, "predict": {"my_param": True}, } assert_request_equal(est.get_metadata_routing(), expected) est = TestDefaults().set_fit_request(sample_weight=True) expected = { "score": { "my_param": True, 
"my_other_param": None, "sample_weight": None, }, "fit": { "my_other_param": None, "sample_weight": True, }, "predict": {"my_param": True}, } assert_request_equal(est.get_metadata_routing(), expected) @config_context(enable_metadata_routing=True) def test_setting_default_requests(): # Test _get_default_requests method test_cases = dict() class ExplicitRequest(BaseEstimator): # `fit` doesn't accept `props` explicitly, but we want to request it __metadata_request__fit = {"prop": None} def fit(self, X, y, **kwargs): return self test_cases[ExplicitRequest] = {"prop": None} class ExplicitRequestOverwrite(BaseEstimator): # `fit` explicitly accepts `props`, but we want to change the default # request value from None to True __metadata_request__fit = {"prop": True} def fit(self, X, y, prop=None, **kwargs): return self test_cases[ExplicitRequestOverwrite] = {"prop": True} class ImplicitRequest(BaseEstimator): # `fit` requests `prop` and the default None should be used def fit(self, X, y, prop=None, **kwargs): return self test_cases[ImplicitRequest] = {"prop": None} class ImplicitRequestRemoval(BaseEstimator): # `fit` (in this class or a parent) requests `prop`, but we don't want # it requested at all. __metadata_request__fit = {"prop": metadata_routing.UNUSED} def fit(self, X, y, prop=None, **kwargs): return self test_cases[ImplicitRequestRemoval] = {} for Klass, requests in test_cases.items(): assert get_routing_for_object(Klass()).fit.requests == requests assert_request_is_empty(Klass().get_metadata_routing(), exclude="fit") Klass().fit(None, None) # for coverage @config_context(enable_metadata_routing=True) def test_removing_non_existing_param_raises(): """Test that removing a metadata using UNUSED which doesn't exist raises.""" class InvalidRequestRemoval(BaseEstimator): # `fit` (in this class or a parent) requests `prop`, but we don't want # it requested at all. 
__metadata_request__fit = {"prop": metadata_routing.UNUSED} def fit(self, X, y, **kwargs): return self with pytest.raises(ValueError, match="Trying to remove parameter"): InvalidRequestRemoval().get_metadata_routing() @config_context(enable_metadata_routing=True) def test_method_metadata_request(): mmr = MethodMetadataRequest(owner="test", method="fit") with pytest.raises(ValueError, match="The alias you're setting for"): mmr.add_request(param="foo", alias=1.4) mmr.add_request(param="foo", alias=None) assert mmr.requests == {"foo": None} mmr.add_request(param="foo", alias=False) assert mmr.requests == {"foo": False} mmr.add_request(param="foo", alias=True) assert mmr.requests == {"foo": True} mmr.add_request(param="foo", alias="foo") assert mmr.requests == {"foo": True} mmr.add_request(param="foo", alias="bar") assert mmr.requests == {"foo": "bar"} assert mmr._get_param_names(return_alias=False) == {"foo"} assert mmr._get_param_names(return_alias=True) == {"bar"} @config_context(enable_metadata_routing=True) def test_get_routing_for_object(): class Consumer(BaseEstimator): __metadata_request__fit = {"prop": None} def fit(self, X, y=None): return self # pragma: no cover assert_request_is_empty(get_routing_for_object(None)) assert_request_is_empty(get_routing_for_object(object())) mr = MetadataRequest(owner="test") mr.fit.add_request(param="foo", alias="bar") mr_factory = get_routing_for_object(mr) assert_request_is_empty(mr_factory, exclude="fit") assert mr_factory.fit.requests == {"foo": "bar"} mr = get_routing_for_object(Consumer()) assert_request_is_empty(mr, exclude="fit") assert mr.fit.requests == {"prop": None} @config_context(enable_metadata_routing=True) def test_metadata_request_consumes_method(): """Test that MetadataRequest().consumes() method works as expected.""" request = MetadataRequest(owner="test") assert request.consumes(method="fit", params={"foo"}) == set() request = MetadataRequest(owner="test") request.fit.add_request(param="foo", alias=True) 
assert request.consumes(method="fit", params={"foo"}) == {"foo"} request = MetadataRequest(owner="test") request.fit.add_request(param="foo", alias="bar") assert request.consumes(method="fit", params={"bar", "foo"}) == {"bar"} @config_context(enable_metadata_routing=True) def test_metadata_router_consumes_method(): """Test that MetadataRouter().consumes method works as expected.""" # having it here instead of parametrizing the test since `set_fit_request` # is not available while collecting the tests. cases = [ ( WeightedMetaRegressor( estimator=ConsumingRegressor().set_fit_request(sample_weight=True) ), {"sample_weight"}, {"sample_weight"}, ), ( WeightedMetaRegressor( estimator=ConsumingRegressor().set_fit_request( sample_weight="my_weights" ) ), {"my_weights", "sample_weight"}, {"my_weights"}, ), ] for obj, input, output in cases: assert obj.get_metadata_routing().consumes(method="fit", params=input) == output @config_context(enable_metadata_routing=True) def test_metaestimator_warnings(): class WeightedMetaRegressorWarn(WeightedMetaRegressor): __metadata_request__fit = {"sample_weight": metadata_routing.WARN} with pytest.warns( UserWarning, match="Support for .* has recently been added to .* class" ): WeightedMetaRegressorWarn( estimator=LinearRegression().set_fit_request(sample_weight=False) ).fit(X, y, sample_weight=my_weights) @config_context(enable_metadata_routing=True) def test_estimator_warnings(): class ConsumingRegressorWarn(ConsumingRegressor): __metadata_request__fit = {"sample_weight": metadata_routing.WARN} with pytest.warns( UserWarning, match="Support for .* has recently been added to .* class" ): MetaRegressor(estimator=ConsumingRegressorWarn()).fit( X, y, sample_weight=my_weights ) @config_context(enable_metadata_routing=True) @pytest.mark.parametrize( "obj, string", [ ( MethodMetadataRequest(owner="test", method="fit").add_request( param="foo", alias="bar" ), "{'foo': 'bar'}", ), ( MetadataRequest(owner="test"), "{}", ), ( 
MetadataRouter(owner="test").add( estimator=ConsumingRegressor(), method_mapping=MethodMapping().add(caller="predict", callee="predict"), ), ( "{'estimator': {'mapping': [{'caller': 'predict', 'callee':" " 'predict'}], 'router': {'fit': {'sample_weight': None, 'metadata':" " None}, 'partial_fit': {'sample_weight': None, 'metadata': None}," " 'predict': {'sample_weight': None, 'metadata': None}, 'score':" " {'sample_weight': None, 'metadata': None}}}}" ), ), ], ) @config_context(enable_metadata_routing=True) def test_string_representations(obj, string): assert str(obj) == string @pytest.mark.parametrize( "obj, method, inputs, err_cls, err_msg", [ ( MethodMapping(), "add", {"caller": "fit", "callee": "invalid"}, ValueError, "Given callee", ), ( MethodMapping(), "add", {"caller": "invalid", "callee": "fit"}, ValueError, "Given caller", ), ( MetadataRouter(owner="test"), "add_self_request", {"obj": MetadataRouter(owner="test")}, ValueError, "Given `obj` is neither a `MetadataRequest` nor does it implement", ), ( ConsumingClassifier(), "set_fit_request", {"invalid": True}, TypeError, "Unexpected args", ), ], ) @config_context(enable_metadata_routing=True) def test_validations(obj, method, inputs, err_cls, err_msg): with pytest.raises(err_cls, match=err_msg): getattr(obj, method)(**inputs) @config_context(enable_metadata_routing=True) def test_methodmapping(): mm = ( MethodMapping() .add(caller="fit", callee="transform") .add(caller="fit", callee="fit") ) mm_list = list(mm) assert mm_list[0] == ("fit", "transform") assert mm_list[1] == ("fit", "fit") mm = MethodMapping() for method in METHODS: mm.add(caller=method, callee=method) assert MethodPair(method, method) in mm._routes assert len(mm._routes) == len(METHODS) mm = MethodMapping().add(caller="score", callee="score") assert repr(mm) == "[{'caller': 'score', 'callee': 'score'}]" @config_context(enable_metadata_routing=True) def test_metadatarouter_add_self_request(): # adding a MetadataRequest as `self` adds a copy 
request = MetadataRequest(owner="nested") request.fit.add_request(param="param", alias=True) router = MetadataRouter(owner="test").add_self_request(request) assert str(router._self_request) == str(request) # should be a copy, not the same object assert router._self_request is not request # one can add an estimator as self est = ConsumingRegressor().set_fit_request(sample_weight="my_weights") router = MetadataRouter(owner="test").add_self_request(obj=est) assert str(router._self_request) == str(est.get_metadata_routing()) assert router._self_request is not est.get_metadata_routing() # adding a consumer+router as self should only add the consumer part est = WeightedMetaRegressor( estimator=ConsumingRegressor().set_fit_request(sample_weight="nested_weights") ) router = MetadataRouter(owner="test").add_self_request(obj=est) # _get_metadata_request() returns the consumer part of the requests assert str(router._self_request) == str(est._get_metadata_request()) # get_metadata_routing() returns the complete request set, consumer and # router included. 
assert str(router._self_request) != str(est.get_metadata_routing()) # it should be a copy, not the same object assert router._self_request is not est._get_metadata_request() @config_context(enable_metadata_routing=True) def test_metadata_routing_add(): # adding one with a string `method_mapping` router = MetadataRouter(owner="test").add( est=ConsumingRegressor().set_fit_request(sample_weight="weights"), method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) assert ( str(router) == "{'est': {'mapping': [{'caller': 'fit', 'callee': 'fit'}], 'router': {'fit':" " {'sample_weight': 'weights', 'metadata': None}, 'partial_fit':" " {'sample_weight': None, 'metadata': None}, 'predict': {'sample_weight':" " None, 'metadata': None}, 'score': {'sample_weight': None, 'metadata':" " None}}}}" ) # adding one with an instance of MethodMapping router = MetadataRouter(owner="test").add( method_mapping=MethodMapping().add(caller="fit", callee="score"), est=ConsumingRegressor().set_score_request(sample_weight=True), ) assert ( str(router) == "{'est': {'mapping': [{'caller': 'fit', 'callee': 'score'}], 'router':" " {'fit': {'sample_weight': None, 'metadata': None}, 'partial_fit':" " {'sample_weight': None, 'metadata': None}, 'predict': {'sample_weight':" " None, 'metadata': None}, 'score': {'sample_weight': True, 'metadata':" " None}}}}" ) @config_context(enable_metadata_routing=True) def test_metadata_routing_get_param_names(): router = ( MetadataRouter(owner="test") .add_self_request( WeightedMetaRegressor(estimator=ConsumingRegressor()).set_fit_request( sample_weight="self_weights" ) ) .add( trs=ConsumingTransformer().set_fit_request( sample_weight="transform_weights" ), method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) ) assert ( str(router) == "{'$self_request': {'fit': {'sample_weight': 'self_weights'}, 'score':" " {'sample_weight': None}}, 'trs': {'mapping': [{'caller': 'fit', 'callee':" " 'fit'}], 'router': {'fit': {'sample_weight': 
'transform_weights'," " 'metadata': None}, 'transform': {'sample_weight': None, 'metadata': None}," " 'inverse_transform': {'sample_weight': None, 'metadata': None}}}}" ) assert router._get_param_names( method="fit", return_alias=True, ignore_self_request=False ) == {"transform_weights", "metadata", "self_weights"} # return_alias=False will return original names for "self" assert router._get_param_names( method="fit", return_alias=False, ignore_self_request=False ) == {"sample_weight", "metadata", "transform_weights"} # ignoring self would remove "sample_weight" assert router._get_param_names( method="fit", return_alias=False, ignore_self_request=True ) == {"metadata", "transform_weights"} # return_alias is ignored when ignore_self_request=True assert router._get_param_names( method="fit", return_alias=True, ignore_self_request=True ) == router._get_param_names( method="fit", return_alias=False, ignore_self_request=True ) @config_context(enable_metadata_routing=True) def test_method_generation(): # Test if all required request methods are generated. # TODO: these test classes can be moved to sklearn.utils._testing once we # have a better idea of what the commonly used classes are. 
class SimpleEstimator(BaseEstimator): # This class should have no set_{method}_request def fit(self, X, y): pass # pragma: no cover def fit_transform(self, X, y): pass # pragma: no cover def fit_predict(self, X, y): pass # pragma: no cover def partial_fit(self, X, y): pass # pragma: no cover def predict(self, X): pass # pragma: no cover def predict_proba(self, X): pass # pragma: no cover def predict_log_proba(self, X): pass # pragma: no cover def decision_function(self, X): pass # pragma: no cover def score(self, X, y): pass # pragma: no cover def split(self, X, y=None): pass # pragma: no cover def transform(self, X): pass # pragma: no cover def inverse_transform(self, X): pass # pragma: no cover for method in METHODS: assert not hasattr(SimpleEstimator(), f"set_{method}_request") class SimpleEstimator(BaseEstimator): # This class should have every set_{method}_request def fit(self, X, y, sample_weight=None): pass # pragma: no cover
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_docstring_parameters.py
sklearn/tests/test_docstring_parameters.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import importlib import inspect import os import warnings from inspect import signature from pkgutil import walk_packages import numpy as np import pytest import sklearn from sklearn.datasets import make_classification # make it possible to discover experimental estimators when calling `all_estimators` from sklearn.experimental import ( enable_halving_search_cv, # noqa: F401 enable_iterative_imputer, # noqa: F401 ) from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import FunctionTransformer from sklearn.utils import all_estimators from sklearn.utils._test_common.instance_generator import _construct_instances from sklearn.utils._testing import ( _get_func_name, check_docstring_parameters, ignore_warnings, ) from sklearn.utils.deprecation import _is_deprecated from sklearn.utils.estimator_checks import ( _enforce_estimator_tags_X, _enforce_estimator_tags_y, ) # walk_packages() ignores DeprecationWarnings, now we need to ignore # FutureWarnings with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) # mypy error: Module has no attribute "__path__" sklearn_path = [os.path.dirname(sklearn.__file__)] PUBLIC_MODULES = set( [ pckg[1] for pckg in walk_packages(prefix="sklearn.", path=sklearn_path) if not any( substr in pckg[1] for substr in ["._", ".tests.", "sklearn.externals"] ) ] ) # functions to ignore args / docstring of _DOCSTRING_IGNORES = [ "sklearn.utils.deprecation.load_mlcomp", "sklearn.pipeline.make_pipeline", "sklearn.pipeline.make_union", "sklearn.utils.extmath.safe_sparse_dot", "HalfBinomialLoss", ] # Methods where y param should be ignored if y=None by default _METHODS_IGNORE_NONE_Y = [ "fit", "score", "fit_predict", "fit_transform", "partial_fit", "predict", ] def test_docstring_parameters(): # Test module docstring formatting # Skip test if numpydoc is not found pytest.importorskip( "numpydoc", reason="numpydoc is required to 
test the docstrings" ) # XXX unreached code as of v0.22 from numpydoc import docscrape incorrect = [] for name in PUBLIC_MODULES: if name.endswith(".conftest"): # pytest tooling, not part of the scikit-learn API continue if name == "sklearn.utils.fixes": # We cannot always control these docstrings continue with warnings.catch_warnings(record=True): module = importlib.import_module(name) classes = inspect.getmembers(module, inspect.isclass) # Exclude non-scikit-learn classes classes = [cls for cls in classes if cls[1].__module__.startswith("sklearn")] for cname, cls in classes: this_incorrect = [] if cname in _DOCSTRING_IGNORES or cname.startswith("_"): continue if inspect.isabstract(cls): continue with warnings.catch_warnings(record=True) as w: cdoc = docscrape.ClassDoc(cls) if len(w): raise RuntimeError( "Error for __init__ of %s in %s:\n%s" % (cls, name, w[0]) ) # Skip checks on deprecated classes if _is_deprecated(cls.__new__): continue this_incorrect += check_docstring_parameters(cls.__init__, cdoc) for method_name in cdoc.methods: method = getattr(cls, method_name) if _is_deprecated(method): continue param_ignore = None # Now skip docstring test for y when y is None # by default for API reason if method_name in _METHODS_IGNORE_NONE_Y: sig = signature(method) if "y" in sig.parameters and sig.parameters["y"].default is None: param_ignore = ["y"] # ignore y for fit and score result = check_docstring_parameters(method, ignore=param_ignore) this_incorrect += result incorrect += this_incorrect functions = inspect.getmembers(module, inspect.isfunction) # Exclude imported functions functions = [fn for fn in functions if fn[1].__module__ == name] for fname, func in functions: # Don't test private methods / functions if fname.startswith("_"): continue if fname == "configuration" and name.endswith("setup"): continue name_ = _get_func_name(func) if not any(d in name_ for d in _DOCSTRING_IGNORES) and not _is_deprecated( func ): incorrect += check_docstring_parameters(func) 
msg = "\n".join(incorrect) if len(incorrect) > 0: raise AssertionError("Docstring Error:\n" + msg) def _construct_searchcv_instance(SearchCV): return SearchCV(LogisticRegression(), {"C": [0.1, 1]}) def _construct_compose_pipeline_instance(Estimator): # Minimal / degenerate instances: only useful to test the docstrings. if Estimator.__name__ == "ColumnTransformer": return Estimator(transformers=[("transformer", "passthrough", [0, 1])]) elif Estimator.__name__ == "Pipeline": return Estimator(steps=[("clf", LogisticRegression())]) elif Estimator.__name__ == "FeatureUnion": return Estimator(transformer_list=[("transformer", FunctionTransformer())]) def _construct_sparse_coder(Estimator): # XXX: hard-coded assumption that n_features=3 dictionary = np.array( [[0, 1, 0], [-1, -1, 2], [1, 1, 1], [0, 1, 1], [0, 2, 1]], dtype=np.float64, ) return Estimator(dictionary=dictionary) # TODO(1.10): remove copy warning filter @pytest.mark.filterwarnings( "ignore:The default value of `copy` will change from False to True in 1.10." 
) @pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") @pytest.mark.parametrize("name, Estimator", all_estimators()) def test_fit_docstring_attributes(name, Estimator): pytest.importorskip("numpydoc") from numpydoc import docscrape doc = docscrape.ClassDoc(Estimator) attributes = doc["Attributes"] if Estimator.__name__ in ( "HalvingRandomSearchCV", "RandomizedSearchCV", "HalvingGridSearchCV", "GridSearchCV", ): est = _construct_searchcv_instance(Estimator) elif Estimator.__name__ in ( "ColumnTransformer", "Pipeline", "FeatureUnion", ): est = _construct_compose_pipeline_instance(Estimator) elif Estimator.__name__ == "SparseCoder": est = _construct_sparse_coder(Estimator) elif Estimator.__name__ == "FrozenEstimator": X, y = make_classification(n_samples=20, n_features=5, random_state=0) est = Estimator(LogisticRegression().fit(X, y)) else: # TODO(devtools): use _tested_estimators instead of all_estimators in the # decorator est = next(_construct_instances(Estimator)) if Estimator.__name__ == "SelectKBest": est.set_params(k=2) elif Estimator.__name__ == "DummyClassifier": est.set_params(strategy="stratified") elif Estimator.__name__ == "CCA" or Estimator.__name__.startswith("PLS"): # default = 2 is invalid for single target est.set_params(n_components=1) elif Estimator.__name__ in ( "GaussianRandomProjection", "SparseRandomProjection", ): # default="auto" raises an error with the shape of `X` est.set_params(n_components=2) elif Estimator.__name__ == "TSNE": # default raises an error, perplexity must be less than n_samples est.set_params(perplexity=2) # TODO(1.9) remove elif Estimator.__name__ == "KBinsDiscretizer": # default raises a FutureWarning if quantile method is at default "warn" est.set_params(quantile_method="averaged_inverted_cdf") # TODO(1.10) remove elif Estimator.__name__ == "MDS": # default raises a FutureWarning est.set_params(n_init=1, init="random") # TODO(1.10) remove elif Estimator.__name__ == "LogisticRegressionCV": # 
default 'l1_ratios' value creates a FutureWarning est.set_params(l1_ratios=(0,)) # Low max iter to speed up tests: we are only interested in checking the existence # of fitted attributes. This should be invariant to whether it has converged or not. if "max_iter" in est.get_params(): est.set_params(max_iter=2) # min value for `TSNE` is 250 if Estimator.__name__ == "TSNE": est.set_params(max_iter=250) if "random_state" in est.get_params(): est.set_params(random_state=0) # In case we want to deprecate some attributes in the future skipped_attributes = {} if Estimator.__name__.endswith("Vectorizer"): # Vectorizer require some specific input data if Estimator.__name__ in ( "CountVectorizer", "HashingVectorizer", "TfidfVectorizer", ): X = [ "This is the first document.", "This document is the second document.", "And this is the third one.", "Is this the first document?", ] elif Estimator.__name__ == "DictVectorizer": X = [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}] y = None else: X, y = make_classification( n_samples=20, n_features=3, n_redundant=0, n_classes=2, random_state=2, ) y = _enforce_estimator_tags_y(est, y) X = _enforce_estimator_tags_X(est, X) if est.__sklearn_tags__().target_tags.one_d_labels: est.fit(y) elif est.__sklearn_tags__().target_tags.two_d_labels: est.fit(np.c_[y, y]) elif est.__sklearn_tags__().input_tags.three_d_array: est.fit(X[np.newaxis, ...], y) else: est.fit(X, y) for attr in attributes: if attr.name in skipped_attributes: continue desc = " ".join(attr.desc).lower() # As certain attributes are present "only" if a certain parameter is # provided, this checks if the word "only" is present in the attribute # description, and if not the attribute is required to be present. 
if "only " in desc: continue # ignore deprecation warnings with ignore_warnings(category=FutureWarning): assert hasattr(est, attr.name) fit_attr = _get_all_fitted_attributes(est) fit_attr_names = [attr.name for attr in attributes] undocumented_attrs = set(fit_attr).difference(fit_attr_names) undocumented_attrs = set(undocumented_attrs).difference(skipped_attributes) if undocumented_attrs: raise AssertionError( f"Undocumented attributes for {Estimator.__name__}: {undocumented_attrs}" ) def _get_all_fitted_attributes(estimator): "Get all the fitted attributes of an estimator including properties" # attributes fit_attr = list(estimator.__dict__.keys()) # properties with warnings.catch_warnings(): warnings.filterwarnings("error", category=FutureWarning) for name in dir(estimator.__class__): obj = getattr(estimator.__class__, name) if not isinstance(obj, property): continue # ignore properties that raises an AttributeError and deprecated # properties try: getattr(estimator, name) except (AttributeError, FutureWarning): continue fit_attr.append(name) return [k for k in fit_attr if k.endswith("_") and not k.startswith("_")]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_random_projection.py
sklearn/tests/test_random_projection.py
import functools import warnings from typing import Any, List import numpy as np import pytest import scipy.sparse as sp from sklearn.exceptions import DataDimensionalityWarning, NotFittedError from sklearn.metrics import euclidean_distances from sklearn.random_projection import ( GaussianRandomProjection, SparseRandomProjection, _gaussian_random_matrix, _sparse_random_matrix, johnson_lindenstrauss_min_dim, ) from sklearn.utils._testing import ( assert_allclose, assert_allclose_dense_sparse, assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.fixes import COO_CONTAINERS all_sparse_random_matrix: List[Any] = [_sparse_random_matrix] all_dense_random_matrix: List[Any] = [_gaussian_random_matrix] all_random_matrix = all_sparse_random_matrix + all_dense_random_matrix all_SparseRandomProjection: List[Any] = [SparseRandomProjection] all_DenseRandomProjection: List[Any] = [GaussianRandomProjection] all_RandomProjection = all_SparseRandomProjection + all_DenseRandomProjection def make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=None, sparse_format="csr", ): """Make some random data with uniformly located non zero entries with Gaussian distributed values; `sparse_format` can be `"csr"` (default) or `None` (in which case a dense array is returned). 
""" rng = np.random.RandomState(random_state) data_coo = coo_container( ( rng.randn(n_nonzeros), ( rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros), ), ), shape=(n_samples, n_features), ) if sparse_format is not None: return data_coo.asformat(sparse_format) else: return data_coo.toarray() def densify(matrix): if not sp.issparse(matrix): return matrix else: return matrix.toarray() n_samples, n_features = (10, 1000) n_nonzeros = int(n_samples * n_features / 100.0) ############################################################################### # test on JL lemma ############################################################################### @pytest.mark.parametrize( "n_samples, eps", [ ([100, 110], [0.9, 1.1]), ([90, 100], [0.1, 0.0]), ([50, -40], [0.1, 0.2]), ], ) def test_invalid_jl_domain(n_samples, eps): with pytest.raises(ValueError): johnson_lindenstrauss_min_dim(n_samples, eps=eps) def test_input_size_jl_min_dim(): with pytest.raises(ValueError): johnson_lindenstrauss_min_dim(3 * [100], eps=2 * [0.9]) johnson_lindenstrauss_min_dim( np.random.randint(1, 10, size=(10, 10)), eps=np.full((10, 10), 0.5) ) ############################################################################### # tests random matrix generation ############################################################################### def check_input_size_random_matrix(random_matrix): inputs = [(0, 0), (-1, 1), (1, -1), (1, 0), (-1, 0)] for n_components, n_features in inputs: with pytest.raises(ValueError): random_matrix(n_components, n_features) def check_size_generated(random_matrix): inputs = [(1, 5), (5, 1), (5, 5), (1, 1)] for n_components, n_features in inputs: assert random_matrix(n_components, n_features).shape == ( n_components, n_features, ) def check_zero_mean_and_unit_norm(random_matrix): # All random matrix should produce a transformation matrix # with zero mean and unit norm for each columns A = densify(random_matrix(10000, 1, random_state=0)) 
assert_array_almost_equal(0, np.mean(A), 3) assert_array_almost_equal(1.0, np.linalg.norm(A), 1) def check_input_with_sparse_random_matrix(random_matrix): n_components, n_features = 5, 10 for density in [-1.0, 0.0, 1.1]: with pytest.raises(ValueError): random_matrix(n_components, n_features, density=density) @pytest.mark.parametrize("random_matrix", all_random_matrix) def test_basic_property_of_random_matrix(random_matrix): # Check basic properties of random matrix generation check_input_size_random_matrix(random_matrix) check_size_generated(random_matrix) check_zero_mean_and_unit_norm(random_matrix) @pytest.mark.parametrize("random_matrix", all_sparse_random_matrix) def test_basic_property_of_sparse_random_matrix(random_matrix): check_input_with_sparse_random_matrix(random_matrix) random_matrix_dense = functools.partial(random_matrix, density=1.0) check_zero_mean_and_unit_norm(random_matrix_dense) def test_gaussian_random_matrix(): # Check some statical properties of Gaussian random matrix # Check that the random matrix follow the proper distribution. # Let's say that each element of a_{ij} of A is taken from # a_ij ~ N(0.0, 1 / n_components). # n_components = 100 n_features = 1000 A = _gaussian_random_matrix(n_components, n_features, random_state=0) assert_array_almost_equal(0.0, np.mean(A), 2) assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1) def test_sparse_random_matrix(): # Check some statical properties of sparse random matrix n_components = 100 n_features = 500 for density in [0.3, 1.0]: s = 1 / density A = _sparse_random_matrix( n_components, n_features, density=density, random_state=0 ) A = densify(A) # Check possible values values = np.unique(A) assert np.sqrt(s) / np.sqrt(n_components) in values assert -np.sqrt(s) / np.sqrt(n_components) in values if density == 1.0: assert np.size(values) == 2 else: assert 0.0 in values assert np.size(values) == 3 # Check that the random matrix follow the proper distribution. 
# Let's say that each element of a_{ij} of A is taken from # # - -sqrt(s) / sqrt(n_components) with probability 1 / 2s # - 0 with probability 1 - 1 / s # - +sqrt(s) / sqrt(n_components) with probability 1 / 2s # assert_almost_equal(np.mean(A == 0.0), 1 - 1 / s, decimal=2) assert_almost_equal( np.mean(A == np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2 ) assert_almost_equal( np.mean(A == -np.sqrt(s) / np.sqrt(n_components)), 1 / (2 * s), decimal=2 ) assert_almost_equal(np.var(A == 0.0, ddof=1), (1 - 1 / s) * 1 / s, decimal=2) assert_almost_equal( np.var(A == np.sqrt(s) / np.sqrt(n_components), ddof=1), (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2, ) assert_almost_equal( np.var(A == -np.sqrt(s) / np.sqrt(n_components), ddof=1), (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2, ) ############################################################################### # tests on random projection transformer ############################################################################### def test_random_projection_transformer_invalid_input(): n_components = "auto" fit_data = [[0, 1, 2]] for RandomProjection in all_RandomProjection: with pytest.raises(ValueError): RandomProjection(n_components=n_components).fit(fit_data) @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_try_to_transform_before_fit(coo_container, global_random_seed): data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=global_random_seed, sparse_format=None, ) for RandomProjection in all_RandomProjection: with pytest.raises(NotFittedError): RandomProjection(n_components="auto").transform(data) @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_too_many_samples_to_find_a_safe_embedding(coo_container, global_random_seed): data = make_sparse_random_data( coo_container, n_samples=1000, n_features=100, n_nonzeros=1000, random_state=global_random_seed, sparse_format=None, ) for RandomProjection in all_RandomProjection: rp = 
RandomProjection(n_components="auto", eps=0.1) expected_msg = ( "eps=0.100000 and n_samples=1000 lead to a target dimension" " of 5920 which is larger than the original space with" " n_features=100" ) with pytest.raises(ValueError, match=expected_msg): rp.fit(data) @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_random_projection_embedding_quality(coo_container): data = make_sparse_random_data( coo_container, n_samples=8, n_features=5000, n_nonzeros=15000, random_state=0, sparse_format=None, ) eps = 0.2 original_distances = euclidean_distances(data, squared=True) original_distances = original_distances.ravel() non_identical = original_distances != 0.0 # remove 0 distances to avoid division by 0 original_distances = original_distances[non_identical] for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components="auto", eps=eps, random_state=0) projected = rp.fit_transform(data) projected_distances = euclidean_distances(projected, squared=True) projected_distances = projected_distances.ravel() # remove 0 distances to avoid division by 0 projected_distances = projected_distances[non_identical] distances_ratio = projected_distances / original_distances # check that the automatically tuned values for the density respect the # contract for eps: pairwise distances are preserved according to the # Johnson-Lindenstrauss lemma assert distances_ratio.max() < 1 + eps assert 1 - eps < distances_ratio.min() @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_SparseRandomProj_output_representation(coo_container): dense_data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=0, sparse_format=None, ) sparse_data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=0, sparse_format="csr", ) for SparseRandomProj in all_SparseRandomProjection: # when using sparse input, the projected data can be forced to be a # dense numpy array rp = 
SparseRandomProj(n_components=10, dense_output=True, random_state=0) rp.fit(dense_data) assert isinstance(rp.transform(dense_data), np.ndarray) assert isinstance(rp.transform(sparse_data), np.ndarray) # the output can be left to a sparse matrix instead rp = SparseRandomProj(n_components=10, dense_output=False, random_state=0) rp = rp.fit(dense_data) # output for dense input will stay dense: assert isinstance(rp.transform(dense_data), np.ndarray) # output for sparse output will be sparse: assert sp.issparse(rp.transform(sparse_data)) @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_correct_RandomProjection_dimensions_embedding( coo_container, global_random_seed ): data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=global_random_seed, sparse_format=None, ) for RandomProjection in all_RandomProjection: rp = RandomProjection(n_components="auto", random_state=0, eps=0.5).fit(data) # the number of components is adjusted from the shape of the training # set assert rp.n_components == "auto" assert rp.n_components_ == 110 if RandomProjection in all_SparseRandomProjection: assert rp.density == "auto" assert_almost_equal(rp.density_, 0.03, 2) assert rp.components_.shape == (110, n_features) projected_1 = rp.transform(data) assert projected_1.shape == (n_samples, 110) # once the RP is 'fitted' the projection is always the same projected_2 = rp.transform(data) assert_array_equal(projected_1, projected_2) # fit transform with same random seed will lead to the same results rp2 = RandomProjection(random_state=0, eps=0.5) projected_3 = rp2.fit_transform(data) assert_array_equal(projected_1, projected_3) # Try to transform with an input X of size different from fitted. 
with pytest.raises(ValueError): rp.transform(data[:, 1:5]) # it is also possible to fix the number of components and the density # level if RandomProjection in all_SparseRandomProjection: rp = RandomProjection(n_components=100, density=0.001, random_state=0) projected = rp.fit_transform(data) assert projected.shape == (n_samples, 100) assert rp.components_.shape == (100, n_features) assert rp.components_.nnz < 115 # close to 1% density assert 85 < rp.components_.nnz # close to 1% density @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_warning_n_components_greater_than_n_features( coo_container, global_random_seed ): n_features = 20 n_samples = 5 n_nonzeros = int(n_features / 4) data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=global_random_seed, sparse_format=None, ) for RandomProjection in all_RandomProjection: with pytest.warns(DataDimensionalityWarning): RandomProjection(n_components=n_features + 1).fit(data) @pytest.mark.parametrize("coo_container", COO_CONTAINERS) def test_works_with_sparse_data(coo_container, global_random_seed): n_features = 20 n_samples = 5 n_nonzeros = int(n_features / 4) dense_data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=global_random_seed, sparse_format=None, ) sparse_data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=global_random_seed, sparse_format="csr", ) for RandomProjection in all_RandomProjection: rp_dense = RandomProjection(n_components=3, random_state=1).fit(dense_data) rp_sparse = RandomProjection(n_components=3, random_state=1).fit(sparse_data) assert_array_almost_equal( densify(rp_dense.components_), densify(rp_sparse.components_) ) def test_johnson_lindenstrauss_min_dim(): """Test Johnson-Lindenstrauss for small eps. Regression test for #17111: before #19374, 32-bit systems would fail. 
""" assert johnson_lindenstrauss_min_dim(100, eps=1e-5) == 368416070986 @pytest.mark.parametrize("coo_container", COO_CONTAINERS) @pytest.mark.parametrize("random_projection_cls", all_RandomProjection) def test_random_projection_feature_names_out( coo_container, random_projection_cls, global_random_seed ): data = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros, random_state=global_random_seed, sparse_format=None, ) random_projection = random_projection_cls(n_components=2) random_projection.fit(data) names_out = random_projection.get_feature_names_out() class_name_lower = random_projection_cls.__name__.lower() expected_names_out = np.array( [f"{class_name_lower}{i}" for i in range(random_projection.n_components_)], dtype=object, ) assert_array_equal(names_out, expected_names_out) @pytest.mark.parametrize("coo_container", COO_CONTAINERS) @pytest.mark.parametrize("n_samples", (2, 9, 10, 11, 1000)) @pytest.mark.parametrize("n_features", (2, 9, 10, 11, 1000)) @pytest.mark.parametrize("random_projection_cls", all_RandomProjection) @pytest.mark.parametrize("compute_inverse_components", [True, False]) def test_inverse_transform( coo_container, n_samples, n_features, random_projection_cls, compute_inverse_components, global_random_seed, ): n_components = 10 random_projection = random_projection_cls( n_components=n_components, compute_inverse_components=compute_inverse_components, random_state=global_random_seed, ) X_dense = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros=n_samples * n_features // 100 + 1, random_state=global_random_seed, sparse_format=None, ) X_csr = make_sparse_random_data( coo_container, n_samples, n_features, n_nonzeros=n_samples * n_features // 100 + 1, random_state=global_random_seed, sparse_format="csr", ) for X in [X_dense, X_csr]: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=( "The number of components is higher than the number of features" ), 
category=DataDimensionalityWarning, ) projected = random_projection.fit_transform(X) if compute_inverse_components: assert hasattr(random_projection, "inverse_components_") inv_components = random_projection.inverse_components_ assert inv_components.shape == (n_features, n_components) projected_back = random_projection.inverse_transform(projected) assert projected_back.shape == X.shape projected_again = random_projection.transform(projected_back) if hasattr(projected, "toarray"): projected = projected.toarray() assert_allclose(projected, projected_again, rtol=1e-7, atol=1e-10) @pytest.mark.parametrize("random_projection_cls", all_RandomProjection) @pytest.mark.parametrize( "input_dtype, expected_dtype", ( (np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64), ), ) def test_random_projection_dtype_match( random_projection_cls, input_dtype, expected_dtype ): # Verify output matrix dtype rng = np.random.RandomState(42) X = rng.rand(25, 3000) rp = random_projection_cls(random_state=0) transformed = rp.fit_transform(X.astype(input_dtype)) assert rp.components_.dtype == expected_dtype assert transformed.dtype == expected_dtype @pytest.mark.parametrize("random_projection_cls", all_RandomProjection) def test_random_projection_numerical_consistency(random_projection_cls): # Verify numerical consistency among np.float32 and np.float64 atol = 1e-5 rng = np.random.RandomState(42) X = rng.rand(25, 3000) rp_32 = random_projection_cls(random_state=0) rp_64 = random_projection_cls(random_state=0) projection_32 = rp_32.fit_transform(X.astype(np.float32)) projection_64 = rp_64.fit_transform(X.astype(np.float64)) assert_allclose(projection_64, projection_32, atol=atol) assert_allclose_dense_sparse(rp_32.components_, rp_64.components_)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_min_dependencies_readme.py
sklearn/tests/test_min_dependencies_readme.py
"""Tests for the minimum dependencies in README.rst and pyproject.toml""" import os import re import tomllib from collections import defaultdict from pathlib import Path import pytest import sklearn from sklearn._min_dependencies import dependent_packages from sklearn.utils.fixes import parse_version # minimal dependencies and pyproject definitions for testing the pyproject tests TOY_MIN_DEPENDENCIES_PY_INFO = { "joblib": ("1.3.0", "install"), "scipy": ("1.10.0", "build, install"), "conda-lock": ("3.0.1", "maintenance"), } TOY_MATCHING_PYPROJECT_SECTIONS = """ [project] dependencies = ["joblib>=1.3.0", "scipy>=1.10.0"] [project.optional-dependencies] build = ["scipy>=1.10.0"] install = ["joblib>=1.3.0", "scipy>=1.10.0"] maintenance = ["conda-lock==3.0.1"] [build-system] requires = ["scipy>=1.10.0"] """ TOY_MATCHING_PYPROJECT_SECTIONS_WITH_UPPER_BOUND = """ [project] dependencies = ["joblib>=1.3.0,<2.0", "scipy>=1.10.0"] [project.optional-dependencies] build = ["scipy>=1.10.0,<1.19.0"] install = ["joblib>=1.3.0,<2.0", "scipy>=1.10.0"] maintenance = ["conda-lock==3.0.1"] [build-system] requires = ["scipy>=1.10.0,<1.19.0"] """ TOY_WRONG_SYMBOL_PYPROJECT_SECTIONS = """ [project] dependencies = ["scipy<1.10.0"] [project.optional-dependencies] build = ["scipy>=1.10.0"] install = ["scipy>=1.10.0"] maintenance = ["conda-lock==3.0.1"] [build-system] requires = ["scipy>=1.10.0"] """ TOY_MISSING_PACKAGE_PYPROJECT_SECTIONS = """ [project] dependencies = ["scipy>=1.10.0"] [project.optional-dependencies] build = ["scipy>=1.10.0"] install = ["scipy>=1.10.0"] maintenance = ["conda-lock==3.0.1"] [build-system] requires = ["scipy>=1.10.0"] """ TOY_ADDITIONAL_PACKAGE_PYPROJECT_SECTIONS = """ [project] dependencies = ["joblib>=1.3.0", "scipy>=1.10.0"] [project.optional-dependencies] build = ["scipy>=1.10.0", "package_not_in_min_dependencies_py_file>=4.2"] install = ["joblib>=1.3.0", "scipy>=1.10.0"] maintenance = ["conda-lock==3.0.1"] [build-system] requires = ["scipy>=1.10.0"] """ 
TOY_NON_MATCHING_VERSION_PYPROJECT_SECTIONS = """ [project] dependencies = ["joblib>=1.42.0", "scipy>=1.10.0"] [project.optional-dependencies] build = ["scipy>=1.10.0"] install = ["joblib>=1.3.0", "scipy>=1.10.0"] maintenance = ["conda-lock==3.0.1"] [build-system] requires = ["scipy>=1.10.0"] """ def test_min_dependencies_readme(): # Test that the minimum dependencies in the README.rst file are # consistent with the minimum dependencies defined at the file: # sklearn/_min_dependencies.py pattern = re.compile( r"\.\. \|" r"([A-Za-z-]+)" r"MinVersion\| replace::" r"( [0-9]+\.[0-9]+(\.[0-9]+)?)" ) readme_path = Path(sklearn.__file__).parent.parent readme_file = readme_path / "README.rst" if not os.path.exists(readme_file): # Skip the test if the README.rst file is not available. # For instance, when installing scikit-learn from wheels pytest.skip("The README.rst file is not available.") with readme_file.open("r") as f: for line in f: matched = pattern.match(line) if not matched: continue package, version = matched.group(1), matched.group(2) package = package.lower() if package in dependent_packages: version = parse_version(version) min_version = parse_version(dependent_packages[package][0]) message = ( f"{package} has inconsistent minimum versions in README.rst and" f" _min_depencies.py: {version} != {min_version}" ) assert version == min_version, message def extract_packages_and_pyproject_tags(dependencies): min_depencies_tag_to_packages_without_version = defaultdict(list) for package, (min_version, tags) in dependencies.items(): for t in tags.split(", "): min_depencies_tag_to_packages_without_version[t].append(package) pyproject_section_to_min_dependencies_tag = { "build-system.requires": "build", "project.dependencies": "install", } for tag in min_depencies_tag_to_packages_without_version: section = f"project.optional-dependencies.{tag}" pyproject_section_to_min_dependencies_tag[section] = tag return ( min_depencies_tag_to_packages_without_version, 
pyproject_section_to_min_dependencies_tag, ) def check_pyproject_sections(pyproject_toml, min_dependencies): packages, pyproject_tags = extract_packages_and_pyproject_tags(min_dependencies) for pyproject_section, min_dependencies_tag in pyproject_tags.items(): # Special situation for numpy: we have numpy>=2 in # build-system.requires to make sure we build wheels against numpy>=2. # TODO remove this when our minimum supported numpy version is >=2. skip_version_check_for = ( ["numpy"] if pyproject_section == "build-system.requires" else [] ) expected_packages = packages[min_dependencies_tag] pyproject_section_keys = pyproject_section.split(".") info = pyproject_toml # iterate through nested keys to get packages and version for key in pyproject_section_keys: info = info[key] pyproject_build_min_versions = {} # Assuming pyproject.toml build section has something like "my-package>=2.3.0" pattern = r"([\w-]+)\s*[>=]=\s*([\d\w.]+)" for requirement in info: match = re.search(pattern, requirement) if match is None: raise NotImplementedError( f"{requirement} does not match expected regex {pattern!r}. 
" "Only >= and == are supported for version requirements" ) package, version = match.group(1), match.group(2) pyproject_build_min_versions[package] = version msg = f"Packages in {pyproject_section} differ from _min_depencies.py" assert sorted(pyproject_build_min_versions) == sorted(expected_packages), msg for package, version in pyproject_build_min_versions.items(): version = parse_version(version) expected_min_version = parse_version(min_dependencies[package][0]) if package in skip_version_check_for: continue message = ( f"{package} has inconsistent minimum versions in pyproject.toml and" f" _min_depencies.py: {version} != {expected_min_version}" ) assert version == expected_min_version, message def test_min_dependencies_pyproject_toml(): """Check versions in pyproject.toml is consistent with _min_dependencies.""" root_directory = Path(sklearn.__file__).parent.parent pyproject_toml_path = root_directory / "pyproject.toml" if not pyproject_toml_path.exists(): # Skip the test if the pyproject.toml file is not available. # For instance, when installing scikit-learn from wheels pytest.skip("pyproject.toml is not available.") with pyproject_toml_path.open("rb") as f: pyproject_toml = tomllib.load(f) check_pyproject_sections(pyproject_toml, dependent_packages) @pytest.mark.parametrize( "example_pyproject", [ TOY_MATCHING_PYPROJECT_SECTIONS, TOY_MATCHING_PYPROJECT_SECTIONS_WITH_UPPER_BOUND, ], ) def test_check_matching_pyproject_section(example_pyproject): """Test the version check for matching packages.""" pyproject_toml = tomllib.loads(example_pyproject) check_pyproject_sections(pyproject_toml, TOY_MIN_DEPENDENCIES_PY_INFO) @pytest.mark.parametrize( "example_non_matching_pyproject, error_msg", [ ( TOY_WRONG_SYMBOL_PYPROJECT_SECTIONS, ".* does not match expected regex .*. 
" "Only >= and == are supported for version requirements", ), ( TOY_MISSING_PACKAGE_PYPROJECT_SECTIONS, "Packages in .* differ from _min_depencies.py", ), ( TOY_ADDITIONAL_PACKAGE_PYPROJECT_SECTIONS, "Packages in .* differ from _min_depencies.py", ), ( TOY_NON_MATCHING_VERSION_PYPROJECT_SECTIONS, ".* has inconsistent minimum versions in pyproject.toml and" " _min_depencies.py: .* != .*", ), ], ) def test_check_non_matching_pyproject_section( example_non_matching_pyproject, error_msg ): """Test the version check for non-matching packages and versions.""" pyproject_toml = tomllib.loads(example_non_matching_pyproject) with pytest.raises(Exception, match=error_msg): check_pyproject_sections(pyproject_toml, TOY_MIN_DEPENDENCIES_PY_INFO)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_dummy.py
sklearn/tests/test_dummy.py
import warnings import numpy as np import pytest import scipy.sparse as sp from sklearn.base import clone from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.exceptions import NotFittedError from sklearn.utils._testing import ( assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.fixes import CSC_CONTAINERS from sklearn.utils.stats import _weighted_percentile def _check_predict_proba(clf, X, y): proba = clf.predict_proba(X) # We know that we can have division by zero with warnings.catch_warnings(): warnings.filterwarnings("ignore", "divide by zero encountered in log") log_proba = clf.predict_log_proba(X) y = np.atleast_1d(y) if y.ndim == 1: y = np.reshape(y, (-1, 1)) n_outputs = y.shape[1] n_samples = len(X) if n_outputs == 1: proba = [proba] log_proba = [log_proba] for k in range(n_outputs): assert proba[k].shape[0] == n_samples assert proba[k].shape[1] == len(np.unique(y[:, k])) assert_array_almost_equal(proba[k].sum(axis=1), np.ones(len(X))) # We know that we can have division by zero with warnings.catch_warnings(): warnings.filterwarnings("ignore", "divide by zero encountered in log") assert_array_almost_equal(np.log(proba[k]), log_proba[k]) def _check_behavior_2d(clf): # 1d case X = np.array([[0], [0], [0], [0]]) # ignored y = np.array([1, 2, 1, 1]) est = clone(clf) est.fit(X, y) y_pred = est.predict(X) assert y.shape == y_pred.shape # 2d case y = np.array([[1, 0], [2, 0], [1, 0], [1, 3]]) est = clone(clf) est.fit(X, y) y_pred = est.predict(X) assert y.shape == y_pred.shape def _check_behavior_2d_for_constant(clf): # 2d case only X = np.array([[0], [0], [0], [0]]) # ignored y = np.array([[1, 0, 5, 4, 3], [2, 0, 1, 2, 5], [1, 0, 4, 5, 2], [1, 3, 3, 2, 0]]) est = clone(clf) est.fit(X, y) y_pred = est.predict(X) assert y.shape == y_pred.shape def _check_equality_regressor(statistic, y_learn, y_pred_learn, y_test, y_pred_test): assert_array_almost_equal(np.tile(statistic, (y_learn.shape[0], 1)), y_pred_learn) 
assert_array_almost_equal(np.tile(statistic, (y_test.shape[0], 1)), y_pred_test) def test_feature_names_in_and_n_features_in_(global_random_seed, n_samples=10): pd = pytest.importorskip("pandas") random_state = np.random.RandomState(seed=global_random_seed) X = pd.DataFrame([[0]] * n_samples, columns=["feature_1"]) y = random_state.rand(n_samples) est = DummyRegressor().fit(X, y) assert hasattr(est, "feature_names_in_") assert hasattr(est, "n_features_in_") est = DummyClassifier().fit(X, y) assert hasattr(est, "feature_names_in_") assert hasattr(est, "n_features_in_") def test_most_frequent_and_prior_strategy(): X = [[0], [0], [0], [0]] # ignored y = [1, 2, 1, 1] for strategy in ("most_frequent", "prior"): clf = DummyClassifier(strategy=strategy, random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), np.ones(len(X))) _check_predict_proba(clf, X, y) if strategy == "prior": assert_array_almost_equal( clf.predict_proba([X[0]]), clf.class_prior_.reshape((1, -1)) ) else: assert_array_almost_equal( clf.predict_proba([X[0]]), clf.class_prior_.reshape((1, -1)) > 0.5 ) def test_most_frequent_and_prior_strategy_with_2d_column_y(): # non-regression test added in # https://github.com/scikit-learn/scikit-learn/pull/13545 X = [[0], [0], [0], [0]] y_1d = [1, 2, 1, 1] y_2d = [[1], [2], [1], [1]] for strategy in ("most_frequent", "prior"): clf_1d = DummyClassifier(strategy=strategy, random_state=0) clf_2d = DummyClassifier(strategy=strategy, random_state=0) clf_1d.fit(X, y_1d) clf_2d.fit(X, y_2d) assert_array_equal(clf_1d.predict(X), clf_2d.predict(X)) def test_most_frequent_and_prior_strategy_multioutput(): X = [[0], [0], [0], [0]] # ignored y = np.array([[1, 0], [2, 0], [1, 0], [1, 3]]) n_samples = len(X) for strategy in ("prior", "most_frequent"): clf = DummyClassifier(strategy=strategy, random_state=0) clf.fit(X, y) assert_array_equal( clf.predict(X), np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))]), ) _check_predict_proba(clf, X, y) 
_check_behavior_2d(clf) def test_stratified_strategy(global_random_seed): X = [[0]] * 5 # ignored y = [1, 2, 1, 1, 2] clf = DummyClassifier(strategy="stratified", random_state=global_random_seed) clf.fit(X, y) X = [[0]] * 500 y_pred = clf.predict(X) p = np.bincount(y_pred) / float(len(X)) assert_almost_equal(p[1], 3.0 / 5, decimal=1) assert_almost_equal(p[2], 2.0 / 5, decimal=1) _check_predict_proba(clf, X, y) def test_stratified_strategy_multioutput(global_random_seed): X = [[0]] * 5 # ignored y = np.array([[2, 1], [2, 2], [1, 1], [1, 2], [1, 1]]) clf = DummyClassifier(strategy="stratified", random_state=global_random_seed) clf.fit(X, y) X = [[0]] * 500 y_pred = clf.predict(X) for k in range(y.shape[1]): p = np.bincount(y_pred[:, k]) / float(len(X)) assert_almost_equal(p[1], 3.0 / 5, decimal=1) assert_almost_equal(p[2], 2.0 / 5, decimal=1) _check_predict_proba(clf, X, y) _check_behavior_2d(clf) def test_uniform_strategy(global_random_seed): X = [[0]] * 4 # ignored y = [1, 2, 1, 1] clf = DummyClassifier(strategy="uniform", random_state=global_random_seed) clf.fit(X, y) X = [[0]] * 500 y_pred = clf.predict(X) p = np.bincount(y_pred) / float(len(X)) assert_almost_equal(p[1], 0.5, decimal=1) assert_almost_equal(p[2], 0.5, decimal=1) _check_predict_proba(clf, X, y) def test_uniform_strategy_multioutput(global_random_seed): X = [[0]] * 4 # ignored y = np.array([[2, 1], [2, 2], [1, 2], [1, 1]]) clf = DummyClassifier(strategy="uniform", random_state=global_random_seed) clf.fit(X, y) X = [[0]] * 500 y_pred = clf.predict(X) for k in range(y.shape[1]): p = np.bincount(y_pred[:, k]) / float(len(X)) assert_almost_equal(p[1], 0.5, decimal=1) assert_almost_equal(p[2], 0.5, decimal=1) _check_predict_proba(clf, X, y) _check_behavior_2d(clf) def test_string_labels(): X = [[0]] * 5 y = ["paris", "paris", "tokyo", "amsterdam", "berlin"] clf = DummyClassifier(strategy="most_frequent") clf.fit(X, y) assert_array_equal(clf.predict(X), ["paris"] * 5) @pytest.mark.parametrize( "y,y_test", 
[ ([2, 1, 1, 1], [2, 2, 1, 1]), ( np.array([[2, 2], [1, 1], [1, 1], [1, 1]]), np.array([[2, 2], [2, 2], [1, 1], [1, 1]]), ), ], ) def test_classifier_score_with_None(y, y_test): clf = DummyClassifier(strategy="most_frequent") clf.fit(None, y) assert clf.score(None, y_test) == 0.5 @pytest.mark.parametrize( "strategy", ["stratified", "most_frequent", "prior", "uniform", "constant"] ) def test_classifier_prediction_independent_of_X(strategy, global_random_seed): y = [0, 2, 1, 1] X1 = [[0]] * 4 clf1 = DummyClassifier( strategy=strategy, random_state=global_random_seed, constant=0 ) clf1.fit(X1, y) predictions1 = clf1.predict(X1) X2 = [[1]] * 4 clf2 = DummyClassifier( strategy=strategy, random_state=global_random_seed, constant=0 ) clf2.fit(X2, y) predictions2 = clf2.predict(X2) assert_array_equal(predictions1, predictions2) def test_mean_strategy_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X = [[0]] * 4 # ignored y = random_state.randn(4) reg = DummyRegressor() reg.fit(X, y) assert_array_equal(reg.predict(X), [np.mean(y)] * len(X)) def test_mean_strategy_multioutput_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X_learn = random_state.randn(10, 10) y_learn = random_state.randn(10, 5) mean = np.mean(y_learn, axis=0).reshape((1, -1)) X_test = random_state.randn(20, 10) y_test = random_state.randn(20, 5) # Correctness oracle est = DummyRegressor() est.fit(X_learn, y_learn) y_pred_learn = est.predict(X_learn) y_pred_test = est.predict(X_test) _check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test) _check_behavior_2d(est) def test_regressor_exceptions(): reg = DummyRegressor() with pytest.raises(NotFittedError): reg.predict([]) def test_median_strategy_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X = [[0]] * 5 # ignored y = random_state.randn(5) reg = DummyRegressor(strategy="median") reg.fit(X, y) 
assert_array_equal(reg.predict(X), [np.median(y)] * len(X)) def test_median_strategy_multioutput_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X_learn = random_state.randn(10, 10) y_learn = random_state.randn(10, 5) median = np.median(y_learn, axis=0).reshape((1, -1)) X_test = random_state.randn(20, 10) y_test = random_state.randn(20, 5) # Correctness oracle est = DummyRegressor(strategy="median") est.fit(X_learn, y_learn) y_pred_learn = est.predict(X_learn) y_pred_test = est.predict(X_test) _check_equality_regressor(median, y_learn, y_pred_learn, y_test, y_pred_test) _check_behavior_2d(est) def test_quantile_strategy_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X = [[0]] * 5 # ignored y = random_state.randn(5) reg = DummyRegressor(strategy="quantile", quantile=0.5) reg.fit(X, y) assert_array_equal(reg.predict(X), [np.median(y)] * len(X)) reg = DummyRegressor(strategy="quantile", quantile=0) reg.fit(X, y) assert_array_equal(reg.predict(X), [np.min(y)] * len(X)) reg = DummyRegressor(strategy="quantile", quantile=1) reg.fit(X, y) assert_array_equal(reg.predict(X), [np.max(y)] * len(X)) reg = DummyRegressor(strategy="quantile", quantile=0.3) reg.fit(X, y) assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X)) def test_quantile_strategy_multioutput_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X_learn = random_state.randn(10, 10) y_learn = random_state.randn(10, 5) median = np.median(y_learn, axis=0).reshape((1, -1)) quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1)) X_test = random_state.randn(20, 10) y_test = random_state.randn(20, 5) # Correctness oracle est = DummyRegressor(strategy="quantile", quantile=0.5) est.fit(X_learn, y_learn) y_pred_learn = est.predict(X_learn) y_pred_test = est.predict(X_test) _check_equality_regressor(median, y_learn, y_pred_learn, y_test, y_pred_test) 
_check_behavior_2d(est) # Correctness oracle est = DummyRegressor(strategy="quantile", quantile=0.8) est.fit(X_learn, y_learn) y_pred_learn = est.predict(X_learn) y_pred_test = est.predict(X_test) _check_equality_regressor( quantile_values, y_learn, y_pred_learn, y_test, y_pred_test ) _check_behavior_2d(est) def test_quantile_invalid(): X = [[0]] * 5 # ignored y = [0] * 5 # ignored est = DummyRegressor(strategy="quantile", quantile=None) err_msg = ( "When using `strategy='quantile', you have to specify the desired quantile" ) with pytest.raises(ValueError, match=err_msg): est.fit(X, y) def test_quantile_strategy_empty_train(): est = DummyRegressor(strategy="quantile", quantile=0.4) with pytest.raises(IndexError): est.fit([], []) def test_constant_strategy_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X = [[0]] * 5 # ignored y = random_state.randn(5) reg = DummyRegressor(strategy="constant", constant=[43]) reg.fit(X, y) assert_array_equal(reg.predict(X), [43] * len(X)) reg = DummyRegressor(strategy="constant", constant=43) reg.fit(X, y) assert_array_equal(reg.predict(X), [43] * len(X)) # non-regression test for #22478 assert not isinstance(reg.constant, np.ndarray) def test_constant_strategy_multioutput_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X_learn = random_state.randn(10, 10) y_learn = random_state.randn(10, 5) # test with 2d array constants = random_state.randn(5) X_test = random_state.randn(20, 10) y_test = random_state.randn(20, 5) # Correctness oracle est = DummyRegressor(strategy="constant", constant=constants) est.fit(X_learn, y_learn) y_pred_learn = est.predict(X_learn) y_pred_test = est.predict(X_test) _check_equality_regressor(constants, y_learn, y_pred_learn, y_test, y_pred_test) _check_behavior_2d_for_constant(est) def test_y_mean_attribute_regressor(): X = [[0]] * 5 y = [1, 2, 4, 6, 8] # when strategy = 'mean' est = DummyRegressor(strategy="mean") 
est.fit(X, y) assert est.constant_ == np.mean(y) def test_constants_not_specified_regressor(): X = [[0]] * 5 y = [1, 2, 4, 6, 8] est = DummyRegressor(strategy="constant") err_msg = "Constant target value has to be specified" with pytest.raises(TypeError, match=err_msg): est.fit(X, y) def test_constant_size_multioutput_regressor(global_random_seed): random_state = np.random.RandomState(seed=global_random_seed) X = random_state.randn(10, 10) y = random_state.randn(10, 5) est = DummyRegressor(strategy="constant", constant=[1, 2, 3, 4]) err_msg = r"Constant target value should have shape \(5, 1\)." with pytest.raises(ValueError, match=err_msg): est.fit(X, y) def test_constant_strategy(): X = [[0], [0], [0], [0]] # ignored y = [2, 1, 2, 2] clf = DummyClassifier(strategy="constant", random_state=0, constant=1) clf.fit(X, y) assert_array_equal(clf.predict(X), np.ones(len(X))) _check_predict_proba(clf, X, y) X = [[0], [0], [0], [0]] # ignored y = ["two", "one", "two", "two"] clf = DummyClassifier(strategy="constant", random_state=0, constant="one") clf.fit(X, y) assert_array_equal(clf.predict(X), np.array(["one"] * 4)) _check_predict_proba(clf, X, y) def test_constant_strategy_multioutput(): X = [[0], [0], [0], [0]] # ignored y = np.array([[2, 3], [1, 3], [2, 3], [2, 0]]) n_samples = len(X) clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0]) clf.fit(X, y) assert_array_equal( clf.predict(X), np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))]) ) _check_predict_proba(clf, X, y) @pytest.mark.parametrize( "y, params, err_msg", [ ([2, 1, 2, 2], {"random_state": 0}, "Constant.*has to be specified"), ([2, 1, 2, 2], {"constant": [2, 0]}, "Constant.*should have shape"), ( np.transpose([[2, 1, 2, 2], [2, 1, 2, 2]]), {"constant": 2}, "Constant.*should have shape", ), ( [2, 1, 2, 2], {"constant": "my-constant"}, "constant=my-constant.*Possible values.*\\[1, 2]", ), ( np.transpose([[2, 1, 2, 2], [2, 1, 2, 2]]), {"constant": [2, "unknown"]}, 
"constant=\\[2, 'unknown'].*Possible values.*\\[1, 2]", ), ], ids=[ "no-constant", "too-many-constant", "not-enough-output", "single-output", "multi-output", ], ) def test_constant_strategy_exceptions(y, params, err_msg): X = [[0], [0], [0], [0]] clf = DummyClassifier(strategy="constant", **params) with pytest.raises(ValueError, match=err_msg): clf.fit(X, y) def test_classification_sample_weight(): X = [[0], [0], [1]] y = [0, 1, 0] sample_weight = [0.1, 1.0, 0.1] clf = DummyClassifier(strategy="stratified").fit(X, y, sample_weight) assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1.0 / 1.2]) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_constant_strategy_sparse_target(csc_container): X = [[0]] * 5 # ignored y = csc_container(np.array([[0, 1], [4, 0], [1, 1], [1, 4], [1, 1]])) n_samples = len(X) clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0]) clf.fit(X, y) y_pred = clf.predict(X) assert sp.issparse(y_pred) assert_array_equal( y_pred.toarray(), np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))]) ) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_uniform_strategy_sparse_target_warning(global_random_seed, csc_container): X = [[0]] * 5 # ignored y = csc_container(np.array([[2, 1], [2, 2], [1, 4], [4, 2], [1, 1]])) clf = DummyClassifier(strategy="uniform", random_state=global_random_seed) with pytest.warns(UserWarning, match="the uniform strategy would not save memory"): clf.fit(X, y) X = [[0]] * 500 y_pred = clf.predict(X) for k in range(y.shape[1]): p = np.bincount(y_pred[:, k]) / float(len(X)) assert_almost_equal(p[1], 1 / 3, decimal=1) assert_almost_equal(p[2], 1 / 3, decimal=1) assert_almost_equal(p[4], 1 / 3, decimal=1) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_stratified_strategy_sparse_target(global_random_seed, csc_container): X = [[0]] * 5 # ignored y = csc_container(np.array([[4, 1], [0, 0], [1, 1], [1, 4], [1, 1]])) clf = 
DummyClassifier(strategy="stratified", random_state=global_random_seed) clf.fit(X, y) X = [[0]] * 500 y_pred = clf.predict(X) assert sp.issparse(y_pred) y_pred = y_pred.toarray() for k in range(y.shape[1]): p = np.bincount(y_pred[:, k]) / float(len(X)) assert_almost_equal(p[1], 3.0 / 5, decimal=1) assert_almost_equal(p[0], 1.0 / 5, decimal=1) assert_almost_equal(p[4], 1.0 / 5, decimal=1) @pytest.mark.parametrize("csc_container", CSC_CONTAINERS) def test_most_frequent_and_prior_strategy_sparse_target(csc_container): X = [[0]] * 5 # ignored y = csc_container(np.array([[1, 0], [1, 3], [4, 0], [0, 1], [1, 0]])) n_samples = len(X) y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))]) for strategy in ("most_frequent", "prior"): clf = DummyClassifier(strategy=strategy, random_state=0) clf.fit(X, y) y_pred = clf.predict(X) assert sp.issparse(y_pred) assert_array_equal(y_pred.toarray(), y_expected) def test_dummy_regressor_sample_weight(global_random_seed, n_samples=10): random_state = np.random.RandomState(seed=global_random_seed) X = [[0]] * n_samples y = random_state.rand(n_samples) sample_weight = random_state.rand(n_samples) est = DummyRegressor(strategy="mean").fit(X, y, sample_weight) assert est.constant_ == np.average(y, weights=sample_weight) est = DummyRegressor(strategy="median").fit(X, y, sample_weight) assert est.constant_ == _weighted_percentile(y, sample_weight, 50.0) est = DummyRegressor(strategy="quantile", quantile=0.95).fit(X, y, sample_weight) assert est.constant_ == _weighted_percentile(y, sample_weight, 95.0) def test_dummy_regressor_on_3D_array(): X = np.array([[["foo"]], [["bar"]], [["baz"]]]) y = np.array([2, 2, 2]) y_expected = np.array([2, 2, 2]) cls = DummyRegressor() cls.fit(X, y) y_pred = cls.predict(X) assert_array_equal(y_pred, y_expected) def test_dummy_classifier_on_3D_array(): X = np.array([[["foo"]], [["bar"]], [["baz"]]]) y = [2, 2, 2] y_expected = [2, 2, 2] y_proba_expected = [[1], [1], [1]] cls = 
DummyClassifier(strategy="stratified") cls.fit(X, y) y_pred = cls.predict(X) y_pred_proba = cls.predict_proba(X) assert_array_equal(y_pred, y_expected) assert_array_equal(y_pred_proba, y_proba_expected) def test_dummy_regressor_return_std(): X = [[0]] * 3 # ignored y = np.array([2, 2, 2]) y_std_expected = np.array([0, 0, 0]) cls = DummyRegressor() cls.fit(X, y) y_pred_list = cls.predict(X, return_std=True) # there should be two elements when return_std is True assert len(y_pred_list) == 2 # the second element should be all zeros assert_array_equal(y_pred_list[1], y_std_expected) @pytest.mark.parametrize( "y,y_test", [ ([1, 1, 1, 2], [1.25] * 4), (np.array([[2, 2], [1, 1], [1, 1], [1, 1]]), [[1.25, 1.25]] * 4), ], ) def test_regressor_score_with_None(y, y_test): reg = DummyRegressor() reg.fit(None, y) assert reg.score(None, y_test) == 1.0 @pytest.mark.parametrize("strategy", ["mean", "median", "quantile", "constant"]) def test_regressor_prediction_independent_of_X(strategy): y = [0, 2, 1, 1] X1 = [[0]] * 4 reg1 = DummyRegressor(strategy=strategy, constant=0, quantile=0.7) reg1.fit(X1, y) predictions1 = reg1.predict(X1) X2 = [[1]] * 4 reg2 = DummyRegressor(strategy=strategy, constant=0, quantile=0.7) reg2.fit(X2, y) predictions2 = reg2.predict(X2) assert_array_equal(predictions1, predictions2) @pytest.mark.parametrize( "strategy", ["stratified", "most_frequent", "prior", "uniform", "constant"] ) def test_dtype_of_classifier_probas(strategy): y = [0, 2, 1, 1] X = np.zeros(4) model = DummyClassifier(strategy=strategy, random_state=0, constant=0) probas = model.fit(X, y).predict_proba(X) assert probas.dtype == np.float64
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_init.py
sklearn/tests/test_init.py
# Basic unittests to test functioning of module's top-level __author__ = "Yaroslav Halchenko" __license__ = "BSD" try: from sklearn import * # noqa: F403 _top_import_error = None except Exception as e: _top_import_error = e def test_import_skl(): # Test either above import has failed for some reason # "import *" is discouraged outside of the module level, hence we # rely on setting up the variable above assert _top_import_error is None
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tests/test_kernel_ridge.py
sklearn/tests/test_kernel_ridge.py
import numpy as np import pytest from sklearn.datasets import make_regression from sklearn.kernel_ridge import KernelRidge from sklearn.linear_model import Ridge from sklearn.metrics.pairwise import pairwise_kernels from sklearn.utils._testing import assert_array_almost_equal, ignore_warnings from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS X, y = make_regression(n_features=10, random_state=0) Y = np.array([y, y]).T def test_kernel_ridge(): pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X) pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X) assert_array_almost_equal(pred, pred2) @pytest.mark.parametrize("sparse_container", [*CSR_CONTAINERS, *CSC_CONTAINERS]) def test_kernel_ridge_sparse(sparse_container): X_sparse = sparse_container(X) pred = ( Ridge(alpha=1, fit_intercept=False, solver="cholesky") .fit(X_sparse, y) .predict(X_sparse) ) pred2 = KernelRidge(kernel="linear", alpha=1).fit(X_sparse, y).predict(X_sparse) assert_array_almost_equal(pred, pred2) def test_kernel_ridge_singular_kernel(): # alpha=0 causes a LinAlgError in computing the dual coefficients, # which causes a fallback to a lstsq solver. This is tested here. 
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X) kr = KernelRidge(kernel="linear", alpha=0) ignore_warnings(kr.fit)(X, y) pred2 = kr.predict(X) assert_array_almost_equal(pred, pred2) def test_kernel_ridge_precomputed(): for kernel in ["linear", "rbf", "poly", "cosine"]: K = pairwise_kernels(X, X, metric=kernel) pred = KernelRidge(kernel=kernel).fit(X, y).predict(X) pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K) assert_array_almost_equal(pred, pred2) def test_kernel_ridge_precomputed_kernel_unchanged(): K = np.dot(X, X.T) K2 = K.copy() KernelRidge(kernel="precomputed").fit(K, y) assert_array_almost_equal(K, K2) def test_kernel_ridge_sample_weights(): K = np.dot(X, X.T) # precomputed kernel sw = np.random.RandomState(0).rand(X.shape[0]) pred = Ridge(alpha=1, fit_intercept=False).fit(X, y, sample_weight=sw).predict(X) pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y, sample_weight=sw).predict(X) pred3 = ( KernelRidge(kernel="precomputed", alpha=1) .fit(K, y, sample_weight=sw) .predict(K) ) assert_array_almost_equal(pred, pred2) assert_array_almost_equal(pred, pred3) def test_kernel_ridge_multi_output(): pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X) pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X) assert_array_almost_equal(pred, pred2) pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X) pred3 = np.array([pred3, pred3]).T assert_array_almost_equal(pred2, pred3)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_base.py
sklearn/datasets/_base.py
""" Base IO code for all datasets """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import csv import gzip import hashlib import os import re import shutil import time import unicodedata import warnings from collections import namedtuple from importlib import resources from numbers import Integral from os import environ, listdir, makedirs from os.path import expanduser, isdir, join, splitext from pathlib import Path from tempfile import NamedTemporaryFile from urllib.error import URLError from urllib.parse import urlparse from urllib.request import urlretrieve import numpy as np from sklearn.preprocessing import scale from sklearn.utils import Bunch, check_random_state from sklearn.utils._optional_dependencies import check_pandas_support from sklearn.utils._param_validation import Interval, StrOptions, validate_params DATA_MODULE = "sklearn.datasets.data" DESCR_MODULE = "sklearn.datasets.descr" IMAGES_MODULE = "sklearn.datasets.images" RemoteFileMetadata = namedtuple("RemoteFileMetadata", ["filename", "url", "checksum"]) @validate_params( { "data_home": [str, os.PathLike, None], }, prefer_skip_nested_validation=True, ) def get_data_home(data_home=None) -> str: """Return the path of the scikit-learn data directory. This folder is used by some large dataset loaders to avoid downloading the data several times. By default the data directory is set to a folder named 'scikit_learn_data' in the user home folder. Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str or path-like, default=None The path to scikit-learn data directory. If `None`, the default path is `~/scikit_learn_data`. Returns ------- data_home: str The path to scikit-learn data directory. 
Examples -------- >>> import os >>> from sklearn.datasets import get_data_home >>> data_home_path = get_data_home() >>> os.path.exists(data_home_path) True """ if data_home is None: data_home = environ.get("SCIKIT_LEARN_DATA", join("~", "scikit_learn_data")) data_home = expanduser(data_home) makedirs(data_home, exist_ok=True) return data_home @validate_params( { "data_home": [str, os.PathLike, None], }, prefer_skip_nested_validation=True, ) def clear_data_home(data_home=None): """Delete all the content of the data home cache. Parameters ---------- data_home : str or path-like, default=None The path to scikit-learn data directory. If `None`, the default path is `~/scikit_learn_data`. Examples -------- >>> from sklearn.datasets import clear_data_home >>> clear_data_home() # doctest: +SKIP """ data_home = get_data_home(data_home) shutil.rmtree(data_home) def _convert_data_dataframe( caller_name, data, target, feature_names, target_names, sparse_data=False ): pd = check_pandas_support("{} with as_frame=True".format(caller_name)) if not sparse_data: data_df = pd.DataFrame(data, columns=feature_names, copy=False) else: data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names) target_df = pd.DataFrame(target, columns=target_names) combined_df = pd.concat([data_df, target_df], axis=1) X = combined_df[feature_names] y = combined_df[target_names] if y.shape[1] == 1: y = y.iloc[:, 0] return combined_df, X, y @validate_params( { "container_path": [str, os.PathLike], "description": [str, None], "categories": [list, None], "load_content": ["boolean"], "shuffle": ["boolean"], "encoding": [str, None], "decode_error": [StrOptions({"strict", "ignore", "replace"})], "random_state": ["random_state"], "allowed_extensions": [list, None], }, prefer_skip_nested_validation=True, ) def load_files( container_path, *, description=None, categories=None, load_content=True, shuffle=True, encoding=None, decode_error="strict", random_state=0, allowed_extensions=None, ): """Load text 
files with categories as subfolder names. Individual samples are assumed to be files stored a two levels folder structure such as the following: .. code-block:: text container_folder/ category_1_folder/ file_1.txt file_2.txt ... file_42.txt category_2_folder/ file_43.txt file_44.txt ... The folder names are used as supervised signal label names. The individual file names are not important. This function does not try to extract features into a numpy array or scipy sparse matrix. In addition, if load_content is false it does not try to load the files in memory. To use text files in a scikit-learn classification or clustering algorithm, you will need to use the :mod:`~sklearn.feature_extraction.text` module to build a feature extraction transformer that suits your problem. If you set load_content=True, you should also specify the encoding of the text using the 'encoding' parameter. For many modern text files, 'utf-8' will be the correct encoding. If you leave encoding equal to None, then the content will be made of bytes instead of Unicode, and you will not be able to use most functions in :mod:`~sklearn.feature_extraction.text`. Similar feature extractors should be built for other kind of unstructured data input such as images, audio, video, ... If you want files with a specific file extension (e.g. `.txt`) then you can pass a list of those file extensions to `allowed_extensions`. Read more in the :ref:`User Guide <datasets>`. Parameters ---------- container_path : str Path to the main folder holding one subfolder per category. description : str, default=None A paragraph describing the characteristic of the dataset: its source, reference, etc. categories : list of str, default=None If None (default), load all the categories. If not None, list of category names to load (other categories ignored). load_content : bool, default=True Whether to load or not the content of the different files. 
If true a 'data' attribute containing the text information is present in the data structure returned. If not, a filenames attribute gives the path to the files. shuffle : bool, default=True Whether or not to shuffle the data: might be important for models that make the assumption that the samples are independent and identically distributed (i.i.d.), such as stochastic gradient descent. encoding : str, default=None If None, do not try to decode the content of the files (e.g. for images or other non-text content). If not None, encoding to use to decode text files to Unicode if load_content is True. decode_error : {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. Passed as keyword argument 'errors' to bytes.decode. random_state : int, RandomState instance or None, default=0 Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. allowed_extensions : list of str, default=None List of desired file extensions to filter the files to be loaded. Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : list of str Only present when `load_content=True`. The raw text data to learn. target : ndarray The target labels (integer index). target_names : list The names of target classes. DESCR : str The full description of the dataset. filenames: ndarray The filenames holding the dataset. 
Examples -------- >>> from sklearn.datasets import load_files >>> container_path = "./" >>> load_files(container_path) # doctest: +SKIP """ target = [] target_names = [] filenames = [] folders = [ f for f in sorted(listdir(container_path)) if isdir(join(container_path, f)) ] if categories is not None: folders = [f for f in folders if f in categories] if allowed_extensions is not None: allowed_extensions = frozenset(allowed_extensions) for label, folder in enumerate(folders): target_names.append(folder) folder_path = join(container_path, folder) files = sorted(listdir(folder_path)) if allowed_extensions is not None: documents = [ join(folder_path, file) for file in files if os.path.splitext(file)[1] in allowed_extensions ] else: documents = [join(folder_path, file) for file in files] target.extend(len(documents) * [label]) filenames.extend(documents) # convert to array for fancy indexing filenames = np.array(filenames) target = np.array(target) if shuffle: random_state = check_random_state(random_state) indices = np.arange(filenames.shape[0]) random_state.shuffle(indices) filenames = filenames[indices] target = target[indices] if load_content: data = [] for filename in filenames: data.append(Path(filename).read_bytes()) if encoding is not None: data = [d.decode(encoding, decode_error) for d in data] return Bunch( data=data, filenames=filenames, target_names=target_names, target=target, DESCR=description, ) return Bunch( filenames=filenames, target_names=target_names, target=target, DESCR=description ) def load_csv_data( data_file_name, *, data_module=DATA_MODULE, descr_file_name=None, descr_module=DESCR_MODULE, encoding="utf-8", ): """Loads `data_file_name` from `data_module with `importlib.resources`. Parameters ---------- data_file_name : str Name of csv file to be loaded from `data_module/data_file_name`. For example `'wine_data.csv'`. data_module : str or module, default='sklearn.datasets.data' Module where data lives. The default is `'sklearn.datasets.data'`. 
descr_file_name : str, default=None Name of rst file to be loaded from `descr_module/descr_file_name`. For example `'wine_data.rst'`. See also :func:`load_descr`. If not None, also returns the corresponding description of the dataset. descr_module : str or module, default='sklearn.datasets.descr' Module where `descr_file_name` lives. See also :func:`load_descr`. The default is `'sklearn.datasets.descr'`. Returns ------- data : ndarray of shape (n_samples, n_features) A 2D array with each row representing one sample and each column representing the features of a given sample. target : ndarry of shape (n_samples,) A 1D array holding target variables for all the samples in `data`. For example target[0] is the target variable for data[0]. target_names : ndarry of shape (n_samples,) A 1D array containing the names of the classifications. For example target_names[0] is the name of the target[0] class. descr : str, optional Description of the dataset (the content of `descr_file_name`). Only returned if `descr_file_name` is not None. encoding : str, optional Text encoding of the CSV file. .. 
versionadded:: 1.4 """ data_path = resources.files(data_module) / data_file_name with data_path.open("r", encoding="utf-8") as csv_file: data_file = csv.reader(csv_file) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) target_names = np.array(temp[2:]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,), dtype=int) for i, ir in enumerate(data_file): data[i] = np.asarray(ir[:-1], dtype=np.float64) target[i] = np.asarray(ir[-1], dtype=int) if descr_file_name is None: return data, target, target_names else: assert descr_module is not None descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name) return data, target, target_names, descr def load_gzip_compressed_csv_data( data_file_name, *, data_module=DATA_MODULE, descr_file_name=None, descr_module=DESCR_MODULE, encoding="utf-8", **kwargs, ): """Loads gzip-compressed with `importlib.resources`. 1) Open resource file with `importlib.resources.open_binary` 2) Decompress file obj with `gzip.open` 3) Load decompressed data with `np.loadtxt` Parameters ---------- data_file_name : str Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from `data_module/data_file_name`. For example `'diabetes_data.csv.gz'`. data_module : str or module, default='sklearn.datasets.data' Module where data lives. The default is `'sklearn.datasets.data'`. descr_file_name : str, default=None Name of rst file to be loaded from `descr_module/descr_file_name`. For example `'wine_data.rst'`. See also :func:`load_descr`. If not None, also returns the corresponding description of the dataset. descr_module : str or module, default='sklearn.datasets.descr' Module where `descr_file_name` lives. See also :func:`load_descr`. The default is `'sklearn.datasets.descr'`. encoding : str, default="utf-8" Name of the encoding that the gzip-decompressed file will be decoded with. The default is 'utf-8'. **kwargs : dict, optional Keyword arguments to be passed to `np.loadtxt`; e.g. 
delimiter=','. Returns ------- data : ndarray of shape (n_samples, n_features) A 2D array with each row representing one sample and each column representing the features and/or target of a given sample. descr : str, optional Description of the dataset (the content of `descr_file_name`). Only returned if `descr_file_name` is not None. """ data_path = resources.files(data_module) / data_file_name with data_path.open("rb") as compressed_file: compressed_file = gzip.open(compressed_file, mode="rt", encoding=encoding) data = np.loadtxt(compressed_file, **kwargs) if descr_file_name is None: return data else: assert descr_module is not None descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name) return data, descr def load_descr(descr_file_name, *, descr_module=DESCR_MODULE, encoding="utf-8"): """Load `descr_file_name` from `descr_module` with `importlib.resources`. Parameters ---------- descr_file_name : str, default=None Name of rst file to be loaded from `descr_module/descr_file_name`. For example `'wine_data.rst'`. See also :func:`load_descr`. If not None, also returns the corresponding description of the dataset. descr_module : str or module, default='sklearn.datasets.descr' Module where `descr_file_name` lives. See also :func:`load_descr`. The default is `'sklearn.datasets.descr'`. encoding : str, default="utf-8" Name of the encoding that `descr_file_name` will be decoded with. The default is 'utf-8'. .. versionadded:: 1.4 Returns ------- fdescr : str Content of `descr_file_name`. """ path = resources.files(descr_module) / descr_file_name return path.read_text(encoding=encoding) @validate_params( { "return_X_y": ["boolean"], "as_frame": ["boolean"], }, prefer_skip_nested_validation=True, ) def load_wine(*, return_X_y=False, as_frame=False): """Load and return the wine dataset (classification). .. versionadded:: 0.18 The wine dataset is a classic and very easy multi-class classification dataset. 
================= ============== Classes 3 Samples per class [59,71,48] Samples total 178 Dimensionality 13 Features real, positive ================= ============== The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit standard format from: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data Read more in the :ref:`User Guide <wine_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (178, 13) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (178,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. frame: DataFrame of shape (178, 14) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D array of shape (178, 13) with each row representing one sample and each column representing the features. The second array of shape (178,) contains the target samples. Examples -------- Let's say you are interested in the samples 10, 80, and 140, and want to know their class name. 
>>> from sklearn.datasets import load_wine >>> data = load_wine() >>> data.target[[10, 80, 140]] array([0, 1, 2]) >>> list(data.target_names) [np.str_('class_0'), np.str_('class_1'), np.str_('class_2')] """ data, target, target_names, fdescr = load_csv_data( data_file_name="wine_data.csv", descr_file_name="wine_data.rst" ) feature_names = [ "alcohol", "malic_acid", "ash", "alcalinity_of_ash", "magnesium", "total_phenols", "flavanoids", "nonflavanoid_phenols", "proanthocyanins", "color_intensity", "hue", "od280/od315_of_diluted_wines", "proline", ] frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_wine", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, ) @validate_params( {"return_X_y": ["boolean"], "as_frame": ["boolean"]}, prefer_skip_nested_validation=True, ) def load_iris(*, return_X_y=False, as_frame=False): """Load and return the iris dataset (classification). The iris dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class 50 Samples total 150 Dimensionality 4 Features real, positive ================= ============== Read more in the :ref:`User Guide <iris_dataset>`. .. versionchanged:: 0.20 Fixed two wrong data points according to Fisher's paper. The new version is the same as in R, but not as in the UCI Machine Learning Repository. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. 
If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (150, 4) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (150,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: ndarray of shape (3, ) The names of target classes. frame: DataFrame of shape (150, 5) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. filename: str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 25, and 50, and want to know their class name. >>> from sklearn.datasets import load_iris >>> data = load_iris() >>> samples = [10, 25, 50] >>> data.target[samples] array([0, 0, 1]) >>> data.target_names[data.target[samples]] array(['setosa', 'setosa', 'versicolor'], dtype='<U10') See :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py` for a more detailed example of how to work with the iris dataset. 
""" data_file_name = "iris.csv" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name="iris.rst" ) feature_names = [ "sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)", ] frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_iris", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, ) @validate_params( {"return_X_y": ["boolean"], "as_frame": ["boolean"]}, prefer_skip_nested_validation=True, ) def load_breast_cancer(*, return_X_y=False, as_frame=False): """Load and return the breast cancer Wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. ================= ============== Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ================= ============== The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is downloaded from: https://archive.ics.uci.edu/dataset/17/breast+cancer+wisconsin+diagnostic Read more in the :ref:`User Guide <breast_cancer_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. 
data : {ndarray, dataframe} of shape (569, 30) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, Series} of shape (569,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names : ndarray of shape (30,) The names of the dataset columns. target_names : ndarray of shape (2,) The names of target classes. frame : DataFrame of shape (569, 31) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR : str The full description of the dataset. filename : str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (569, 30) with each row representing one sample and each column representing the features. The second ndarray of shape (569,) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 50, and 85, and want to know their class name. 
>>> from sklearn.datasets import load_breast_cancer >>> data = load_breast_cancer() >>> data.target[[10, 50, 85]] array([0, 1, 0]) >>> list(data.target_names) [np.str_('malignant'), np.str_('benign')] """ data_file_name = "breast_cancer.csv" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name="breast_cancer.rst" ) feature_names = np.array( [ "mean radius", "mean texture", "mean perimeter", "mean area", "mean smoothness", "mean compactness", "mean concavity", "mean concave points", "mean symmetry", "mean fractal dimension", "radius error", "texture error", "perimeter error", "area error", "smoothness error", "compactness error", "concavity error", "concave points error", "symmetry error", "fractal dimension error", "worst radius", "worst texture", "worst perimeter", "worst area", "worst smoothness", "worst compactness", "worst concavity", "worst concave points", "worst symmetry", "worst fractal dimension", ] ) frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_breast_cancer", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, ) @validate_params( { "n_class": [Interval(Integral, 1, 10, closed="both")], "return_X_y": ["boolean"], "as_frame": ["boolean"], }, prefer_skip_nested_validation=True, ) def load_digits(*, n_class=10, return_X_y=False, as_frame=False): """Load and return the digits dataset (classification). Each datapoint is a 8x8 image of a digit. 
================= ============== Classes 10 Samples per class ~180 Samples total 1797 Dimensionality 64 Features integers 0-16 ================= ============== This is a copy of the test set of the UCI ML hand-written digits datasets https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits Read more in the :ref:`User Guide <digits_dataset>`. Parameters ---------- n_class : int, default=10 The number of classes to return. Between 0 and 10. return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (1797, 64) The flattened data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (1797,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. .. versionadded:: 0.20 frame: DataFrame of shape (1797, 65) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 images: {ndarray} of shape (1797, 8, 8) The raw image data. DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (1797, 64) with each row representing one sample and each column representing the features. 
The second ndarray of shape (1797) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- To load the data and visualize the images:: >>> from sklearn.datasets import load_digits >>> digits = load_digits() >>> print(digits.data.shape) (1797, 64)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_kddcup99.py
sklearn/datasets/_kddcup99.py
"""KDDCUP 99 dataset. A classic dataset for anomaly detection. The dataset page is available from UCI Machine Learning Repository https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import errno import logging import os from gzip import GzipFile from numbers import Integral, Real from os.path import exists, join import joblib import numpy as np from sklearn.datasets import get_data_home from sklearn.datasets._base import ( RemoteFileMetadata, _convert_data_dataframe, _fetch_remote, load_descr, ) from sklearn.utils import Bunch, check_random_state from sklearn.utils import shuffle as shuffle_method from sklearn.utils._param_validation import Interval, StrOptions, validate_params # The original data can be found at: # https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz ARCHIVE = RemoteFileMetadata( filename="kddcup99_data", url="https://ndownloader.figshare.com/files/5976045", checksum="3b6c942aa0356c0ca35b7b595a26c89d343652c9db428893e7494f837b274292", ) # The original data can be found at: # https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz ARCHIVE_10_PERCENT = RemoteFileMetadata( filename="kddcup99_10_data", url="https://ndownloader.figshare.com/files/5976042", checksum="8045aca0d84e70e622d1148d7df782496f6333bf6eb979a1b0837c42a9fd9561", ) logger = logging.getLogger(__name__) @validate_params( { "subset": [StrOptions({"SA", "SF", "http", "smtp"}), None], "data_home": [str, os.PathLike, None], "shuffle": ["boolean"], "random_state": ["random_state"], "percent10": ["boolean"], "download_if_missing": ["boolean"], "return_X_y": ["boolean"], "as_frame": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_kddcup99( *, subset=None, data_home=None, 
shuffle=False, random_state=None, percent10=True, download_if_missing=True, return_X_y=False, as_frame=False, n_retries=3, delay=1.0, ): """Load the kddcup99 dataset (classification). Download it if necessary. ================= ==================================== Classes 23 Samples total 4898431 Dimensionality 41 Features discrete (int) or continuous (float) ================= ==================================== Read more in the :ref:`User Guide <kddcup99_dataset>`. .. versionadded:: 0.18 Parameters ---------- subset : {'SA', 'SF', 'http', 'smtp'}, default=None To return the corresponding classical subsets of kddcup 99. If None, return the entire kddcup 99 dataset. data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. .. versionadded:: 0.19 shuffle : bool, default=False Whether to shuffle dataset. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling and for selection of abnormal samples if `subset='SA'`. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. percent10 : bool, default=True Whether to load only 10 percent of the data. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.20 as_frame : bool, default=False If `True`, returns a pandas Dataframe for the ``data`` and ``target`` objects in the `Bunch` returned object; `Bunch` return object will also have a ``frame`` member. .. versionadded:: 0.24 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. 
versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (494021, 41) The data matrix to learn. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, series} of shape (494021,) The regression target for each sample. If `as_frame=True`, `target` will be a pandas Series. frame : dataframe of shape (494021, 42) Only present when `as_frame=True`. Contains `data` and `target`. DESCR : str The full description of the dataset. feature_names : list The names of the dataset columns target_names: list The names of the target columns (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 """ data_home = get_data_home(data_home=data_home) kddcup99 = _fetch_brute_kddcup99( data_home=data_home, percent10=percent10, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) data = kddcup99.data target = kddcup99.target feature_names = kddcup99.feature_names target_names = kddcup99.target_names if subset == "SA": s = target == b"normal." 
t = np.logical_not(s) normal_samples = data[s, :] normal_targets = target[s] abnormal_samples = data[t, :] abnormal_targets = target[t] n_samples_abnormal = abnormal_samples.shape[0] # selected abnormal samples: random_state = check_random_state(random_state) r = random_state.randint(0, n_samples_abnormal, 3377) abnormal_samples = abnormal_samples[r] abnormal_targets = abnormal_targets[r] data = np.r_[normal_samples, abnormal_samples] target = np.r_[normal_targets, abnormal_targets] if subset == "SF" or subset == "http" or subset == "smtp": # select all samples with positive logged_in attribute: s = data[:, 11] == 1 data = np.c_[data[s, :11], data[s, 12:]] feature_names = feature_names[:11] + feature_names[12:] target = target[s] data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False)) data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False)) data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False)) if subset == "http": s = data[:, 2] == b"http" data = data[s] target = target[s] data = np.c_[data[:, 0], data[:, 4], data[:, 5]] feature_names = [feature_names[0], feature_names[4], feature_names[5]] if subset == "smtp": s = data[:, 2] == b"smtp" data = data[s] target = target[s] data = np.c_[data[:, 0], data[:, 4], data[:, 5]] feature_names = [feature_names[0], feature_names[4], feature_names[5]] if subset == "SF": data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]] feature_names = [ feature_names[0], feature_names[2], feature_names[4], feature_names[5], ] if shuffle: data, target = shuffle_method(data, target, random_state=random_state) fdescr = load_descr("kddcup99.rst") frame = None if as_frame: frame, data, target = _convert_data_dataframe( "fetch_kddcup99", data, target, feature_names, target_names ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, feature_names=feature_names, DESCR=fdescr, ) def _fetch_brute_kddcup99( data_home=None, download_if_missing=True, 
percent10=True, n_retries=3, delay=1.0 ): """Load the kddcup99 dataset, downloading it if necessary. Parameters ---------- data_home : str, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. percent10 : bool, default=True Whether to load only 10 percent of the data. n_retries : int, default=3 Number of retries when HTTP errors are encountered. delay : float, default=1.0 Number of seconds between retries. Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (494021, 41) Each row corresponds to the 41 features in the dataset. target : ndarray of shape (494021,) Each value corresponds to one of the 21 attack types or to the label 'normal.'. feature_names : list The names of the dataset columns target_names: list The names of the target columns DESCR : str Description of the kddcup99 dataset. 
""" data_home = get_data_home(data_home=data_home) dir_suffix = "-py3" if percent10: kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix) archive = ARCHIVE_10_PERCENT else: kddcup_dir = join(data_home, "kddcup99" + dir_suffix) archive = ARCHIVE samples_path = join(kddcup_dir, "samples") targets_path = join(kddcup_dir, "targets") available = exists(samples_path) dt = [ ("duration", int), ("protocol_type", "S4"), ("service", "S11"), ("flag", "S6"), ("src_bytes", int), ("dst_bytes", int), ("land", int), ("wrong_fragment", int), ("urgent", int), ("hot", int), ("num_failed_logins", int), ("logged_in", int), ("num_compromised", int), ("root_shell", int), ("su_attempted", int), ("num_root", int), ("num_file_creations", int), ("num_shells", int), ("num_access_files", int), ("num_outbound_cmds", int), ("is_host_login", int), ("is_guest_login", int), ("count", int), ("srv_count", int), ("serror_rate", float), ("srv_serror_rate", float), ("rerror_rate", float), ("srv_rerror_rate", float), ("same_srv_rate", float), ("diff_srv_rate", float), ("srv_diff_host_rate", float), ("dst_host_count", int), ("dst_host_srv_count", int), ("dst_host_same_srv_rate", float), ("dst_host_diff_srv_rate", float), ("dst_host_same_src_port_rate", float), ("dst_host_srv_diff_host_rate", float), ("dst_host_serror_rate", float), ("dst_host_srv_serror_rate", float), ("dst_host_rerror_rate", float), ("dst_host_srv_rerror_rate", float), ("labels", "S16"), ] column_names = [c[0] for c in dt] target_names = column_names[-1] feature_names = column_names[:-1] if available: try: X = joblib.load(samples_path) y = joblib.load(targets_path) except Exception as e: raise OSError( "The cache for fetch_kddcup99 is invalid, please delete " f"{kddcup_dir} and run the fetch_kddcup99 again" ) from e elif download_if_missing: _mkdirp(kddcup_dir) logger.info("Downloading %s" % archive.url) _fetch_remote(archive, dirname=kddcup_dir, n_retries=n_retries, delay=delay) DT = np.dtype(dt) logger.debug("extracting archive") 
archive_path = join(kddcup_dir, archive.filename) Xy = [] with GzipFile(filename=archive_path, mode="r") as file_: for line in file_.readlines(): line = line.decode() Xy.append(line.replace("\n", "").split(",")) logger.debug("extraction done") os.remove(archive_path) Xy = np.asarray(Xy, dtype=object) for j in range(42): Xy[:, j] = Xy[:, j].astype(DT[j]) X = Xy[:, :-1] y = Xy[:, -1] # XXX bug when compress!=0: # (error: 'Incorrect data length while decompressing[...] the file # could be corrupted.') joblib.dump(X, samples_path, compress=0) joblib.dump(y, targets_path, compress=0) else: raise OSError("Data not found and `download_if_missing` is False") return Bunch( data=X, target=y, feature_names=feature_names, target_names=[target_names], ) def _mkdirp(d): """Ensure directory d exists (like mkdir -p on Unix) No guarantee that the directory is writable. """ try: os.makedirs(d) except OSError as e: if e.errno != errno.EEXIST: raise
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_rcv1.py
sklearn/datasets/_rcv1.py
"""RCV1 dataset. The dataset page is available at http://jmlr.csail.mit.edu/papers/volume5/lewis04a/ """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import logging from gzip import GzipFile from numbers import Integral, Real from os import PathLike, makedirs, remove from os.path import exists, join import joblib import numpy as np import scipy.sparse as sp from sklearn.datasets import get_data_home from sklearn.datasets._base import ( RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr, ) from sklearn.datasets._svmlight_format_io import load_svmlight_files from sklearn.utils import Bunch from sklearn.utils import shuffle as shuffle_ from sklearn.utils._param_validation import Interval, StrOptions, validate_params # The original vectorized data can be found at: # http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt0.dat.gz # http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt1.dat.gz # http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt2.dat.gz # http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt3.dat.gz # http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_train.dat.gz # while the original stemmed token files can be found # in the README, section B.12.i.: # http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm XY_METADATA = ( RemoteFileMetadata( url="https://ndownloader.figshare.com/files/5976069", checksum="ed40f7e418d10484091b059703eeb95ae3199fe042891dcec4be6696b9968374", filename="lyrl2004_vectors_test_pt0.dat.gz", ), RemoteFileMetadata( url="https://ndownloader.figshare.com/files/5976066", checksum="87700668ae45d45d5ca1ef6ae9bd81ab0f5ec88cc95dcef9ae7838f727a13aa6", filename="lyrl2004_vectors_test_pt1.dat.gz", ), RemoteFileMetadata( 
url="https://ndownloader.figshare.com/files/5976063", checksum="48143ac703cbe33299f7ae9f4995db49a258690f60e5debbff8995c34841c7f5", filename="lyrl2004_vectors_test_pt2.dat.gz", ), RemoteFileMetadata( url="https://ndownloader.figshare.com/files/5976060", checksum="dfcb0d658311481523c6e6ca0c3f5a3e1d3d12cde5d7a8ce629a9006ec7dbb39", filename="lyrl2004_vectors_test_pt3.dat.gz", ), RemoteFileMetadata( url="https://ndownloader.figshare.com/files/5976057", checksum="5468f656d0ba7a83afc7ad44841cf9a53048a5c083eedc005dcdb5cc768924ae", filename="lyrl2004_vectors_train.dat.gz", ), ) # The original data can be found at: # http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a08-topic-qrels/rcv1-v2.topics.qrels.gz TOPICS_METADATA = RemoteFileMetadata( url="https://ndownloader.figshare.com/files/5976048", checksum="2a98e5e5d8b770bded93afc8930d88299474317fe14181aee1466cc754d0d1c1", filename="rcv1v2.topics.qrels.gz", ) logger = logging.getLogger(__name__) @validate_params( { "data_home": [str, PathLike, None], "subset": [StrOptions({"train", "test", "all"})], "download_if_missing": ["boolean"], "random_state": ["random_state"], "shuffle": ["boolean"], "return_X_y": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_rcv1( *, data_home=None, subset="all", download_if_missing=True, random_state=None, shuffle=False, return_X_y=False, n_retries=3, delay=1.0, ): """Load the RCV1 multilabel dataset (classification). Download it if necessary. Version: RCV1-v2, vectors, full sets, topics multilabels. ================= ===================== Classes 103 Samples total 804414 Dimensionality 47236 Features real, between 0 and 1 ================= ===================== Read more in the :ref:`User Guide <rcv1_dataset>`. .. versionadded:: 0.17 Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. 
By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. subset : {'train', 'test', 'all'}, default='all' Select the dataset to load: 'train' for the training set (23149 samples), 'test' for the test set (781265 samples), 'all' for both, with the training samples first if shuffle is False. This follows the official LYRL2004 chronological split. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. shuffle : bool, default=False Whether to shuffle dataset. return_X_y : bool, default=False If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch object. See below for more information about the `dataset.data` and `dataset.target` object. .. versionadded:: 0.20 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object. Returned only if `return_X_y` is False. `dataset` has the following attributes: - data : sparse matrix of shape (804414, 47236), dtype=np.float64 The array has 0.16% of non zero values. Will be of CSR format. - target : sparse matrix of shape (804414, 103), dtype=np.uint8 Each sample has a value of 1 in its categories, and 0 in others. The array has 3.15% of non zero values. Will be of CSR format. - sample_id : ndarray of shape (804414,), dtype=np.uint32, Identification number of each sample, as ordered in dataset.data. - target_names : ndarray of shape (103,), dtype=object Names of each target (RCV1 topics), as ordered in dataset.target. - DESCR : str Description of the RCV1 dataset. 
(data, target) : tuple A tuple consisting of `dataset.data` and `dataset.target`, as described above. Returned only if `return_X_y` is True. .. versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_rcv1 >>> rcv1 = fetch_rcv1() >>> rcv1.data.shape (804414, 47236) >>> rcv1.target.shape (804414, 103) """ N_SAMPLES = 804414 N_FEATURES = 47236 N_CATEGORIES = 103 N_TRAIN = 23149 data_home = get_data_home(data_home=data_home) rcv1_dir = join(data_home, "RCV1") if download_if_missing: if not exists(rcv1_dir): makedirs(rcv1_dir) samples_path = _pkl_filepath(rcv1_dir, "samples.pkl") sample_id_path = _pkl_filepath(rcv1_dir, "sample_id.pkl") sample_topics_path = _pkl_filepath(rcv1_dir, "sample_topics.pkl") topics_path = _pkl_filepath(rcv1_dir, "topics_names.pkl") # load data (X) and sample_id if download_if_missing and (not exists(samples_path) or not exists(sample_id_path)): files = [] for each in XY_METADATA: logger.info("Downloading %s" % each.url) file_path = _fetch_remote( each, dirname=rcv1_dir, n_retries=n_retries, delay=delay ) files.append(GzipFile(filename=file_path)) Xy = load_svmlight_files(files, n_features=N_FEATURES) # Training data is before testing data X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr() sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7])) sample_id = sample_id.astype(np.uint32, copy=False) joblib.dump(X, samples_path, compress=9) joblib.dump(sample_id, sample_id_path, compress=9) # delete archives for f in files: f.close() remove(f.name) else: X = joblib.load(samples_path) sample_id = joblib.load(sample_id_path) # load target (y), categories, and sample_id_bis if download_if_missing and ( not exists(sample_topics_path) or not exists(topics_path) ): logger.info("Downloading %s" % TOPICS_METADATA.url) topics_archive_path = _fetch_remote( TOPICS_METADATA, dirname=rcv1_dir, n_retries=n_retries, delay=delay ) # parse the target file n_cat = -1 n_doc = -1 doc_previous = -1 y = np.zeros((N_SAMPLES, N_CATEGORIES), 
dtype=np.uint8) sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32) category_names = {} with GzipFile(filename=topics_archive_path, mode="rb") as f: for line in f: line_components = line.decode("ascii").split(" ") if len(line_components) == 3: cat, doc, _ = line_components if cat not in category_names: n_cat += 1 category_names[cat] = n_cat doc = int(doc) if doc != doc_previous: doc_previous = doc n_doc += 1 sample_id_bis[n_doc] = doc y[n_doc, category_names[cat]] = 1 # delete archive remove(topics_archive_path) # Samples in X are ordered with sample_id, # whereas in y, they are ordered with sample_id_bis. permutation = _find_permutation(sample_id_bis, sample_id) y = y[permutation, :] # save category names in a list, with same order than y categories = np.empty(N_CATEGORIES, dtype=object) for k in category_names.keys(): categories[category_names[k]] = k # reorder categories in lexicographic order order = np.argsort(categories) categories = categories[order] y = sp.csr_matrix(y[:, order]) joblib.dump(y, sample_topics_path, compress=9) joblib.dump(categories, topics_path, compress=9) else: y = joblib.load(sample_topics_path) categories = joblib.load(topics_path) if subset == "all": pass elif subset == "train": X = X[:N_TRAIN, :] y = y[:N_TRAIN, :] sample_id = sample_id[:N_TRAIN] elif subset == "test": X = X[N_TRAIN:, :] y = y[N_TRAIN:, :] sample_id = sample_id[N_TRAIN:] else: raise ValueError( "Unknown subset parameter. 
Got '%s' instead of one" " of ('all', 'train', test')" % subset ) if shuffle: X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state) fdescr = load_descr("rcv1.rst") if return_X_y: return X, y return Bunch( data=X, target=y, sample_id=sample_id, target_names=categories, DESCR=fdescr ) def _inverse_permutation(p): """Inverse permutation p.""" n = p.size s = np.zeros(n, dtype=np.int32) i = np.arange(n, dtype=np.int32) np.put(s, p, i) # s[p] = i return s def _find_permutation(a, b): """Find the permutation from a to b.""" t = np.argsort(a) u = np.argsort(b) u_ = _inverse_permutation(u) return t[u_]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_covtype.py
sklearn/datasets/_covtype.py
"""Forest covertype dataset. A classic dataset for classification benchmarks, featuring categorical and real-valued features. The dataset page is available from UCI Machine Learning Repository https://archive.ics.uci.edu/ml/datasets/Covertype Courtesy of Jock A. Blackard and Colorado State University. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import logging import os from gzip import GzipFile from numbers import Integral, Real from os.path import exists, join from tempfile import TemporaryDirectory import joblib import numpy as np from sklearn.datasets import get_data_home from sklearn.datasets._base import ( RemoteFileMetadata, _convert_data_dataframe, _fetch_remote, _pkl_filepath, load_descr, ) from sklearn.utils import Bunch, check_random_state from sklearn.utils._param_validation import Interval, validate_params # The original data can be found in: # https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz ARCHIVE = RemoteFileMetadata( filename="covtype.data.gz", url="https://ndownloader.figshare.com/files/5976039", checksum="614360d0257557dd1792834a85a1cdebfadc3c4f30b011d56afee7ffb5b15771", ) logger = logging.getLogger(__name__) # Column names reference: # https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info FEATURE_NAMES = [ "Elevation", "Aspect", "Slope", "Horizontal_Distance_To_Hydrology", "Vertical_Distance_To_Hydrology", "Horizontal_Distance_To_Roadways", "Hillshade_9am", "Hillshade_Noon", "Hillshade_3pm", "Horizontal_Distance_To_Fire_Points", ] FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)] FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)] TARGET_NAMES = ["Cover_Type"] @validate_params( { "data_home": [str, os.PathLike, None], "download_if_missing": ["boolean"], "random_state": ["random_state"], "shuffle": ["boolean"], "return_X_y": ["boolean"], "as_frame": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": 
[Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_covtype( *, data_home=None, download_if_missing=True, random_state=None, shuffle=False, return_X_y=False, as_frame=False, n_retries=3, delay=1.0, ): """Load the covertype dataset (classification). Download it if necessary. ================= ============ Classes 7 Samples total 581012 Dimensionality 54 Features int ================= ============ Read more in the :ref:`User Guide <covtype_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. shuffle : bool, default=False Whether to shuffle dataset. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.24 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. 
data : ndarray of shape (581012, 54) Each row corresponds to the 54 features in the dataset. target : ndarray of shape (581012,) Each value corresponds to one of the 7 forest covertypes with values ranging between 1 to 7. frame : dataframe of shape (581012, 55) Only present when `as_frame=True`. Contains `data` and `target`. DESCR : str Description of the forest covertype dataset. feature_names : list The names of the dataset columns. target_names: list The names of the target columns. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_covtype >>> cov_type = fetch_covtype() >>> cov_type.data.shape (581012, 54) >>> cov_type.target.shape (581012,) >>> # Let's check the 4 first feature names >>> cov_type.feature_names[:4] ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology'] """ data_home = get_data_home(data_home=data_home) covtype_dir = join(data_home, "covertype") samples_path = _pkl_filepath(covtype_dir, "samples") targets_path = _pkl_filepath(covtype_dir, "targets") available = exists(samples_path) and exists(targets_path) if download_if_missing and not available: os.makedirs(covtype_dir, exist_ok=True) # Creating temp_dir as a direct subdirectory of the target directory # guarantees that both reside on the same filesystem, so that we can use # os.rename to atomically move the data files to their target location. 
with TemporaryDirectory(dir=covtype_dir) as temp_dir: logger.info(f"Downloading {ARCHIVE.url}") archive_path = _fetch_remote( ARCHIVE, dirname=temp_dir, n_retries=n_retries, delay=delay ) Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",") X = Xy[:, :-1] y = Xy[:, -1].astype(np.int32, copy=False) samples_tmp_path = _pkl_filepath(temp_dir, "samples") joblib.dump(X, samples_tmp_path, compress=9) os.rename(samples_tmp_path, samples_path) targets_tmp_path = _pkl_filepath(temp_dir, "targets") joblib.dump(y, targets_tmp_path, compress=9) os.rename(targets_tmp_path, targets_path) elif not available and not download_if_missing: raise OSError("Data not found and `download_if_missing` is False") try: X, y except NameError: X = joblib.load(samples_path) y = joblib.load(targets_path) if shuffle: ind = np.arange(X.shape[0]) rng = check_random_state(random_state) rng.shuffle(ind) X = X[ind] y = y[ind] fdescr = load_descr("covtype.rst") frame = None if as_frame: frame, X, y = _convert_data_dataframe( caller_name="fetch_covtype", data=X, target=y, feature_names=FEATURE_NAMES, target_names=TARGET_NAMES, ) if return_X_y: return X, y return Bunch( data=X, target=y, frame=frame, target_names=TARGET_NAMES, feature_names=FEATURE_NAMES, DESCR=fdescr, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_svmlight_format_io.py
sklearn/datasets/_svmlight_format_io.py
"""This module implements a loader and dumper for the svmlight format This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import os.path from contextlib import closing from numbers import Integral import numpy as np import scipy.sparse as sp from sklearn import __version__ from sklearn.datasets._svmlight_format_fast import ( _dump_svmlight_file, _load_svmlight_file, ) from sklearn.utils import check_array from sklearn.utils._param_validation import ( HasMethods, Interval, StrOptions, validate_params, ) @validate_params( { "f": [ str, Interval(Integral, 0, None, closed="left"), os.PathLike, HasMethods("read"), ], "n_features": [Interval(Integral, 1, None, closed="left"), None], "dtype": "no_validation", # delegate validation to numpy "multilabel": ["boolean"], "zero_based": ["boolean", StrOptions({"auto"})], "query_id": ["boolean"], "offset": [Interval(Integral, 0, None, closed="left")], "length": [Integral], }, prefer_skip_nested_validation=True, ) def load_svmlight_file( f, *, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False, offset=0, length=-1, ): """Load datasets in the svmlight / libsvm format into sparse CSR matrix. This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. Parsing a text based source can be expensive. 
When repeatedly working on the same dataset, it is recommended to wrap this loader with joblib.Memory.cache to store a memmapped backup of the CSR results of the first call and benefit from the near instantaneous loading of memmapped structures for the subsequent calls. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. This implementation is written in Cython and is reasonably fast. However, a faster API-compatible loader is also available at: https://github.com/mblondel/svmlight-loader Parameters ---------- f : str, path-like, file-like or int (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. A file-like or file descriptor will not be closed by this function. A file-like object must be opened in binary mode. .. versionchanged:: 1.2 Path-like objects are now accepted. n_features : int, default=None The number of features to use. If None, it will be inferred. This argument is useful to load several files that are subsets of a bigger sliced dataset: each subset might not have examples of every feature, hence the inferred shape might vary from one slice to another. n_features is only required if ``offset`` or ``length`` are passed a non-default value. dtype : numpy data type, default=np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. multilabel : bool, default=False Samples may have several labels each (see https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). 
zero_based : bool or "auto", default="auto" Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe when no ``offset`` or ``length`` is passed. If ``offset`` or ``length`` are passed, the "auto" mode falls back to ``zero_based=True`` to avoid having the heuristic check yield inconsistent results on different segments of the file. query_id : bool, default=False If True, will return the query_id array for each file. offset : int, default=0 Ignore the offset first bytes by seeking forward, then discarding the following bytes up until the next new line character. length : int, default=-1 If strictly positive, stop reading any new line of data once the position in the file has reached the (offset + length) bytes threshold. Returns ------- X : scipy.sparse matrix of shape (n_samples, n_features) The data matrix. y : ndarray of shape (n_samples,), or a list of tuples of length n_samples The target. It is a list of tuples when ``multilabel=True``, else a ndarray. query_id : array of shape (n_samples,) The query_id for each sample. Only returned when query_id is set to True. See Also -------- load_svmlight_files : Similar function for loading multiple files in this format, enforcing the same number of features/columns on all of them. 
Examples -------- To use joblib.Memory to cache the svmlight file:: from joblib import Memory from sklearn.datasets import load_svmlight_file mem = Memory("./mycache") @mem.cache def get_data(): data = load_svmlight_file("mysvmlightfile") return data[0], data[1] X, y = get_data() """ return tuple( load_svmlight_files( [f], n_features=n_features, dtype=dtype, multilabel=multilabel, zero_based=zero_based, query_id=query_id, offset=offset, length=length, ) ) def _gen_open(f): if isinstance(f, int): # file descriptor return open(f, "rb", closefd=False) elif isinstance(f, os.PathLike): f = os.fspath(f) elif not isinstance(f, str): raise TypeError("expected {str, int, path-like, file-like}, got %s" % type(f)) _, ext = os.path.splitext(f) if ext == ".gz": import gzip return gzip.open(f, "rb") elif ext == ".bz2": from bz2 import BZ2File return BZ2File(f, "rb") else: return open(f, "rb") def _open_and_load(f, dtype, multilabel, zero_based, query_id, offset=0, length=-1): if hasattr(f, "read"): actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file( f, dtype, multilabel, zero_based, query_id, offset, length ) else: with closing(_gen_open(f)) as f: actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file( f, dtype, multilabel, zero_based, query_id, offset, length ) # convert from array.array, give data the right dtype if not multilabel: labels = np.frombuffer(labels, np.float64) data = np.frombuffer(data, actual_dtype) indices = np.frombuffer(ind, np.longlong) indptr = np.frombuffer(indptr, dtype=np.longlong) # never empty query = np.frombuffer(query, np.int64) data = np.asarray(data, dtype=dtype) # no-op for float{32,64} return data, indices, indptr, labels, query @validate_params( { "files": [ "array-like", str, os.PathLike, HasMethods("read"), Interval(Integral, 0, None, closed="left"), ], "n_features": [Interval(Integral, 1, None, closed="left"), None], "dtype": "no_validation", # delegate validation to numpy "multilabel": ["boolean"], 
"zero_based": ["boolean", StrOptions({"auto"})], "query_id": ["boolean"], "offset": [Interval(Integral, 0, None, closed="left")], "length": [Integral], }, prefer_skip_nested_validation=True, ) def load_svmlight_files( files, *, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False, offset=0, length=-1, ): """Load dataset from multiple files in SVMlight format. This function is equivalent to mapping load_svmlight_file over a list of files, except that the results are concatenated into a single, flat list and the samples vectors are constrained to all have the same number of features. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. Parameters ---------- files : array-like, dtype=str, path-like, file-like or int (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. File-likes and file descriptors will not be closed by this function. File-like objects must be opened in binary mode. .. versionchanged:: 1.2 Path-like objects are now accepted. n_features : int, default=None The number of features to use. If None, it will be inferred from the maximum column index occurring in any of the files. This can be set to a higher value than the actual number of features in any of the input files, but setting it to a lower value will cause an exception to be raised. dtype : numpy data type, default=np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. 
multilabel : bool, default=False Samples may have several labels each (see https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). zero_based : bool or "auto", default="auto" Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe when no offset or length is passed. If offset or length are passed, the "auto" mode falls back to zero_based=True to avoid having the heuristic check yield inconsistent results on different segments of the file. query_id : bool, default=False If True, will return the query_id array for each file. offset : int, default=0 Ignore the offset first bytes by seeking forward, then discarding the following bytes up until the next new line character. length : int, default=-1 If strictly positive, stop reading any new line of data once the position in the file has reached the (offset + length) bytes threshold. Returns ------- [X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays Each (Xi, yi) pair is the result from load_svmlight_file(files[i]). If query_id is set to True, this will return instead (Xi, yi, qi) triplets. See Also -------- load_svmlight_file: Similar function for loading a single file in this format. Notes ----- When fitting a model to a matrix X_train and evaluating it against a matrix X_test, it is essential that X_train and X_test have the same number of features (X_train.shape[1] == X_test.shape[1]). This may not be the case if you load the files individually with load_svmlight_file. 
Examples -------- To use joblib.Memory to cache the svmlight file:: from joblib import Memory from sklearn.datasets import load_svmlight_file mem = Memory("./mycache") @mem.cache def get_data(): data_train, target_train, data_test, target_test = load_svmlight_files( ["svmlight_file_train", "svmlight_file_test"] ) return data_train, target_train, data_test, target_test X_train, y_train, X_test, y_test = get_data() """ if (offset != 0 or length > 0) and zero_based == "auto": # disable heuristic search to avoid getting inconsistent results on # different segments of the file zero_based = True if (offset != 0 or length > 0) and n_features is None: raise ValueError("n_features is required when offset or length is specified.") r = [ _open_and_load( f, dtype, multilabel, bool(zero_based), bool(query_id), offset=offset, length=length, ) for f in files ] if zero_based is False or ( zero_based == "auto" and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r) ): for _, indices, _, _, _ in r: indices -= 1 n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1 if n_features is None: n_features = n_f elif n_features < n_f: raise ValueError( "n_features was set to {}, but input file contains {} features".format( n_features, n_f ) ) result = [] for data, indices, indptr, y, query_values in r: shape = (indptr.shape[0] - 1, n_features) X = sp.csr_matrix((data, indices, indptr), shape) X.sort_indices() result += X, y if query_id: result.append(query_values) return result def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id): if comment: f.write( ( "# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__ ).encode() ) f.write( ("# Column indices are %s-based\n" % ["zero", "one"][one_based]).encode() ) f.write(b"#\n") f.writelines(b"# %s\n" % line for line in comment.splitlines()) X_is_sp = sp.issparse(X) y_is_sp = sp.issparse(y) if not multilabel and not y_is_sp: y = y[:, np.newaxis] _dump_svmlight_file( X, y, f, multilabel, one_based, query_id, 
X_is_sp, y_is_sp, ) @validate_params( { "X": ["array-like", "sparse matrix"], "y": ["array-like", "sparse matrix"], "f": [str, HasMethods(["write"])], "zero_based": ["boolean"], "comment": [str, bytes, None], "query_id": ["array-like", None], "multilabel": ["boolean"], }, prefer_skip_nested_validation=True, ) def dump_svmlight_file( X, y, f, *, zero_based=True, comment=None, query_id=None, multilabel=False, ): """Dump the dataset in svmlight / libsvm file format. This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels) Target values. Class labels must be an integer or float, or array-like objects of integer or float for multilabel classifications. f : str or file-like in binary mode If string, specifies the path that will contain the data. If file-like, data will be written to f. f should be opened in binary mode. zero_based : bool, default=True Whether column indices should be written zero-based (True) or one-based (False). comment : str or bytes, default=None Comment to insert at the top of the file. This should be either a Unicode string, which will be encoded as UTF-8, or an ASCII byte string. If a comment is given, then it will be preceded by one that identifies the file as having been dumped by scikit-learn. Note that not all tools grok comments in SVMlight files. query_id : array-like of shape (n_samples,), default=None Array containing pairwise preference constraints (qid in svmlight format). 
multilabel : bool, default=False Samples may have several labels each (see https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). .. versionadded:: 0.17 parameter `multilabel` to support multilabel datasets. Examples -------- >>> from sklearn.datasets import dump_svmlight_file, make_classification >>> X, y = make_classification(random_state=0) >>> output_file = "my_dataset.svmlight" >>> dump_svmlight_file(X, y, output_file) # doctest: +SKIP """ if comment is not None: # Convert comment string to list of lines in UTF-8. # If a byte string is passed, then check whether it's ASCII; # if a user wants to get fancy, they'll have to decode themselves. if isinstance(comment, bytes): comment.decode("ascii") # just for the exception else: comment = comment.encode("utf-8") if b"\0" in comment: raise ValueError("comment string contains NUL byte") yval = check_array(y, accept_sparse="csr", ensure_2d=False) if sp.issparse(yval): if yval.shape[1] != 1 and not multilabel: raise ValueError( "expected y of shape (n_samples, 1), got %r" % (yval.shape,) ) else: if yval.ndim != 1 and not multilabel: raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,)) Xval = check_array(X, accept_sparse="csr") if Xval.shape[0] != yval.shape[0]: raise ValueError( "X.shape[0] and y.shape[0] should be the same, got %r and %r instead." % (Xval.shape[0], yval.shape[0]) ) # We had some issues with CSR matrices with unsorted indices (e.g. #1501), # so sort them here, but first make sure we don't modify the user's X. # TODO We can do this cheaper; sorted_indices copies the whole matrix. if yval is y and hasattr(yval, "sorted_indices"): y = yval.sorted_indices() else: y = yval if hasattr(y, "sort_indices"): y.sort_indices() if Xval is X and hasattr(Xval, "sorted_indices"): X = Xval.sorted_indices() else: X = Xval if hasattr(X, "sort_indices"): X.sort_indices() if query_id is None: # NOTE: query_id is passed to Cython functions using a fused type on query_id. 
# Yet as of Cython>=3.0, memory views can't be None otherwise the runtime # would not known which concrete implementation to dispatch the Python call to. # TODO: simplify interfaces and implementations in _svmlight_format_fast.pyx. query_id = np.array([], dtype=np.int32) else: query_id = np.asarray(query_id) if query_id.shape[0] != y.shape[0]: raise ValueError( "expected query_id of shape (n_samples,), got %r" % (query_id.shape,) ) one_based = not zero_based if hasattr(f, "write"): _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id) else: with open(f, "wb") as f: _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_california_housing.py
sklearn/datasets/_california_housing.py
"""California housing dataset. The original database is available from StatLib http://lib.stat.cmu.edu/datasets/ The data contains 20,640 observations on 9 variables. This dataset contains the median house value as target variable and the following input variables (features): average income, housing average age, average rooms, average bedrooms, population, average occupation, latitude, and longitude in that order. References ---------- Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions, Statistics and Probability Letters, 33:291-297, 1997. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import logging import tarfile from numbers import Integral, Real from os import PathLike, remove from os.path import exists import joblib import numpy as np from sklearn.datasets import get_data_home from sklearn.datasets._base import ( RemoteFileMetadata, _convert_data_dataframe, _fetch_remote, _pkl_filepath, load_descr, ) from sklearn.utils import Bunch from sklearn.utils._param_validation import Interval, validate_params # The original data can be found at: # https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz ARCHIVE = RemoteFileMetadata( filename="cal_housing.tgz", url="https://ndownloader.figshare.com/files/5976036", checksum="aaa5c9a6afe2225cc2aed2723682ae403280c4a3695a2ddda4ffb5d8215ea681", ) logger = logging.getLogger(__name__) @validate_params( { "data_home": [str, PathLike, None], "download_if_missing": ["boolean"], "return_X_y": ["boolean"], "as_frame": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_california_housing( *, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False, n_retries=3, delay=1.0, ): """Load the California housing dataset (regression). ============== ============== Samples total 20640 Dimensionality 8 Features real Target real 0.15 - 5. 
============== ============== Read more in the :ref:`User Guide <california_housing_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target_columns. .. versionadded:: 0.23 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray, shape (20640, 8) Each row corresponding to the 8 feature values in order. If ``as_frame`` is True, ``data`` is a pandas object. target : numpy array of shape (20640,) Each value corresponds to the median house value in units of 100,000. If ``as_frame`` is True, ``target`` is a pandas object. feature_names : list of length 8 Array of ordered feature names used in the dataset. DESCR : str Description of the California housing dataset. frame : pandas DataFrame Only present when `as_frame=True`. DataFrame with ``data`` and ``target``. .. versionadded:: 0.23 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. 
The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Notes ----- This dataset consists of 20,640 samples and 9 features. Examples -------- >>> from sklearn.datasets import fetch_california_housing >>> housing = fetch_california_housing() >>> print(housing.data.shape, housing.target.shape) (20640, 8) (20640,) >>> print(housing.feature_names[0:6]) ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup'] """ data_home = get_data_home(data_home=data_home) filepath = _pkl_filepath(data_home, "cal_housing.pkz") if not exists(filepath): if not download_if_missing: raise OSError("Data not found and `download_if_missing` is False") logger.info( "Downloading Cal. housing from {} to {}".format(ARCHIVE.url, data_home) ) archive_path = _fetch_remote( ARCHIVE, dirname=data_home, n_retries=n_retries, delay=delay, ) with tarfile.open(mode="r:gz", name=archive_path) as f: cal_housing = np.loadtxt( f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter="," ) # Columns are not in the same order compared to the previous # URL resource on lib.stat.cmu.edu columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0] cal_housing = cal_housing[:, columns_index] joblib.dump(cal_housing, filepath, compress=6) remove(archive_path) else: cal_housing = joblib.load(filepath) feature_names = [ "MedInc", "HouseAge", "AveRooms", "AveBedrms", "Population", "AveOccup", "Latitude", "Longitude", ] target, data = cal_housing[:, 0], cal_housing[:, 1:] # avg rooms = total rooms / households data[:, 2] /= data[:, 5] # avg bed rooms = total bed rooms / households data[:, 3] /= data[:, 5] # avg occupancy = population / households data[:, 5] = data[:, 4] / data[:, 5] # target in units of 100,000 target = target / 100000.0 descr = load_descr("california_housing.rst") X = data y = target frame = None target_names = [ "MedHouseVal", ] if as_frame: frame, X, y = _convert_data_dataframe( "fetch_california_housing", data, target, feature_names, target_names ) if 
return_X_y: return X, y return Bunch( data=X, target=y, frame=frame, target_names=target_names, feature_names=feature_names, DESCR=descr, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_samples_generator.py
sklearn/datasets/_samples_generator.py
""" Generate samples of synthetic data sets. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import array import numbers from collections.abc import Iterable from numbers import Integral, Real import numpy as np import scipy.sparse as sp from scipy import linalg from sklearn.preprocessing import MultiLabelBinarizer from sklearn.utils import Bunch, check_array, check_random_state from sklearn.utils import shuffle as util_shuffle from sklearn.utils._param_validation import Interval, StrOptions, validate_params from sklearn.utils.random import sample_without_replacement def _generate_hypercube(samples, dimensions, rng): """Returns distinct binary samples of length dimensions.""" if dimensions > 30: return np.hstack( [ rng.randint(2, size=(samples, dimensions - 30)), _generate_hypercube(samples, 30, rng), ] ) out = sample_without_replacement(2**dimensions, samples, random_state=rng).astype( dtype=">u4", copy=False ) out = np.unpackbits(out.view(">u1")).reshape((-1, 32))[:, -dimensions:] return out @validate_params( { "n_samples": [Interval(Integral, 1, None, closed="left")], "n_features": [Interval(Integral, 1, None, closed="left")], "n_informative": [Interval(Integral, 1, None, closed="left")], "n_redundant": [Interval(Integral, 0, None, closed="left")], "n_repeated": [Interval(Integral, 0, None, closed="left")], "n_classes": [Interval(Integral, 1, None, closed="left")], "n_clusters_per_class": [Interval(Integral, 1, None, closed="left")], "weights": ["array-like", None], "flip_y": [Interval(Real, 0, 1, closed="both")], "class_sep": [Interval(Real, 0, None, closed="neither")], "hypercube": ["boolean"], "shift": [Interval(Real, None, None, closed="neither"), "array-like", None], "scale": [Interval(Real, 0, None, closed="neither"), "array-like", None], "shuffle": ["boolean"], "random_state": ["random_state"], "return_X_y": ["boolean"], }, prefer_skip_nested_validation=True, ) def make_classification( n_samples=100, n_features=20, *, 
n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None, return_X_y=True, ): """Generate a random n-class classification problem. This initially creates clusters of points normally distributed (std=1) about vertices of an ``n_informative``-dimensional hypercube with sides of length ``2*class_sep`` and assigns an equal number of clusters to each class. It introduces interdependence between these features and adds various types of further noise to the data. Without shuffling, ``X`` horizontally stacks features in the following order: the primary ``n_informative`` features, followed by ``n_redundant`` linear combinations of the informative features, followed by ``n_repeated`` duplicates, drawn randomly with replacement from the informative and redundant features. The remaining features are filled with random noise. Thus, without shuffling, all useful features are contained in the columns ``X[:, :n_informative + n_redundant + n_repeated]``. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=20 The total number of features. These comprise ``n_informative`` informative features, ``n_redundant`` redundant features, ``n_repeated`` duplicated features and ``n_features-n_informative-n_redundant-n_repeated`` useless features drawn at random. n_informative : int, default=2 The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension ``n_informative``. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined within each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, default=2 The number of redundant features. 
These features are generated as random linear combinations of the informative features. n_repeated : int, default=0 The number of duplicated features, drawn randomly from the informative and the redundant features. n_classes : int, default=2 The number of classes (or labels) of the classification problem. n_clusters_per_class : int, default=2 The number of clusters per class. weights : array-like of shape (n_classes,) or (n_classes - 1,),\ default=None The proportions of samples assigned to each class. If None, then classes are balanced. Note that if ``len(weights) == n_classes - 1``, then the last class weight is automatically inferred. More than ``n_samples`` samples may be returned if the sum of ``weights`` exceeds 1. Note that the actual class proportions will not exactly match ``weights`` when ``flip_y`` isn't 0. flip_y : float, default=0.01 The fraction of samples whose class is assigned randomly. Larger values introduce noise in the labels and make the classification task harder. Note that the default setting flip_y > 0 might lead to less than ``n_classes`` in y in some cases. class_sep : float, default=1.0 The factor multiplying the hypercube size. Larger values spread out the clusters/classes and make the classification task easier. hypercube : bool, default=True If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float, ndarray of shape (n_features,) or None, default=0.0 Shift features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float, ndarray of shape (n_features,) or None, default=1.0 Multiply features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : bool, default=True Shuffle the samples and the features. 
random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. return_X_y : bool, default=True If True, a tuple ``(X, y)`` instead of a Bunch object is returned. .. versionadded:: 1.7 Returns ------- data : :class:`~sklearn.utils.Bunch` if `return_X_y` is `False`. Dictionary-like object, with the following attributes. DESCR : str A description of the function that generated the dataset. parameter : dict A dictionary that stores the values of the arguments passed to the generator function. feature_info : list of len(n_features) A description for each generated feature. X : ndarray of shape (n_samples, n_features) The generated samples. y : ndarray of shape (n_samples,) An integer label for class membership of each sample. .. versionadded:: 1.7 (X, y) : tuple if ``return_X_y`` is True A tuple of generated samples and labels. See Also -------- make_blobs : Simplified variant. make_multilabel_classification : Unrelated generator for multilabel tasks. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the "Madelon" dataset. References ---------- .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable selection benchmark", 2003. 
Examples -------- >>> from sklearn.datasets import make_classification >>> X, y = make_classification(random_state=42) >>> X.shape (100, 20) >>> y.shape (100,) >>> list(y[:5]) [np.int64(0), np.int64(0), np.int64(1), np.int64(1), np.int64(0)] """ generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError( "Number of informative, redundant and repeated " "features must sum to less than the number of total" " features" ) # Use log2 to avoid overflow errors if n_informative < np.log2(n_classes * n_clusters_per_class): msg = "n_classes({}) * n_clusters_per_class({}) must be" msg += " smaller or equal 2**n_informative({})={}" raise ValueError( msg.format(n_classes, n_clusters_per_class, n_informative, 2**n_informative) ) if weights is not None: # we define new variable, weight_, instead of modifying user defined parameter. if len(weights) not in [n_classes, n_classes - 1]: raise ValueError( "Weights specified but incompatible with number of classes." 
) if len(weights) == n_classes - 1: if isinstance(weights, list): weights_ = weights + [1.0 - sum(weights)] else: weights_ = np.resize(weights, n_classes) weights_[-1] = 1.0 - sum(weights_[:-1]) else: weights_ = weights.copy() else: weights_ = [1.0 / n_classes] * n_classes n_random = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class # Distribute samples among clusters by weight n_samples_per_cluster = [ int(n_samples * weights_[k % n_classes] / n_clusters_per_class) for k in range(n_clusters) ] for i in range(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Initialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples, dtype=int) # Build the polytope whose vertices become cluster centroids centroids = _generate_hypercube(n_clusters, n_informative, generator).astype( float, copy=False ) centroids *= 2 * class_sep centroids -= class_sep if not hypercube: centroids *= generator.uniform(size=(n_clusters, 1)) centroids *= generator.uniform(size=(1, n_informative)) # Initially draw informative features from the standard normal X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative)) # Create each cluster; a variant of make_blobs stop = 0 for k, centroid in enumerate(centroids): start, stop = stop, stop + n_samples_per_cluster[k] y[start:stop] = k % n_classes # assign labels X_k = X[start:stop, :n_informative] # slice a view of the cluster A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1 X_k[...] 
= np.dot(X_k, A) # introduce random covariance X_k += centroid # shift the cluster to a vertex # Create redundant features if n_redundant > 0: B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1 X[:, n_informative : n_informative + n_redundant] = np.dot( X[:, :n_informative], B ) # Repeat some features n = n_informative + n_redundant if n_repeated > 0: indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp) X[:, n : n + n_repeated] = X[:, indices] # Fill useless features if n_random > 0: X[:, -n_random:] = generator.standard_normal(size=(n_samples, n_random)) # Randomly replace labels if flip_y >= 0.0: flip_mask = generator.uniform(size=n_samples) < flip_y y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) # Randomly shift and scale if shift is None: shift = (2 * generator.uniform(size=n_features) - 1) * class_sep X += shift if scale is None: scale = 1 + 100 * generator.uniform(size=n_features) X *= scale indices = np.arange(n_features) if shuffle: # Randomly permute samples X, y = util_shuffle(X, y, random_state=generator) # Randomly permute features generator.shuffle(indices) X[:, :] = X[:, indices] if return_X_y: return X, y # feat_desc describes features in X feat_desc = ["random"] * n_features for i, index in enumerate(indices): if index < n_informative: feat_desc[i] = "informative" elif n_informative <= index < n_informative + n_redundant: feat_desc[i] = "redundant" elif n <= index < n + n_repeated: feat_desc[i] = "repeated" parameters = { "n_samples": n_samples, "n_features": n_features, "n_informative": n_informative, "n_redundant": n_redundant, "n_repeated": n_repeated, "n_classes": n_classes, "n_clusters_per_class": n_clusters_per_class, "weights": weights, "flip_y": flip_y, "class_sep": class_sep, "hypercube": hypercube, "shift": shift, "scale": scale, "shuffle": shuffle, "random_state": random_state, "return_X_y": return_X_y, } bunch = Bunch( DESCR=make_classification.__doc__, parameters=parameters, 
feature_info=feat_desc, X=X, y=y, ) return bunch @validate_params( { "n_samples": [Interval(Integral, 1, None, closed="left")], "n_features": [Interval(Integral, 1, None, closed="left")], "n_classes": [Interval(Integral, 1, None, closed="left")], "n_labels": [Interval(Integral, 0, None, closed="left")], "length": [Interval(Integral, 1, None, closed="left")], "allow_unlabeled": ["boolean"], "sparse": ["boolean"], "return_indicator": [StrOptions({"dense", "sparse"}), "boolean"], "return_distributions": ["boolean"], "random_state": ["random_state"], }, prefer_skip_nested_validation=True, ) def make_multilabel_classification( n_samples=100, n_features=20, *, n_classes=5, n_labels=2, length=50, allow_unlabeled=True, sparse=False, return_indicator="dense", return_distributions=False, random_state=None, ): """Generate a random multilabel classification problem. For each sample, the generative process is: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is never zero or more than `n_classes`, and that the document length is never zero. Likewise, we reject classes which have already been chosen. For an example of usage, see :ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=20 The total number of features. n_classes : int, default=5 The number of classes of the classification problem. n_labels : int, default=2 The average number of labels per instance. 
More precisely, the number of labels per sample is drawn from a Poisson distribution with ``n_labels`` as its expected value, but samples are bounded (using rejection sampling) by ``n_classes``, and must be nonzero if ``allow_unlabeled`` is False. length : int, default=50 The sum of the features (number of words if documents) is drawn from a Poisson distribution with this expected value. allow_unlabeled : bool, default=True If ``True``, some instances might not belong to any class. sparse : bool, default=False If ``True``, return a sparse feature matrix. .. versionadded:: 0.17 parameter to allow *sparse* output. return_indicator : {'dense', 'sparse'} or False, default='dense' If ``'dense'`` return ``Y`` in the dense binary indicator format. If ``'sparse'`` return ``Y`` in the sparse binary indicator format. ``False`` returns a list of lists of labels. return_distributions : bool, default=False If ``True``, return the prior class probability and conditional probabilities of features given classes, from which the data was drawn. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : ndarray of shape (n_samples, n_features) The generated samples. Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) The label sets. Sparse matrix should be of CSR format. p_c : ndarray of shape (n_classes,) The probability of each class being drawn. Only returned if ``return_distributions=True``. p_w_c : ndarray of shape (n_features, n_classes) The probability of each feature being drawn given each class. Only returned if ``return_distributions=True``. 
Examples -------- >>> from sklearn.datasets import make_multilabel_classification >>> X, y = make_multilabel_classification(n_labels=3, random_state=42) >>> X.shape (100, 20) >>> y.shape (100, 5) >>> list(y[:3]) [array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])] """ generator = check_random_state(random_state) p_c = generator.uniform(size=n_classes) p_c /= p_c.sum() cumulative_p_c = np.cumsum(p_c) p_w_c = generator.uniform(size=(n_features, n_classes)) p_w_c /= np.sum(p_w_c, axis=0) def sample_example(): _, n_classes = p_w_c.shape # pick a nonzero number of labels per document by rejection sampling y_size = n_classes + 1 while (not allow_unlabeled and y_size == 0) or y_size > n_classes: y_size = generator.poisson(n_labels) # pick n classes y = set() while len(y) != y_size: # pick a class with probability P(c) c = np.searchsorted(cumulative_p_c, generator.uniform(size=y_size - len(y))) y.update(c) y = list(y) # pick a non-zero document length by rejection sampling n_words = 0 while n_words == 0: n_words = generator.poisson(length) # generate a document of length n_words if len(y) == 0: # if sample does not belong to any class, generate noise word words = generator.randint(n_features, size=n_words) return words, y # sample words with replacement from selected classes cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum() cumulative_p_w_sample /= cumulative_p_w_sample[-1] words = np.searchsorted(cumulative_p_w_sample, generator.uniform(size=n_words)) return words, y X_indices = array.array("i") X_indptr = array.array("i", [0]) Y = [] for i in range(n_samples): words, y = sample_example() X_indices.extend(words) X_indptr.append(len(X_indices)) Y.append(y) X_data = np.ones(len(X_indices), dtype=np.float64) X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features)) X.sum_duplicates() if not sparse: X = X.toarray() # return_indicator can be True due to backward compatibility if return_indicator in (True, "sparse", 
"dense"): lb = MultiLabelBinarizer(sparse_output=(return_indicator == "sparse")) Y = lb.fit([range(n_classes)]).transform(Y) if return_distributions: return X, Y, p_c, p_w_c return X, Y @validate_params( { "n_samples": [Interval(Integral, 1, None, closed="left")], "random_state": ["random_state"], }, prefer_skip_nested_validation=True, ) def make_hastie_10_2(n_samples=12000, *, random_state=None): """Generate data for binary classification used in Hastie et al. 2009, Example 10.2. The ten features are standard independent Gaussian and the target ``y`` is defined by:: y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1 Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=12000 The number of samples. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : ndarray of shape (n_samples, 10) The input samples. y : ndarray of shape (n_samples,) The output values. See Also -------- make_gaussian_quantiles : A generalization of this dataset approach. References ---------- .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. 
Examples -------- >>> from sklearn.datasets import make_hastie_10_2 >>> X, y = make_hastie_10_2(n_samples=24000, random_state=42) >>> X.shape (24000, 10) >>> y.shape (24000,) >>> list(y[:5]) [np.float64(-1.0), np.float64(1.0), np.float64(-1.0), np.float64(1.0), np.float64(-1.0)] """ rs = check_random_state(random_state) shape = (n_samples, 10) X = rs.normal(size=shape).reshape(shape) y = ((X**2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False) y[y == 0.0] = -1.0 return X, y @validate_params( { "n_samples": [Interval(Integral, 1, None, closed="left")], "n_features": [Interval(Integral, 1, None, closed="left")], "n_informative": [Interval(Integral, 0, None, closed="left")], "n_targets": [Interval(Integral, 1, None, closed="left")], "bias": [Interval(Real, None, None, closed="neither")], "effective_rank": [Interval(Integral, 1, None, closed="left"), None], "tail_strength": [Interval(Real, 0, 1, closed="both")], "noise": [Interval(Real, 0, None, closed="left")], "shuffle": ["boolean"], "coef": ["boolean"], "random_state": ["random_state"], }, prefer_skip_nested_validation=True, ) def make_regression( n_samples=100, n_features=100, *, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None, ): """Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See :func:`make_low_rank_matrix` for more details. The output is generated by applying a (potentially biased) random linear regression model with `n_informative` nonzero regressors to the previously generated input and some gaussian centered noise with some adjustable scale. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=100 The number of features. 
n_informative : int, default=10 The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, default=1 The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, default=0.0 The bias term in the underlying linear model. effective_rank : int, default=None If not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. If None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float, default=0.5 The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. When a float, it should be between 0 and 1. noise : float, default=0.0 The standard deviation of the gaussian noise applied to the output. shuffle : bool, default=True Shuffle the samples and the features. coef : bool, default=False If True, the coefficients of the underlying linear model are returned. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : ndarray of shape (n_samples, n_features) The input samples. y : ndarray of shape (n_samples,) or (n_samples, n_targets) The output values. coef : ndarray of shape (n_features,) or (n_features, n_targets) The coefficient of the underlying linear model. It is returned only if coef is True. 
Examples -------- >>> from sklearn.datasets import make_regression >>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42) >>> X array([[ 0.4967, -0.1382 ], [ 0.6476, 1.523], [-0.2341, -0.2341], [-0.4694, 0.5425], [ 1.579, 0.7674]]) >>> y array([ 6.737, 37.79, -10.27, 0.4017, 42.22]) """ n_informative = min(n_features, n_informative) generator = check_random_state(random_state) if effective_rank is None: # Randomly generate a well conditioned input set X = generator.standard_normal(size=(n_samples, n_features)) else: # Randomly generate a low rank, fat tail input set X = make_low_rank_matrix( n_samples=n_samples, n_features=n_features, effective_rank=effective_rank, tail_strength=tail_strength, random_state=generator, ) # Generate a ground truth model with only n_informative features being non # zeros (the other features are not correlated to y and should be ignored # by a sparsifying regularizers such as L1 or elastic net) ground_truth = np.zeros((n_features, n_targets)) ground_truth[:n_informative, :] = 100 * generator.uniform( size=(n_informative, n_targets) ) y = np.dot(X, ground_truth) + bias # Add noise if noise > 0.0: y += generator.normal(scale=noise, size=y.shape) # Randomly permute samples and features if shuffle: X, y = util_shuffle(X, y, random_state=generator) indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] ground_truth = ground_truth[indices] y = np.squeeze(y) if coef: return X, y, np.squeeze(ground_truth) else: return X, y @validate_params( { "n_samples": [Interval(Integral, 0, None, closed="left"), tuple], "shuffle": ["boolean"], "noise": [Interval(Real, 0, None, closed="left"), None], "random_state": ["random_state"], "factor": [Interval(Real, 0, 1, closed="left")], }, prefer_skip_nested_validation=True, ) def make_circles( n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8 ): """Make a large circle containing a smaller circle in 2d. 
A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int or tuple of shape (2,), dtype=int, default=100 If int, it is the total number of points generated. For odd numbers, the inner circle will have one point more than the outer circle. If two-element tuple, number of points in outer circle and inner circle. .. versionchanged:: 0.23 Added two-element tuple. shuffle : bool, default=True Whether to shuffle the samples. noise : float, default=None Standard deviation of Gaussian noise added to the data. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling and noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. factor : float, default=.8 Scale factor between inner and outer circle in the range `[0, 1)`. Returns ------- X : ndarray of shape (n_samples, 2) The generated samples. y : ndarray of shape (n_samples,) The integer labels (0 or 1) for class membership of each sample. 
Examples -------- >>> from sklearn.datasets import make_circles >>> X, y = make_circles(random_state=42) >>> X.shape (100, 2) >>> y.shape (100,) >>> list(y[:5]) [np.int64(1), np.int64(1), np.int64(1), np.int64(0), np.int64(0)] """ if isinstance(n_samples, numbers.Integral): n_samples_out = n_samples // 2 n_samples_in = n_samples - n_samples_out else: # n_samples is a tuple if len(n_samples) != 2: raise ValueError("When a tuple, n_samples must have exactly two elements.") n_samples_out, n_samples_in = n_samples generator = check_random_state(random_state) # so as not to have the first point = last point, we set endpoint=False linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False) linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False) outer_circ_x = np.cos(linspace_out) outer_circ_y = np.sin(linspace_out) inner_circ_x = np.cos(linspace_in) * factor inner_circ_y = np.sin(linspace_in) * factor X = np.vstack( [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)] ).T y = np.hstack( [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)] ) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y @validate_params( { "n_samples": [Interval(Integral, 1, None, closed="left"), tuple], "shuffle": ["boolean"], "noise": [Interval(Real, 0, None, closed="left"), None], "random_state": ["random_state"], }, prefer_skip_nested_validation=True, ) def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None): """Make two interleaving half circles. A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int or tuple of shape (2,), dtype=int, default=100 If int, the total number of points generated. If two-element tuple, number of points in each of two moons. .. 
versionchanged:: 0.23 Added two-element tuple. shuffle : bool, default=True Whether to shuffle the samples. noise : float, default=None Standard deviation of Gaussian noise added to the data. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling and noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : ndarray of shape (n_samples, 2) The generated samples. y : ndarray of shape (n_samples,) The integer labels (0 or 1) for class membership of each sample.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/__init__.py
sklearn/datasets/__init__.py
"""Utilities to load popular datasets and artificial data generators.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import textwrap from sklearn.datasets._base import ( clear_data_home, fetch_file, get_data_home, load_breast_cancer, load_diabetes, load_digits, load_files, load_iris, load_linnerud, load_sample_image, load_sample_images, load_wine, ) from sklearn.datasets._california_housing import fetch_california_housing from sklearn.datasets._covtype import fetch_covtype from sklearn.datasets._kddcup99 import fetch_kddcup99 from sklearn.datasets._lfw import fetch_lfw_pairs, fetch_lfw_people from sklearn.datasets._olivetti_faces import fetch_olivetti_faces from sklearn.datasets._openml import fetch_openml from sklearn.datasets._rcv1 import fetch_rcv1 from sklearn.datasets._samples_generator import ( make_biclusters, make_blobs, make_checkerboard, make_circles, make_classification, make_friedman1, make_friedman2, make_friedman3, make_gaussian_quantiles, make_hastie_10_2, make_low_rank_matrix, make_moons, make_multilabel_classification, make_regression, make_s_curve, make_sparse_coded_signal, make_sparse_spd_matrix, make_sparse_uncorrelated, make_spd_matrix, make_swiss_roll, ) from sklearn.datasets._species_distributions import fetch_species_distributions from sklearn.datasets._svmlight_format_io import ( dump_svmlight_file, load_svmlight_file, load_svmlight_files, ) from sklearn.datasets._twenty_newsgroups import ( fetch_20newsgroups, fetch_20newsgroups_vectorized, ) __all__ = [ "clear_data_home", "dump_svmlight_file", "fetch_20newsgroups", "fetch_20newsgroups_vectorized", "fetch_california_housing", "fetch_covtype", "fetch_file", "fetch_kddcup99", "fetch_lfw_pairs", "fetch_lfw_people", "fetch_olivetti_faces", "fetch_openml", "fetch_rcv1", "fetch_species_distributions", "get_data_home", "load_breast_cancer", "load_diabetes", "load_digits", "load_files", "load_iris", "load_linnerud", "load_sample_image", "load_sample_images", 
"load_svmlight_file", "load_svmlight_files", "load_wine", "make_biclusters", "make_blobs", "make_checkerboard", "make_circles", "make_classification", "make_friedman1", "make_friedman2", "make_friedman3", "make_gaussian_quantiles", "make_hastie_10_2", "make_low_rank_matrix", "make_moons", "make_multilabel_classification", "make_regression", "make_s_curve", "make_sparse_coded_signal", "make_sparse_spd_matrix", "make_sparse_uncorrelated", "make_spd_matrix", "make_swiss_roll", ] def __getattr__(name): if name == "load_boston": msg = textwrap.dedent( """ `load_boston` has been removed from scikit-learn since version 1.2. The Boston housing prices dataset has an ethical problem: as investigated in [1], the authors of this dataset engineered a non-invertible variable "B" assuming that racial self-segregation had a positive impact on house prices [2]. Furthermore the goal of the research that led to the creation of this dataset was to study the impact of air quality but it did not give adequate demonstration of the validity of this assumption. The scikit-learn maintainers therefore strongly discourage the use of this dataset unless the purpose of the code is to study and educate about ethical issues in data science and machine learning. In this special case, you can fetch the dataset from the original source:: import pandas as pd import numpy as np data_url = "http://lib.stat.cmu.edu/datasets/boston" raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None) data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]]) target = raw_df.values[1::2, 2] Alternative datasets include the California housing dataset and the Ames housing dataset. You can load the datasets as follows:: from sklearn.datasets import fetch_california_housing housing = fetch_california_housing() for the California housing dataset and:: from sklearn.datasets import fetch_openml housing = fetch_openml(name="house_prices", as_frame=True) for the Ames housing dataset. [1] M Carlisle. 
"Racist data destruction?" <https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8> [2] Harrison Jr, David, and Daniel L. Rubinfeld. "Hedonic housing prices and the demand for clean air." Journal of environmental economics and management 5.1 (1978): 81-102. <https://www.researchgate.net/publication/4974606_Hedonic_housing_prices_and_the_demand_for_clean_air> """ ) raise ImportError(msg) try: return globals()[name] except KeyError: # This is turned into the appropriate ImportError raise AttributeError
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_lfw.py
sklearn/datasets/_lfw.py
"""Labeled Faces in the Wild (LFW) dataset This dataset is a collection of JPEG pictures of famous people collected over the internet, all details are available on the official website: http://vis-www.cs.umass.edu/lfw/ """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import logging from numbers import Integral, Real from os import PathLike, listdir, makedirs, remove from os.path import exists, isdir, join import numpy as np from joblib import Memory from sklearn.datasets._base import ( RemoteFileMetadata, _fetch_remote, get_data_home, load_descr, ) from sklearn.utils import Bunch from sklearn.utils._param_validation import ( Hidden, Interval, StrOptions, validate_params, ) from sklearn.utils.fixes import tarfile_extractall logger = logging.getLogger(__name__) # The original data can be found in: # http://vis-www.cs.umass.edu/lfw/lfw.tgz ARCHIVE = RemoteFileMetadata( filename="lfw.tgz", url="https://ndownloader.figshare.com/files/5976018", checksum="055f7d9c632d7370e6fb4afc7468d40f970c34a80d4c6f50ffec63f5a8d536c0", ) # The original funneled data can be found in: # http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz FUNNELED_ARCHIVE = RemoteFileMetadata( filename="lfw-funneled.tgz", url="https://ndownloader.figshare.com/files/5976015", checksum="b47c8422c8cded889dc5a13418c4bc2abbda121092b3533a83306f90d900100a", ) # The original target data can be found in: # http://vis-www.cs.umass.edu/lfw/pairsDevTrain.txt', # http://vis-www.cs.umass.edu/lfw/pairsDevTest.txt', # http://vis-www.cs.umass.edu/lfw/pairs.txt', TARGETS = ( RemoteFileMetadata( filename="pairsDevTrain.txt", url="https://ndownloader.figshare.com/files/5976012", checksum="1d454dada7dfeca0e7eab6f65dc4e97a6312d44cf142207be28d688be92aabfa", ), RemoteFileMetadata( filename="pairsDevTest.txt", url="https://ndownloader.figshare.com/files/5976009", checksum="7cb06600ea8b2814ac26e946201cdb304296262aad67d046a16a7ec85d0ff87c", ), RemoteFileMetadata( filename="pairs.txt", 
url="https://ndownloader.figshare.com/files/5976006", checksum="ea42330c62c92989f9d7c03237ed5d591365e89b3e649747777b70e692dc1592", ), ) # # Common private utilities for data fetching from the original LFW website # local disk caching, and image decoding. # def _check_fetch_lfw( data_home=None, funneled=True, download_if_missing=True, n_retries=3, delay=1.0 ): """Helper function to download any missing LFW data""" data_home = get_data_home(data_home=data_home) lfw_home = join(data_home, "lfw_home") if not exists(lfw_home): makedirs(lfw_home) for target in TARGETS: target_filepath = join(lfw_home, target.filename) if not exists(target_filepath): if download_if_missing: logger.info("Downloading LFW metadata: %s", target.url) _fetch_remote( target, dirname=lfw_home, n_retries=n_retries, delay=delay ) else: raise OSError("%s is missing" % target_filepath) if funneled: data_folder_path = join(lfw_home, "lfw_funneled") archive = FUNNELED_ARCHIVE else: data_folder_path = join(lfw_home, "lfw") archive = ARCHIVE if not exists(data_folder_path): archive_path = join(lfw_home, archive.filename) if not exists(archive_path): if download_if_missing: logger.info("Downloading LFW data (~200MB): %s", archive.url) _fetch_remote( archive, dirname=lfw_home, n_retries=n_retries, delay=delay ) else: raise OSError("%s is missing" % archive_path) import tarfile logger.debug("Decompressing the data archive to %s", data_folder_path) with tarfile.open(archive_path, "r:gz") as fp: tarfile_extractall(fp, path=lfw_home) remove(archive_path) return lfw_home, data_folder_path def _load_imgs(file_paths, slice_, color, resize): """Internally used to load images""" try: from PIL import Image except ImportError: raise ImportError( "The Python Imaging Library (PIL) is required to load data " "from jpeg files. Please refer to " "https://pillow.readthedocs.io/en/stable/installation.html " "for installing PIL." 
) # compute the portion of the images to load to respect the slice_ parameter # given by the caller default_slice = (slice(0, 250), slice(0, 250)) if slice_ is None: slice_ = default_slice else: slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice)) h_slice, w_slice = slice_ h = (h_slice.stop - h_slice.start) // (h_slice.step or 1) w = (w_slice.stop - w_slice.start) // (w_slice.step or 1) if resize is not None: resize = float(resize) h = int(resize * h) w = int(resize * w) # allocate some contiguous memory to host the decoded image slices n_faces = len(file_paths) if not color: faces = np.zeros((n_faces, h, w), dtype=np.float32) else: faces = np.zeros((n_faces, h, w, 3), dtype=np.float32) # iterate over the collected file path to load the jpeg files as numpy # arrays for i, file_path in enumerate(file_paths): if i % 1000 == 0: logger.debug("Loading face #%05d / %05d", i + 1, n_faces) # Checks if jpeg reading worked. Refer to issue #3594 for more # details. with Image.open(file_path) as pil_img: pil_img = pil_img.crop( (w_slice.start, h_slice.start, w_slice.stop, h_slice.stop) ) if resize is not None: pil_img = pil_img.resize((w, h)) face = np.asarray(pil_img, dtype=np.float32) if face.ndim == 0: raise RuntimeError( "Failed to read the image file %s, " "Please make sure that libjpeg is installed" % file_path ) face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats if not color: # average the color channels to compute a gray levels # representation face = face.mean(axis=2) faces[i, ...] = face return faces # # Task #1: Face Identification on picture with names # def _fetch_lfw_people( data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0 ): """Perform the actual data loading for the lfw people dataset This operation is meant to be cached by a joblib wrapper. 
""" # scan the data folder content to retain people with more that # `min_faces_per_person` face pictures person_names, file_paths = [], [] for person_name in sorted(listdir(data_folder_path)): folder_path = join(data_folder_path, person_name) if not isdir(folder_path): continue paths = [join(folder_path, f) for f in sorted(listdir(folder_path))] n_pictures = len(paths) if n_pictures >= min_faces_per_person: person_name = person_name.replace("_", " ") person_names.extend([person_name] * n_pictures) file_paths.extend(paths) n_faces = len(file_paths) if n_faces == 0: raise ValueError( "min_faces_per_person=%d is too restrictive" % min_faces_per_person ) target_names = np.unique(person_names) target = np.searchsorted(target_names, person_names) faces = _load_imgs(file_paths, slice_, color, resize) # shuffle the faces with a deterministic RNG scheme to avoid having # all faces of the same person in a row, as it would break some # cross validation and learning algorithms such as SGD and online # k-means that make an IID assumption indices = np.arange(n_faces) np.random.RandomState(42).shuffle(indices) faces, target = faces[indices], target[indices] return faces, target, target_names @validate_params( { "data_home": [str, PathLike, None], "funneled": ["boolean"], "resize": [Interval(Real, 0, None, closed="neither"), None], "min_faces_per_person": [Interval(Integral, 0, None, closed="left"), None], "color": ["boolean"], "slice_": [tuple, Hidden(None)], "download_if_missing": ["boolean"], "return_X_y": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_lfw_people( *, data_home=None, funneled=True, resize=0.5, min_faces_per_person=0, color=False, slice_=(slice(70, 195), slice(78, 172)), download_if_missing=True, return_X_y=False, n_retries=3, delay=1.0, ): """Load the Labeled Faces in the Wild (LFW) people dataset \ (classification). 
Download it if necessary. ================= ======================= Classes 5749 Samples total 13233 Dimensionality 5828 Features real, between 0 and 255 ================= ======================= For a usage example of this dataset, see :ref:`sphx_glr_auto_examples_applications_plot_face_recognition.py`. Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. funneled : bool, default=True Download and use the funneled variant of the dataset. resize : float or None, default=0.5 Ratio used to resize the each face picture. If `None`, no resizing is performed. min_faces_per_person : int, default=None The extracted dataset will only retain pictures of people that have at least `min_faces_per_person` different pictures. color : bool, default=False Keep the 3 RGB channels instead of averaging them to a single gray level channel. If color is True the shape of the data has one more dimension than the shape with color = False. slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) Provide a custom 2D slice (height, width) to extract the 'interesting' part of the jpeg files and avoid use statistical correlation from the background. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch object. See below for more information about the `dataset.data` and `dataset.target` object. .. versionadded:: 0.20 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. 
versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : numpy array of shape (13233, 2914) Each row corresponds to a ravelled face image of original size 62 x 47 pixels. Changing the ``slice_`` or resize parameters will change the shape of the output. images : numpy array of shape (13233, 62, 47) Each row is a face image corresponding to one of the 5749 people in the dataset. Changing the ``slice_`` or resize parameters will change the shape of the output. target : numpy array of shape (13233,) Labels associated to each face image. Those labels range from 0-5748 and correspond to the person IDs. target_names : numpy array of shape (5749,) Names of all persons in the dataset. Position in array corresponds to the person ID in the target array. DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_lfw_people >>> lfw_people = fetch_lfw_people() >>> lfw_people.data.shape (13233, 2914) >>> lfw_people.target.shape (13233,) >>> for name in lfw_people.target_names[:5]: ... 
print(name) AJ Cook AJ Lamas Aaron Eckhart Aaron Guiel Aaron Patterson """ lfw_home, data_folder_path = _check_fetch_lfw( data_home=data_home, funneled=funneled, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) logger.debug("Loading LFW people faces from %s", lfw_home) # wrap the loader in a memoizing function that will return memmaped data # arrays for optimal memory usage m = Memory(location=lfw_home, compress=6, verbose=0) load_func = m.cache(_fetch_lfw_people) # load and memoize the pairs as np arrays faces, target, target_names = load_func( data_folder_path, resize=resize, min_faces_per_person=min_faces_per_person, color=color, slice_=slice_, ) X = faces.reshape(len(faces), -1) fdescr = load_descr("lfw.rst") if return_X_y: return X, target # pack the results as a Bunch instance return Bunch( data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr ) # # Task #2: Face Verification on pairs of face pictures # def _fetch_lfw_pairs( index_file_path, data_folder_path, slice_=None, color=False, resize=None ): """Perform the actual data loading for the LFW pairs dataset This operation is meant to be cached by a joblib wrapper. 
""" # parse the index file to find the number of pairs to be able to allocate # the right amount of memory before starting to decode the jpeg files with open(index_file_path, "rb") as index_file: split_lines = [ln.decode().strip().split("\t") for ln in index_file] pair_specs = [sl for sl in split_lines if len(sl) > 2] n_pairs = len(pair_specs) # iterating over the metadata lines for each pair to find the filename to # decode and load in memory target = np.zeros(n_pairs, dtype=int) file_paths = list() for i, components in enumerate(pair_specs): if len(components) == 3: target[i] = 1 pair = ( (components[0], int(components[1]) - 1), (components[0], int(components[2]) - 1), ) elif len(components) == 4: target[i] = 0 pair = ( (components[0], int(components[1]) - 1), (components[2], int(components[3]) - 1), ) else: raise ValueError("invalid line %d: %r" % (i + 1, components)) for j, (name, idx) in enumerate(pair): try: person_folder = join(data_folder_path, name) except TypeError: person_folder = join(data_folder_path, str(name, "UTF-8")) filenames = list(sorted(listdir(person_folder))) file_path = join(person_folder, filenames[idx]) file_paths.append(file_path) pairs = _load_imgs(file_paths, slice_, color, resize) shape = list(pairs.shape) n_faces = shape.pop(0) shape.insert(0, 2) shape.insert(0, n_faces // 2) pairs.shape = shape return pairs, target, np.array(["Different persons", "Same person"]) @validate_params( { "subset": [StrOptions({"train", "test", "10_folds"})], "data_home": [str, PathLike, None], "funneled": ["boolean"], "resize": [Interval(Real, 0, None, closed="neither"), None], "color": ["boolean"], "slice_": [tuple, Hidden(None)], "download_if_missing": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_lfw_pairs( *, subset="train", data_home=None, funneled=True, resize=0.5, color=False, slice_=(slice(70, 195), slice(78, 172)), 
download_if_missing=True, n_retries=3, delay=1.0, ): """Load the Labeled Faces in the Wild (LFW) pairs dataset (classification). Download it if necessary. ================= ======================= Classes 2 Samples total 13233 Dimensionality 5828 Features real, between 0 and 255 ================= ======================= In the `original paper <https://people.cs.umass.edu/~elm/papers/lfw.pdf>`_ the "pairs" version corresponds to the "restricted task", where the experimenter should not use the name of a person to infer the equivalence or non-equivalence of two face images that are not explicitly given in the training set. The original images are 250 x 250 pixels, but the default slice and resize arguments reduce them to 62 x 47. Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`. Parameters ---------- subset : {'train', 'test', '10_folds'}, default='train' Select the dataset to load: 'train' for the development training set, 'test' for the development test set, and '10_folds' for the official evaluation set that is meant to be used with a 10-folds cross validation. data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. funneled : bool, default=True Download and use the funneled variant of the dataset. resize : float, default=0.5 Ratio used to resize the each face picture. color : bool, default=False Keep the 3 RGB channels instead of averaging them to a single gray level channel. If color is True the shape of the data has one more dimension than the shape with color = False. slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) Provide a custom 2D slice (height, width) to extract the 'interesting' part of the jpeg files and avoid use statistical correlation from the background. 
download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (2200, 5828). Shape depends on ``subset``. Each row corresponds to 2 ravel'd face images of original size 62 x 47 pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters will change the shape of the output. pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset`` Each row has 2 face images corresponding to same or different person from the dataset containing 5749 people. Changing the ``slice_``, ``resize`` or ``subset`` parameters will change the shape of the output. target : numpy array of shape (2200,). Shape depends on ``subset``. Labels associated to each pair of images. The two label values being different persons or the same person. target_names : numpy array of shape (2,) Explains the target values of the target array. 0 corresponds to "Different person", 1 corresponds to "same person". DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. 
Examples -------- >>> from sklearn.datasets import fetch_lfw_pairs >>> lfw_pairs_train = fetch_lfw_pairs(subset='train') >>> list(lfw_pairs_train.target_names) [np.str_('Different persons'), np.str_('Same person')] >>> lfw_pairs_train.pairs.shape (2200, 2, 62, 47) >>> lfw_pairs_train.data.shape (2200, 5828) >>> lfw_pairs_train.target.shape (2200,) """ lfw_home, data_folder_path = _check_fetch_lfw( data_home=data_home, funneled=funneled, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) logger.debug("Loading %s LFW pairs from %s", subset, lfw_home) # wrap the loader in a memoizing function that will return memmaped data # arrays for optimal memory usage m = Memory(location=lfw_home, compress=6, verbose=0) load_func = m.cache(_fetch_lfw_pairs) # select the right metadata file according to the requested subset label_filenames = { "train": "pairsDevTrain.txt", "test": "pairsDevTest.txt", "10_folds": "pairs.txt", } if subset not in label_filenames: raise ValueError( "subset='%s' is invalid: should be one of %r" % (subset, list(sorted(label_filenames.keys()))) ) index_file_path = join(lfw_home, label_filenames[subset]) # load and memoize the pairs as np arrays pairs, target, target_names = load_func( index_file_path, data_folder_path, resize=resize, color=color, slice_=slice_ ) fdescr = load_descr("lfw.rst") # pack the results as a Bunch instance return Bunch( data=pairs.reshape(len(pairs), -1), pairs=pairs, target=target, target_names=target_names, DESCR=fdescr, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_twenty_newsgroups.py
sklearn/datasets/_twenty_newsgroups.py
"""Caching loader for the 20 newsgroups text classification dataset. The description of the dataset is available on the official website at: http://people.csail.mit.edu/jrennie/20Newsgroups/ Quoting the introduction: The 20 Newsgroups data set is a collection of approximately 20,000 newsgroup documents, partitioned (nearly) evenly across 20 different newsgroups. To the best of my knowledge, it was originally collected by Ken Lang, probably for his Newsweeder: Learning to filter netnews paper, though he does not explicitly mention this collection. The 20 newsgroups collection has become a popular data set for experiments in text applications of machine learning techniques, such as text classification and text clustering. This dataset loader will download the recommended "by date" variant of the dataset and which features a point in time split between the train and test sets. The compressed dataset size is around 14 Mb compressed. Once uncompressed the train set is 52 MB and the test set is 34 MB. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import codecs import logging import os import pickle import re import shutil import tarfile from contextlib import suppress from numbers import Integral, Real import joblib import numpy as np import scipy.sparse as sp from sklearn import preprocessing from sklearn.datasets import get_data_home, load_files from sklearn.datasets._base import ( RemoteFileMetadata, _convert_data_dataframe, _fetch_remote, _pkl_filepath, load_descr, ) from sklearn.feature_extraction.text import CountVectorizer from sklearn.utils import Bunch, check_random_state from sklearn.utils._param_validation import Interval, StrOptions, validate_params from sklearn.utils.fixes import tarfile_extractall logger = logging.getLogger(__name__) # The original data can be found at: # https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz ARCHIVE = RemoteFileMetadata( filename="20news-bydate.tar.gz", url="https://ndownloader.figshare.com/files/5975967", checksum="8f1b2514ca22a5ade8fbb9cfa5727df95fa587f4c87b786e15c759fa66d95610", ) CACHE_NAME = "20news-bydate.pkz" TRAIN_FOLDER = "20news-bydate-train" TEST_FOLDER = "20news-bydate-test" def _download_20newsgroups(target_dir, cache_path, n_retries, delay): """Download the 20 newsgroups data and stored it as a zipped pickle.""" train_path = os.path.join(target_dir, TRAIN_FOLDER) test_path = os.path.join(target_dir, TEST_FOLDER) os.makedirs(target_dir, exist_ok=True) logger.info("Downloading dataset from %s (14 MB)", ARCHIVE.url) archive_path = _fetch_remote( ARCHIVE, dirname=target_dir, n_retries=n_retries, delay=delay ) logger.debug("Decompressing %s", archive_path) with tarfile.open(archive_path, "r:gz") as fp: tarfile_extractall(fp, path=target_dir) with suppress(FileNotFoundError): os.remove(archive_path) # Store a zipped pickle cache = dict( train=load_files(train_path, encoding="latin1"), test=load_files(test_path, encoding="latin1"), ) compressed_content = 
codecs.encode(pickle.dumps(cache), "zlib_codec") with open(cache_path, "wb") as f: f.write(compressed_content) shutil.rmtree(target_dir) return cache def strip_newsgroup_header(text): """ Given text in "news" format, strip the headers, by removing everything before the first blank line. Parameters ---------- text : str The text from which to remove the signature block. """ _before, _blankline, after = text.partition("\n\n") return after _QUOTE_RE = re.compile( r"(writes in|writes:|wrote:|says:|said:|^In article|^Quoted from|^\||^>)" ) def strip_newsgroup_quoting(text): """ Given text in "news" format, strip lines beginning with the quote characters > or |, plus lines that often introduce a quoted section (for example, because they contain the string 'writes:'.) Parameters ---------- text : str The text from which to remove the signature block. """ good_lines = [line for line in text.split("\n") if not _QUOTE_RE.search(line)] return "\n".join(good_lines) def strip_newsgroup_footer(text): """ Given text in "news" format, attempt to remove a signature block. As a rough heuristic, we assume that signatures are set apart by either a blank line or a line made of hyphens, and that it is the last such line in the file (disregarding blank lines at the end). Parameters ---------- text : str The text from which to remove the signature block. 
""" lines = text.strip().split("\n") for line_num in range(len(lines) - 1, -1, -1): line = lines[line_num] if line.strip().strip("-") == "": break if line_num > 0: return "\n".join(lines[:line_num]) else: return text @validate_params( { "data_home": [str, os.PathLike, None], "subset": [StrOptions({"train", "test", "all"})], "categories": ["array-like", None], "shuffle": ["boolean"], "random_state": ["random_state"], "remove": [tuple], "download_if_missing": ["boolean"], "return_X_y": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_20newsgroups( *, data_home=None, subset="train", categories=None, shuffle=True, random_state=42, remove=(), download_if_missing=True, return_X_y=False, n_retries=3, delay=1.0, ): """Load the filenames and data from the 20 newsgroups dataset \ (classification). Download it if necessary. ================= ========== Classes 20 Samples total 18846 Dimensionality 1 Features text ================= ========== Read more in the :ref:`User Guide <20newsgroups_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify a download and cache folder for the datasets. If None, all scikit-learn data is stored in '~/scikit_learn_data' subfolders. subset : {'train', 'test', 'all'}, default='train' Select the dataset to load: 'train' for the training set, 'test' for the test set, 'all' for both, with shuffled ordering. categories : array-like, dtype=str, default=None If None (default), load all the categories. If not None, list of category names to load (other categories ignored). shuffle : bool, default=True Whether or not to shuffle the data: might be important for models that make the assumption that the samples are independent and identically distributed (i.i.d.), such as stochastic gradient descent. 
random_state : int, RandomState instance or None, default=42 Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. remove : tuple, default=() May contain any subset of ('headers', 'footers', 'quotes'). Each of these are kinds of text that will be detected and removed from the newsgroup posts, preventing classifiers from overfitting on metadata. 'headers' removes newsgroup headers, 'footers' removes blocks at the ends of posts that look like signatures, and 'quotes' removes lines that appear to be quoting another post. 'headers' follows an exact standard; the other filters are not always correct. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns `(data.data, data.target)` instead of a Bunch object. .. versionadded:: 0.22 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- bunch : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : list of shape (n_samples,) The data list to learn. target: ndarray of shape (n_samples,) The target labels. filenames: list of shape (n_samples,) The path to the location of the data. DESCR: str The full description of the dataset. target_names: list of shape (n_classes,) The names of target classes. (data, target) : tuple if `return_X_y=True` A tuple of two ndarrays. The first contains a 2D array of shape (n_samples, n_classes) with each row representing one sample and each column representing the features. The second array of shape (n_samples,) contains the target samples. .. 
versionadded:: 0.22 Examples -------- >>> from sklearn.datasets import fetch_20newsgroups >>> cats = ['alt.atheism', 'sci.space'] >>> newsgroups_train = fetch_20newsgroups(subset='train', categories=cats) >>> list(newsgroups_train.target_names) ['alt.atheism', 'sci.space'] >>> newsgroups_train.filenames.shape (1073,) >>> newsgroups_train.target.shape (1073,) >>> newsgroups_train.target[:10] array([0, 1, 1, 1, 0, 1, 1, 0, 0, 0]) """ data_home = get_data_home(data_home=data_home) cache_path = _pkl_filepath(data_home, CACHE_NAME) twenty_home = os.path.join(data_home, "20news_home") cache = None if os.path.exists(cache_path): try: with open(cache_path, "rb") as f: compressed_content = f.read() uncompressed_content = codecs.decode(compressed_content, "zlib_codec") cache = pickle.loads(uncompressed_content) except Exception as e: print(80 * "_") print("Cache loading failed") print(80 * "_") print(e) if cache is None: if download_if_missing: logger.info("Downloading 20news dataset. This may take a few minutes.") cache = _download_20newsgroups( target_dir=twenty_home, cache_path=cache_path, n_retries=n_retries, delay=delay, ) else: raise OSError("20Newsgroups dataset not found") if subset in ("train", "test"): data = cache[subset] elif subset == "all": data_lst = list() target = list() filenames = list() for subset in ("train", "test"): data = cache[subset] data_lst.extend(data.data) target.extend(data.target) filenames.extend(data.filenames) data.data = data_lst data.target = np.array(target) data.filenames = np.array(filenames) fdescr = load_descr("twenty_newsgroups.rst") data.DESCR = fdescr if "headers" in remove: data.data = [strip_newsgroup_header(text) for text in data.data] if "footers" in remove: data.data = [strip_newsgroup_footer(text) for text in data.data] if "quotes" in remove: data.data = [strip_newsgroup_quoting(text) for text in data.data] if categories is not None: labels = [(data.target_names.index(cat), cat) for cat in categories] # Sort the categories 
to have the ordering of the labels labels.sort() labels, categories = zip(*labels) mask = np.isin(data.target, labels) data.filenames = data.filenames[mask] data.target = data.target[mask] # searchsorted to have continuous labels data.target = np.searchsorted(labels, data.target) data.target_names = list(categories) # Use an object array to shuffle: avoids memory copy data_lst = np.array(data.data, dtype=object) data_lst = data_lst[mask] data.data = data_lst.tolist() if shuffle: random_state = check_random_state(random_state) indices = np.arange(data.target.shape[0]) random_state.shuffle(indices) data.filenames = data.filenames[indices] data.target = data.target[indices] # Use an object array to shuffle: avoids memory copy data_lst = np.array(data.data, dtype=object) data_lst = data_lst[indices] data.data = data_lst.tolist() if return_X_y: return data.data, data.target return data @validate_params( { "subset": [StrOptions({"train", "test", "all"})], "remove": [tuple], "data_home": [str, os.PathLike, None], "download_if_missing": ["boolean"], "return_X_y": ["boolean"], "normalize": ["boolean"], "as_frame": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_20newsgroups_vectorized( *, subset="train", remove=(), data_home=None, download_if_missing=True, return_X_y=False, normalize=True, as_frame=False, n_retries=3, delay=1.0, ): """Load and vectorize the 20 newsgroups dataset (classification). Download it if necessary. This is a convenience function; the transformation is done using the default settings for :class:`~sklearn.feature_extraction.text.CountVectorizer`. 
For more advanced usage (stopword filtering, n-gram extraction, etc.), combine fetch_20newsgroups with a custom :class:`~sklearn.feature_extraction.text.CountVectorizer`, :class:`~sklearn.feature_extraction.text.HashingVectorizer`, :class:`~sklearn.feature_extraction.text.TfidfTransformer` or :class:`~sklearn.feature_extraction.text.TfidfVectorizer`. The resulting counts are normalized using :func:`sklearn.preprocessing.normalize` unless normalize is set to False. ================= ========== Classes 20 Samples total 18846 Dimensionality 130107 Features real ================= ========== Read more in the :ref:`User Guide <20newsgroups_dataset>`. Parameters ---------- subset : {'train', 'test', 'all'}, default='train' Select the dataset to load: 'train' for the training set, 'test' for the test set, 'all' for both, with shuffled ordering. remove : tuple, default=() May contain any subset of ('headers', 'footers', 'quotes'). Each of these are kinds of text that will be detected and removed from the newsgroup posts, preventing classifiers from overfitting on metadata. 'headers' removes newsgroup headers, 'footers' removes blocks at the ends of posts that look like signatures, and 'quotes' removes lines that appear to be quoting another post. data_home : str or path-like, default=None Specify a download and cache folder for the datasets. If None, all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 normalize : bool, default=True If True, normalizes each document's feature vector to unit norm using :func:`sklearn.preprocessing.normalize`. .. 
versionadded:: 0.22 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string, or categorical). The target is a pandas DataFrame or Series depending on the number of `target_columns`. .. versionadded:: 0.24 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- bunch : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data: {sparse matrix, dataframe} of shape (n_samples, n_features) The input data matrix. If ``as_frame`` is `True`, ``data`` is a pandas DataFrame with sparse columns. target: {ndarray, series} of shape (n_samples,) The target labels. If ``as_frame`` is `True`, ``target`` is a pandas Series. target_names: list of shape (n_classes,) The names of target classes. DESCR: str The full description of the dataset. frame: dataframe of shape (n_samples, n_features + 1) Only present when `as_frame=True`. Pandas DataFrame with ``data`` and ``target``. .. versionadded:: 0.24 (data, target) : tuple if ``return_X_y`` is True `data` and `target` would be of the format defined in the `Bunch` description above. .. 
versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_20newsgroups_vectorized >>> newsgroups_vectorized = fetch_20newsgroups_vectorized(subset='test') >>> newsgroups_vectorized.data.shape (7532, 130107) >>> newsgroups_vectorized.target.shape (7532,) """ data_home = get_data_home(data_home=data_home) filebase = "20newsgroup_vectorized" if remove: filebase += "remove-" + "-".join(remove) target_file = _pkl_filepath(data_home, filebase + ".pkl") # we shuffle but use a fixed seed for the memoization data_train = fetch_20newsgroups( data_home=data_home, subset="train", categories=None, shuffle=True, random_state=12, remove=remove, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) data_test = fetch_20newsgroups( data_home=data_home, subset="test", categories=None, shuffle=True, random_state=12, remove=remove, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) if os.path.exists(target_file): try: X_train, X_test, feature_names = joblib.load(target_file) except ValueError as e: raise ValueError( f"The cached dataset located in {target_file} was fetched " "with an older scikit-learn version and it is not compatible " "with the scikit-learn version imported. You need to " f"manually delete the file: {target_file}." 
) from e else: vectorizer = CountVectorizer(dtype=np.int16) X_train = vectorizer.fit_transform(data_train.data).tocsr() X_test = vectorizer.transform(data_test.data).tocsr() feature_names = vectorizer.get_feature_names_out() joblib.dump((X_train, X_test, feature_names), target_file, compress=9) # the data is stored as int16 for compactness # but normalize needs floats if normalize: X_train = X_train.astype(np.float64) X_test = X_test.astype(np.float64) preprocessing.normalize(X_train, copy=False) preprocessing.normalize(X_test, copy=False) target_names = data_train.target_names if subset == "train": data = X_train target = data_train.target elif subset == "test": data = X_test target = data_test.target elif subset == "all": data = sp.vstack((X_train, X_test)).tocsr() target = np.concatenate((data_train.target, data_test.target)) fdescr = load_descr("twenty_newsgroups.rst") frame = None target_name = ["category_class"] if as_frame: frame, data, target = _convert_data_dataframe( "fetch_20newsgroups_vectorized", data, target, feature_names, target_names=target_name, sparse_data=True, ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, feature_names=feature_names, DESCR=fdescr, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_species_distributions.py
sklearn/datasets/_species_distributions.py
""" ============================= Species distribution dataset ============================= This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- `"Maximum entropy modeling of species geographic distributions" <http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import logging from io import BytesIO from numbers import Integral, Real from os import PathLike, remove from os.path import exists import joblib import numpy as np from sklearn.datasets import get_data_home from sklearn.datasets._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath from sklearn.utils import Bunch from sklearn.utils._param_validation import Interval, validate_params # The original data can be found at: # https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip SAMPLES = RemoteFileMetadata( filename="samples.zip", url="https://ndownloader.figshare.com/files/5976075", checksum="abb07ad284ac50d9e6d20f1c4211e0fd3c098f7f85955e89d321ee8efe37ac28", ) # The original data can be found at: # https://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip COVERAGES = RemoteFileMetadata( filename="coverages.zip", url="https://ndownloader.figshare.com/files/5976078", checksum="4d862674d72e79d6cee77e63b98651ec7926043ba7d39dcb31329cf3f6073807", ) DATA_ARCHIVE_NAME = "species_coverage.pkz" logger = logging.getLogger(__name__) def _load_coverage(F, header_length=6, dtype=np.int16): """Load a coverage file from an open file 
object. This will return a numpy array of the given dtype """ header = [F.readline() for _ in range(header_length)] make_tuple = lambda t: (t.split()[0], float(t.split()[1])) header = dict([make_tuple(line) for line in header]) M = np.loadtxt(F, dtype=dtype) nodata = int(header[b"NODATA_value"]) if nodata != -9999: M[nodata] = -9999 return M def _load_csv(F): """Load csv file. Parameters ---------- F : file object CSV file open in byte mode. Returns ------- rec : np.ndarray record array representing the data """ names = F.readline().decode("ascii").strip().split(",") rec = np.loadtxt(F, skiprows=0, delimiter=",", dtype="S22,f4,f4") rec.dtype.names = names return rec def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) @validate_params( { "data_home": [str, PathLike, None], "download_if_missing": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_species_distributions( *, data_home=None, download_if_missing=True, n_retries=3, delay=1.0, ): """Loader for species distribution dataset from Phillips et. al. (2006). Read more in the :ref:`User Guide <species_distribution_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. 
By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1624,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (620,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees Notes ----- This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. 
Schapire - Ecological Modelling, 190:231-259, 2006. Examples -------- >>> from sklearn.datasets import fetch_species_distributions >>> species = fetch_species_distributions() >>> species.train[:5] array([(b'microryzomys_minutus', -64.7 , -17.85 ), (b'microryzomys_minutus', -67.8333, -16.3333), (b'microryzomys_minutus', -67.8833, -16.3 ), (b'microryzomys_minutus', -67.8 , -16.2667), (b'microryzomys_minutus', -67.9833, -15.9 )], dtype=[('species', 'S22'), ('dd long', '<f4'), ('dd lat', '<f4')]) For a more extended example, see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` """ data_home = get_data_home(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. They will be saved in the npz file # with the downloaded data. extra_params = dict( x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05, ) dtype = np.int16 archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME) if not exists(archive_path): if not download_if_missing: raise OSError("Data not found and `download_if_missing` is False") logger.info("Downloading species data from %s to %s" % (SAMPLES.url, data_home)) samples_path = _fetch_remote( SAMPLES, dirname=data_home, n_retries=n_retries, delay=delay ) with np.load(samples_path) as X: # samples.zip is a valid npz for f in X.files: fhandle = BytesIO(X[f]) if "train" in f: train = _load_csv(fhandle) if "test" in f: test = _load_csv(fhandle) remove(samples_path) logger.info( "Downloading coverage data from %s to %s" % (COVERAGES.url, data_home) ) coverages_path = _fetch_remote( COVERAGES, dirname=data_home, n_retries=n_retries, delay=delay ) with np.load(coverages_path) as X: # coverages.zip is a valid npz coverages = [] for f in X.files: fhandle = BytesIO(X[f]) logger.debug(" - converting {}".format(f)) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) remove(coverages_path) bunch = 
Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, archive_path, compress=9) else: bunch = joblib.load(archive_path) return bunch
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_olivetti_faces.py
sklearn/datasets/_olivetti_faces.py
"""Modified Olivetti faces dataset. The original database was available from (now defunct) https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html The version retrieved here comes in MATLAB format from the personal web page of Sam Roweis: https://cs.nyu.edu/~roweis/ """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Integral, Real from os import PathLike, remove from os.path import exists import joblib import numpy as np from scipy.io import loadmat from sklearn.datasets import get_data_home from sklearn.datasets._base import ( RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr, ) from sklearn.utils import Bunch, check_random_state from sklearn.utils._param_validation import Interval, validate_params # The original data can be found at: # https://cs.nyu.edu/~roweis/data/olivettifaces.mat FACES = RemoteFileMetadata( filename="olivettifaces.mat", url="https://ndownloader.figshare.com/files/5976027", checksum="b612fb967f2dc77c9c62d3e1266e0c73d5fca46a4b8906c18e454d41af987794", ) @validate_params( { "data_home": [str, PathLike, None], "shuffle": ["boolean"], "random_state": ["random_state"], "download_if_missing": ["boolean"], "return_X_y": ["boolean"], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], }, prefer_skip_nested_validation=True, ) def fetch_olivetti_faces( *, data_home=None, shuffle=False, random_state=0, download_if_missing=True, return_X_y=False, n_retries=3, delay=1.0, ): """Load the Olivetti faces data-set from AT&T (classification). Download it if necessary. ================= ===================== Classes 40 Samples total 400 Dimensionality 4096 Features real, between 0 and 1 ================= ===================== Read more in the :ref:`User Guide <olivetti_faces_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. 
By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. shuffle : bool, default=False If True the order of the dataset is shuffled to avoid having images of the same person grouped. random_state : int, RandomState instance or None, default=0 Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns `(data, target)` instead of a `Bunch` object. See below for more information about the `data` and `target` object. .. versionadded:: 0.22 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data: ndarray, shape (400, 4096) Each row corresponds to a ravelled face image of original size 64 x 64 pixels. images : ndarray, shape (400, 64, 64) Each row is a face image corresponding to one of the 40 subjects of the dataset. target : ndarray, shape (400,) Labels associated to each face image. Those labels are ranging from 0-39 and correspond to the Subject IDs. DESCR : str Description of the modified Olivetti Faces Dataset. (data, target) : tuple if `return_X_y=True` Tuple with the `data` and `target` objects described above. .. 
versionadded:: 0.22 Examples -------- >>> from sklearn.datasets import fetch_olivetti_faces >>> olivetti_faces = fetch_olivetti_faces() >>> olivetti_faces.data.shape (400, 4096) >>> olivetti_faces.target.shape (400,) >>> olivetti_faces.images.shape (400, 64, 64) """ data_home = get_data_home(data_home=data_home) filepath = _pkl_filepath(data_home, "olivetti.pkz") if not exists(filepath): if not download_if_missing: raise OSError("Data not found and `download_if_missing` is False") print("downloading Olivetti faces from %s to %s" % (FACES.url, data_home)) mat_path = _fetch_remote( FACES, dirname=data_home, n_retries=n_retries, delay=delay ) mfile = loadmat(file_name=mat_path) # delete raw .mat data remove(mat_path) faces = mfile["faces"].T.copy() joblib.dump(faces, filepath, compress=6) del mfile else: faces = joblib.load(filepath) # We want floating point data, but float32 is enough (there is only # one byte of precision in the original uint8s anyway) faces = np.float32(faces) faces = faces - faces.min() faces /= faces.max() faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1) # 10 images per class, 400 images total, each class is contiguous. target = np.array([i // 10 for i in range(400)]) if shuffle: random_state = check_random_state(random_state) order = random_state.permutation(len(faces)) faces = faces[order] target = target[order] faces_vectorized = faces.reshape(len(faces), -1) fdescr = load_descr("olivetti_faces.rst") if return_X_y: return faces_vectorized, target return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_arff_parser.py
sklearn/datasets/_arff_parser.py
"""Implementation of ARFF parsers: via LIAC-ARFF and pandas.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import itertools import re from collections import OrderedDict from collections.abc import Generator from typing import List import numpy as np import scipy as sp from sklearn.externals import _arff from sklearn.externals._arff import ArffSparseDataType from sklearn.utils._chunking import chunk_generator, get_chunk_n_rows from sklearn.utils._optional_dependencies import check_pandas_support from sklearn.utils.fixes import pd_fillna def _split_sparse_columns( arff_data: ArffSparseDataType, include_columns: List ) -> ArffSparseDataType: """Obtains several columns from sparse ARFF representation. Additionally, the column indices are re-labelled, given the columns that are not included. (e.g., when including [1, 2, 3], the columns will be relabelled to [0, 1, 2]). Parameters ---------- arff_data : tuple A tuple of three lists of equal size; first list indicating the value, second the x coordinate and the third the y coordinate. include_columns : list A list of columns to include. Returns ------- arff_data_new : tuple Subset of arff data with only the include columns indicated by the include_columns argument. 
""" arff_data_new: ArffSparseDataType = (list(), list(), list()) reindexed_columns = { column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) } for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): if col_idx in include_columns: arff_data_new[0].append(val) arff_data_new[1].append(row_idx) arff_data_new[2].append(reindexed_columns[col_idx]) return arff_data_new def _sparse_data_to_array( arff_data: ArffSparseDataType, include_columns: List ) -> np.ndarray: # turns the sparse data back into an array (can't use toarray() function, # as this does only work on numeric data) num_obs = max(arff_data[1]) + 1 y_shape = (num_obs, len(include_columns)) reindexed_columns = { column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) } # TODO: improve for efficiency y = np.empty(y_shape, dtype=np.float64) for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): if col_idx in include_columns: y[row_idx, reindexed_columns[col_idx]] = val return y def _post_process_frame(frame, feature_names, target_names): """Post process a dataframe to select the desired columns in `X` and `y`. Parameters ---------- frame : dataframe The dataframe to split into `X` and `y`. feature_names : list of str The list of feature names to populate `X`. target_names : list of str The list of target names to populate `y`. Returns ------- X : dataframe The dataframe containing the features. y : {series, dataframe} or None The series or dataframe containing the target. """ X = frame[feature_names] if len(target_names) >= 2: y = frame[target_names] elif len(target_names) == 1: y = frame[target_names[0]] else: y = None return X, y def _liac_arff_parser( gzip_file, output_arrays_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape=None, ): """ARFF parser using the LIAC-ARFF library coded purely in Python. This parser is quite slow but consumes a generator. 
Currently it is needed to parse sparse datasets. For dense datasets, it is recommended to instead use the pandas-based parser, although it does not always handles the dtypes exactly the same. Parameters ---------- gzip_file : GzipFile instance The file compressed to be read. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities ara: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ def _io_to_generator(gzip_file): for line in gzip_file: yield line.decode("utf-8") stream = _io_to_generator(gzip_file) # find which type (dense or sparse) ARFF type we will have to deal with return_type = _arff.COO if output_arrays_type == "sparse" else _arff.DENSE_GEN # we should not let LIAC-ARFF to encode the nominal attributes with NumPy # arrays to have only numerical values. 
encode_nominal = not (output_arrays_type == "pandas") arff_container = _arff.load( stream, return_type=return_type, encode_nominal=encode_nominal ) columns_to_select = feature_names_to_select + target_names_to_select categories = { name: cat for name, cat in arff_container["attributes"] if isinstance(cat, list) and name in columns_to_select } if output_arrays_type == "pandas": pd = check_pandas_support("fetch_openml with as_frame=True") columns_info = OrderedDict(arff_container["attributes"]) columns_names = list(columns_info.keys()) # calculate chunksize first_row = next(arff_container["data"]) first_df = pd.DataFrame([first_row], columns=columns_names, copy=False) row_bytes = first_df.memory_usage(deep=True).sum() chunksize = get_chunk_n_rows(row_bytes) # read arff data with chunks columns_to_keep = [col for col in columns_names if col in columns_to_select] dfs = [first_df[columns_to_keep]] for data in chunk_generator(arff_container["data"], chunksize): dfs.append( pd.DataFrame(data, columns=columns_names, copy=False)[columns_to_keep] ) # dfs[0] contains only one row, which may not have enough data to infer to # column's dtype. Here we use `dfs[1]` to configure the dtype in dfs[0] if len(dfs) >= 2: dfs[0] = dfs[0].astype(dfs[1].dtypes) # liac-arff parser does not depend on NumPy and uses None to represent # missing values. To be consistent with the pandas parser, we replace # None with np.nan. frame = pd.concat(dfs, ignore_index=True) frame = pd_fillna(pd, frame) del dfs, first_df # cast the columns frame dtypes = {} for name in frame.columns: column_dtype = openml_columns_info[name]["data_type"] if column_dtype.lower() == "integer": # Use a pandas extension array instead of np.int64 to be able # to support missing values. 
dtypes[name] = "Int64" elif column_dtype.lower() == "nominal": dtypes[name] = "category" else: dtypes[name] = frame.dtypes[name] frame = frame.astype(dtypes) X, y = _post_process_frame( frame, feature_names_to_select, target_names_to_select ) else: arff_data = arff_container["data"] feature_indices_to_select = [ int(openml_columns_info[col_name]["index"]) for col_name in feature_names_to_select ] target_indices_to_select = [ int(openml_columns_info[col_name]["index"]) for col_name in target_names_to_select ] if isinstance(arff_data, Generator): if shape is None: raise ValueError( "shape must be provided when arr['data'] is a Generator" ) if shape[0] == -1: count = -1 else: count = shape[0] * shape[1] data = np.fromiter( itertools.chain.from_iterable(arff_data), dtype="float64", count=count, ) data = data.reshape(*shape) X = data[:, feature_indices_to_select] y = data[:, target_indices_to_select] elif isinstance(arff_data, tuple): arff_data_X = _split_sparse_columns(arff_data, feature_indices_to_select) num_obs = max(arff_data[1]) + 1 X_shape = (num_obs, len(feature_indices_to_select)) X = sp.sparse.coo_matrix( (arff_data_X[0], (arff_data_X[1], arff_data_X[2])), shape=X_shape, dtype=np.float64, ) X = X.tocsr() y = _sparse_data_to_array(arff_data, target_indices_to_select) else: # This should never happen raise ValueError( f"Unexpected type for data obtained from arff: {type(arff_data)}" ) is_classification = { col_name in categories for col_name in target_names_to_select } if not is_classification: # No target pass elif all(is_classification): y = np.hstack( [ np.take( np.asarray(categories.pop(col_name), dtype="O"), y[:, i : i + 1].astype(int, copy=False), ) for i, col_name in enumerate(target_names_to_select) ] ) elif any(is_classification): raise ValueError( "Mix of nominal and non-nominal targets is not currently supported" ) # reshape y back to 1-D array, if there is only 1 target column; # back to None if there are not target columns if y.shape[1] == 1: y = 
y.reshape((-1,)) elif y.shape[1] == 0: y = None if output_arrays_type == "pandas": return X, y, frame, None return X, y, None, categories def _pandas_arff_parser( gzip_file, output_arrays_type, openml_columns_info, feature_names_to_select, target_names_to_select, read_csv_kwargs=None, ): """ARFF parser using `pandas.read_csv`. This parser uses the metadata fetched directly from OpenML and skips the metadata headers of ARFF file itself. The data is loaded as a CSV file. Parameters ---------- gzip_file : GzipFile instance The GZip compressed file with the ARFF formatted payload. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities are: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected to build `X`. target_names_to_select : list of str A list of the target names to be selected to build `y`. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. 
""" import pandas as pd # read the file until the data section to skip the ARFF metadata headers for line in gzip_file: if line.decode("utf-8").lower().startswith("@data"): break dtypes = {} for name in openml_columns_info: column_dtype = openml_columns_info[name]["data_type"] if column_dtype.lower() == "integer": # Use Int64 to infer missing values from data # XXX: this line is not covered by our tests. Is this really needed? dtypes[name] = "Int64" elif column_dtype.lower() == "nominal": dtypes[name] = "category" # since we will not pass `names` when reading the ARFF file, we need to translate # `dtypes` from column names to column indices to pass to `pandas.read_csv` dtypes_positional = { col_idx: dtypes[name] for col_idx, name in enumerate(openml_columns_info) if name in dtypes } default_read_csv_kwargs = { "header": None, "index_col": False, # always force pandas to not use the first column as index "na_values": ["?"], # missing values are represented by `?` "keep_default_na": False, # only `?` is a missing value given the ARFF specs "comment": "%", # skip line starting by `%` since they are comments "quotechar": '"', # delimiter to use for quoted strings "skipinitialspace": True, # skip spaces after delimiter to follow ARFF specs "escapechar": "\\", "dtype": dtypes_positional, } read_csv_kwargs = {**default_read_csv_kwargs, **(read_csv_kwargs or {})} frame = pd.read_csv(gzip_file, **read_csv_kwargs) try: # Setting the columns while reading the file will select the N first columns # and not raise a ParserError. Instead, we set the columns after reading the # file and raise a ParserError if the number of columns does not match the # number of columns in the metadata given by OpenML. frame.columns = [name for name in openml_columns_info] except ValueError as exc: raise pd.errors.ParserError( "The number of columns provided by OpenML does not match the number of " "columns inferred by pandas when reading the file." 
) from exc columns_to_select = feature_names_to_select + target_names_to_select columns_to_keep = [col for col in frame.columns if col in columns_to_select] frame = frame[columns_to_keep] # `pd.read_csv` automatically handles double quotes for quoting non-numeric # CSV cell values. Contrary to LIAC-ARFF, `pd.read_csv` cannot be configured to # consider either single quotes and double quotes as valid quoting chars at # the same time since this case does not occur in regular (non-ARFF) CSV files. # To mimic the behavior of LIAC-ARFF parser, we manually strip single quotes # on categories as a post-processing steps if needed. # # Note however that we intentionally do not attempt to do this kind of manual # post-processing of (non-categorical) string-typed columns because we cannot # resolve the ambiguity of the case of CSV cell with nesting quoting such as # `"'some string value'"` with pandas. single_quote_pattern = re.compile(r"^'(?P<contents>.*)'$") def strip_single_quotes(input_string): match = re.search(single_quote_pattern, input_string) if match is None: return input_string return match.group("contents") categorical_columns = [ name for name, dtype in frame.dtypes.items() if isinstance(dtype, pd.CategoricalDtype) ] for col in categorical_columns: frame[col] = frame[col].cat.rename_categories(strip_single_quotes) X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select) if output_arrays_type == "pandas": return X, y, frame, None else: X, y = X.to_numpy(), y.to_numpy() categories = { name: dtype.categories.tolist() for name, dtype in frame.dtypes.items() if isinstance(dtype, pd.CategoricalDtype) } return X, y, None, categories def load_arff_from_gzip_file( gzip_file, parser, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape=None, read_csv_kwargs=None, ): """Load a compressed ARFF file using a given parser. Parameters ---------- gzip_file : GzipFile instance The file compressed to be read. 
parser : {"pandas", "liac-arff"} The parser used to parse the ARFF file. "pandas" is recommended but only supports loading dense datasets. output_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities ara: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ if parser == "liac-arff": return _liac_arff_parser( gzip_file, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape, ) elif parser == "pandas": return _pandas_arff_parser( gzip_file, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, read_csv_kwargs, ) else: raise ValueError( f"Unknown parser: '{parser}'. Should be 'liac-arff' or 'pandas'." )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/_openml.py
sklearn/datasets/_openml.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import gzip import hashlib import json import os import shutil import time from contextlib import closing from functools import wraps from os.path import join from tempfile import TemporaryDirectory from typing import Any, Callable, Dict, List, Optional, Tuple, Union from urllib.error import HTTPError, URLError from urllib.parse import urlparse from urllib.request import Request, urlopen from warnings import warn import numpy as np from sklearn.datasets import get_data_home from sklearn.datasets._arff_parser import load_arff_from_gzip_file from sklearn.utils import Bunch from sklearn.utils._optional_dependencies import check_pandas_support from sklearn.utils._param_validation import ( Integral, Interval, Real, StrOptions, validate_params, ) __all__ = ["fetch_openml"] _SEARCH_NAME = "https://api.openml.org/api/v1/json/data/list/data_name/{}/limit/2" _DATA_INFO = "https://api.openml.org/api/v1/json/data/{}" _DATA_FEATURES = "https://api.openml.org/api/v1/json/data/features/{}" _DATA_QUALITIES = "https://api.openml.org/api/v1/json/data/qualities/{}" OpenmlQualitiesType = List[Dict[str, str]] OpenmlFeaturesType = List[Dict[str, str]] def _get_local_path(openml_path: str, data_home: str) -> str: return os.path.join(data_home, "openml.org", openml_path + ".gz") def _retry_with_clean_cache( openml_path: str, data_home: Optional[str], no_retry_exception: Optional[Exception] = None, ) -> Callable: """If the first call to the decorated function fails, the local cached file is removed, and the function is called again. If ``data_home`` is ``None``, then the function is called once. We can provide a specific exception to not retry on using `no_retry_exception` parameter. 
""" def decorator(f): @wraps(f) def wrapper(*args, **kw): if data_home is None: return f(*args, **kw) try: return f(*args, **kw) except URLError: raise except Exception as exc: if no_retry_exception is not None and isinstance( exc, no_retry_exception ): raise warn("Invalid cache, redownloading file", RuntimeWarning) local_path = _get_local_path(openml_path, data_home) if os.path.exists(local_path): os.unlink(local_path) return f(*args, **kw) return wrapper return decorator def _retry_on_network_error( n_retries: int = 3, delay: float = 1.0, url: str = "" ) -> Callable: """If the function call results in a network error, call the function again up to ``n_retries`` times with a ``delay`` between each call. If the error has a 412 status code, don't call the function again as this is a specific OpenML error. The url parameter is used to give more information to the user about the error. """ def decorator(f): @wraps(f) def wrapper(*args, **kwargs): retry_counter = n_retries while True: try: return f(*args, **kwargs) except (URLError, TimeoutError) as e: # 412 is a specific OpenML error code. if isinstance(e, HTTPError) and e.code == 412: raise if retry_counter == 0: raise warn( f"A network error occurred while downloading {url}. Retrying..." ) # Avoid a ResourceWarning on Python 3.14 and later. if isinstance(e, HTTPError): e.close() retry_counter -= 1 time.sleep(delay) return wrapper return decorator def _open_openml_url( url: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0 ): """ Returns a resource from OpenML.org. Caches it to data_home if required. Parameters ---------- url : str OpenML URL that will be downloaded and cached locally. The path component of the URL is used to replicate the tree structure as sub-folders of the local cache folder. data_home : str Directory to which the files will be cached. If None, no caching will be applied. n_retries : int, default=3 Number of retries when HTTP errors are encountered. 
Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. Returns ------- result : stream A stream to the OpenML resource. """ def is_gzip_encoded(_fsrc): return _fsrc.info().get("Content-Encoding", "") == "gzip" req = Request(url) req.add_header("Accept-encoding", "gzip") if data_home is None: fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req) if is_gzip_encoded(fsrc): return gzip.GzipFile(fileobj=fsrc, mode="rb") return fsrc openml_path = urlparse(url).path.lstrip("/") local_path = _get_local_path(openml_path, data_home) dir_name, file_name = os.path.split(local_path) if not os.path.exists(local_path): os.makedirs(dir_name, exist_ok=True) try: # Create a tmpdir as a subfolder of dir_name where the final file will # be moved to if the download is successful. This guarantees that the # renaming operation to the final location is atomic to ensure the # concurrence safety of the dataset caching mechanism. with TemporaryDirectory(dir=dir_name) as tmpdir: with closing( _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)( req ) ) as fsrc: opener: Callable if is_gzip_encoded(fsrc): opener = open else: opener = gzip.GzipFile with opener(os.path.join(tmpdir, file_name), "wb") as fdst: shutil.copyfileobj(fsrc, fdst) shutil.move(fdst.name, local_path) except Exception: if os.path.exists(local_path): os.unlink(local_path) raise # XXX: First time, decompression will not be necessary (by using fsrc), but # it will happen nonetheless return gzip.GzipFile(local_path, "rb") class OpenMLError(ValueError): """HTTP 412 is a specific OpenML error code, indicating a generic error""" pass def _get_json_content_from_openml_api( url: str, error_message: Optional[str], data_home: Optional[str], n_retries: int = 3, delay: float = 1.0, ) -> Dict: """ Loads json data from the openml api. Parameters ---------- url : str The URL to load from. 
Should be an official OpenML endpoint. error_message : str or None The error message to raise if an acceptable OpenML error is thrown (acceptable error is, e.g., data id not found. Other errors, like 404's will throw the native error message). data_home : str or None Location to cache the response. None if no cache is required. n_retries : int, default=3 Number of retries when HTTP errors are encountered. Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. Returns ------- json_data : json the json result from the OpenML server if the call was successful. An exception otherwise. """ @_retry_with_clean_cache(url, data_home=data_home) def _load_json(): with closing( _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) ) as response: return json.loads(response.read().decode("utf-8")) try: return _load_json() except HTTPError as error: # 412 is an OpenML specific error code, indicating a generic error # (e.g., data not found) if error.code != 412: raise error # 412 error, not in except for nicer traceback raise OpenMLError(error_message) def _get_data_info_by_name( name: str, version: Union[int, str], data_home: Optional[str], n_retries: int = 3, delay: float = 1.0, ): """ Utilizes the openml dataset listing api to find a dataset by name/version OpenML api function: https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name Parameters ---------- name : str name of the dataset version : int or str If version is an integer, the exact name/version will be obtained from OpenML. If version is a string (value: "active") it will take the first version from OpenML that is annotated as active. Any other string values except "active" are treated as integer. data_home : str or None Location to cache the response. None if no cache is required. n_retries : int, default=3 Number of retries when HTTP errors are encountered. 
Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. Returns ------- first_dataset : json json representation of the first dataset object that adhired to the search criteria """ if version == "active": # situation in which we return the oldest active version url = _SEARCH_NAME.format(name) + "/status/active/" error_msg = "No active dataset {} found.".format(name) json_data = _get_json_content_from_openml_api( url, error_msg, data_home=data_home, n_retries=n_retries, delay=delay, ) res = json_data["data"]["dataset"] if len(res) > 1: first_version = version = res[0]["version"] warning_msg = ( "Multiple active versions of the dataset matching the name" f" {name} exist. Versions may be fundamentally different, " f"returning version {first_version}. " "Available versions:\n" ) for r in res: warning_msg += f"- version {r['version']}, status: {r['status']}\n" warning_msg += ( f" url: https://www.openml.org/search?type=data&id={r['did']}\n" ) warn(warning_msg) return res[0] # an integer version has been provided url = (_SEARCH_NAME + "/data_version/{}").format(name, version) try: json_data = _get_json_content_from_openml_api( url, error_message=None, data_home=data_home, n_retries=n_retries, delay=delay, ) except OpenMLError: # we can do this in 1 function call if OpenML does not require the # specification of the dataset status (i.e., return datasets with a # given name / version regardless of active, deactivated, etc. ) # TODO: feature request OpenML. 
url += "/status/deactivated" error_msg = "Dataset {} with version {} not found.".format(name, version) json_data = _get_json_content_from_openml_api( url, error_msg, data_home=data_home, n_retries=n_retries, delay=delay, ) return json_data["data"]["dataset"][0] def _get_data_description_by_id( data_id: int, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0, ) -> Dict[str, Any]: # OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id url = _DATA_INFO.format(data_id) error_message = "Dataset with data_id {} not found.".format(data_id) json_data = _get_json_content_from_openml_api( url, error_message, data_home=data_home, n_retries=n_retries, delay=delay, ) return json_data["data_set_description"] def _get_data_features( data_id: int, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0, ) -> OpenmlFeaturesType: # OpenML function: # https://www.openml.org/api_docs#!/data/get_data_features_id url = _DATA_FEATURES.format(data_id) error_message = "Dataset with data_id {} not found.".format(data_id) json_data = _get_json_content_from_openml_api( url, error_message, data_home=data_home, n_retries=n_retries, delay=delay, ) return json_data["data_features"]["feature"] def _get_data_qualities( data_id: int, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0, ) -> OpenmlQualitiesType: # OpenML API function: # https://www.openml.org/api_docs#!/data/get_data_qualities_id url = _DATA_QUALITIES.format(data_id) error_message = "Dataset with data_id {} not found.".format(data_id) json_data = _get_json_content_from_openml_api( url, error_message, data_home=data_home, n_retries=n_retries, delay=delay, ) # the qualities might not be available, but we still try to process # the data return json_data.get("data_qualities", {}).get("quality", []) def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int: """Get the number of samples from data qualities. 
Parameters ---------- data_qualities : list of dict Used to retrieve the number of instances (samples) in the dataset. Returns ------- n_samples : int The number of samples in the dataset or -1 if data qualities are unavailable. """ # If the data qualities are unavailable, we return -1 default_n_samples = -1 qualities = {d["name"]: d["value"] for d in data_qualities} return int(float(qualities.get("NumberOfInstances", default_n_samples))) def _load_arff_response( url: str, data_home: Optional[str], parser: str, output_type: str, openml_columns_info: dict, feature_names_to_select: List[str], target_names_to_select: List[str], shape: Optional[Tuple[int, int]], md5_checksum: str, n_retries: int = 3, delay: float = 1.0, read_csv_kwargs: Optional[Dict] = None, ): """Load the ARFF data associated with the OpenML URL. In addition of loading the data, this function will also check the integrity of the downloaded file from OpenML using MD5 checksum. Parameters ---------- url : str The URL of the ARFF file on OpenML. data_home : str The location where to cache the data. parser : {"liac-arff", "pandas"} The parser used to parse the ARFF file. output_type : {"numpy", "pandas", "sparse"} The type of the arrays that will be returned. The possibilities are: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str The list of the features to be selected. target_names_to_select : list of str The list of the target variables to be selected. shape : tuple or None With `parser="liac-arff"`, when using a generator to load the data, one needs to provide the shape of the data beforehand. md5_checksum : str The MD5 checksum provided by OpenML to check the data integrity. 
n_retries : int, default=3 The number of times to retry downloading the data if it fails. delay : float, default=1.0 The delay between two consecutive downloads in seconds. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv` when using the pandas parser. It allows to overwrite the default options. .. versionadded:: 1.3 Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) with closing(gzip_file): md5 = hashlib.md5() for chunk in iter(lambda: gzip_file.read(4096), b""): md5.update(chunk) actual_md5_checksum = md5.hexdigest() if actual_md5_checksum != md5_checksum: raise ValueError( f"md5 checksum of local file for {url} does not match description: " f"expected: {md5_checksum} but got {actual_md5_checksum}. " "Downloaded file could have been modified / corrupted, clean cache " "and retry..." 
) def _open_url_and_load_gzip_file(url, data_home, n_retries, delay, arff_params): gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) with closing(gzip_file): return load_arff_from_gzip_file(gzip_file, **arff_params) arff_params: Dict = dict( parser=parser, output_type=output_type, openml_columns_info=openml_columns_info, feature_names_to_select=feature_names_to_select, target_names_to_select=target_names_to_select, shape=shape, read_csv_kwargs=read_csv_kwargs or {}, ) try: X, y, frame, categories = _open_url_and_load_gzip_file( url, data_home, n_retries, delay, arff_params ) except Exception as exc: if parser != "pandas": raise from pandas.errors import ParserError if not isinstance(exc, ParserError): raise # A parsing error could come from providing the wrong quotechar # to pandas. By default, we use a double quote. Thus, we retry # with a single quote before to raise the error. arff_params["read_csv_kwargs"].update(quotechar="'") X, y, frame, categories = _open_url_and_load_gzip_file( url, data_home, n_retries, delay, arff_params ) return X, y, frame, categories def _download_data_to_bunch( url: str, sparse: bool, data_home: Optional[str], *, as_frame: bool, openml_columns_info: List[dict], data_columns: List[str], target_columns: List[str], shape: Optional[Tuple[int, int]], md5_checksum: str, n_retries: int = 3, delay: float = 1.0, parser: str, read_csv_kwargs: Optional[Dict] = None, ): """Download ARFF data, load it to a specific container and create to Bunch. This function has a mechanism to retry/cache/clean the data. Parameters ---------- url : str The URL of the ARFF file on OpenML. sparse : bool Whether the dataset is expected to use the sparse ARFF format. data_home : str The location where to cache the data. as_frame : bool Whether or not to return the data into a pandas DataFrame. openml_columns_info : list of dict The information regarding the columns provided by OpenML for the ARFF dataset. 
The information is stored as a list of dictionaries. data_columns : list of str The list of the features to be selected. target_columns : list of str The list of the target variables to be selected. shape : tuple or None With `parser="liac-arff"`, when using a generator to load the data, one needs to provide the shape of the data beforehand. md5_checksum : str The MD5 checksum provided by OpenML to check the data integrity. n_retries : int, default=3 Number of retries when HTTP errors are encountered. Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. parser : {"liac-arff", "pandas"} The parser used to parse the ARFF file. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv` when using the pandas parser. It allows to overwrite the default options. .. versionadded:: 1.3 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ # Prepare which columns and data types should be returned for the X and y features_dict = {feature["name"]: feature for feature in openml_columns_info} if sparse: output_type = "sparse" elif as_frame: output_type = "pandas" else: output_type = "numpy" # XXX: target columns should all be categorical or all numeric _verify_target_data_type(features_dict, target_columns) for name in target_columns: column_info = features_dict[name] n_missing_values = int(column_info["number_of_missing_values"]) if n_missing_values > 0: raise ValueError( f"Target column '{column_info['name']}' has {n_missing_values} missing " "values. 
Missing values are not supported for target columns." ) no_retry_exception = None if parser == "pandas": # If we get a ParserError with pandas, then we don't want to retry and we raise # early. from pandas.errors import ParserError no_retry_exception = ParserError X, y, frame, categories = _retry_with_clean_cache( url, data_home, no_retry_exception )(_load_arff_response)( url, data_home, parser=parser, output_type=output_type, openml_columns_info=features_dict, feature_names_to_select=data_columns, target_names_to_select=target_columns, shape=shape, md5_checksum=md5_checksum, n_retries=n_retries, delay=delay, read_csv_kwargs=read_csv_kwargs, ) return Bunch( data=X, target=y, frame=frame, categories=categories, feature_names=data_columns, target_names=target_columns, ) def _verify_target_data_type(features_dict, target_columns): # verifies the data type of the y array in case there are multiple targets # (throws an error if these targets do not comply with sklearn support) if not isinstance(target_columns, list): raise ValueError("target_column should be list, got: %s" % type(target_columns)) found_types = set() for target_column in target_columns: if target_column not in features_dict: raise KeyError(f"Could not find target_column='{target_column}'") if features_dict[target_column]["data_type"] == "numeric": found_types.add(np.float64) else: found_types.add(object) # note: we compare to a string, not boolean if features_dict[target_column]["is_ignore"] == "true": warn(f"target_column='{target_column}' has flag is_ignore.") if features_dict[target_column]["is_row_identifier"] == "true": warn(f"target_column='{target_column}' has flag is_row_identifier.") if len(found_types) > 1: raise ValueError( "Can only handle homogeneous multi-target datasets, " "i.e., all targets are either numeric or " "categorical." ) def _valid_data_column_names(features_list, target_columns): # logic for determining on which columns can be learned. 
Note that from the # OpenML guide follows that columns that have the `is_row_identifier` or # `is_ignore` flag, these can not be learned on. Also target columns are # excluded. valid_data_column_names = [] for feature in features_list: if ( feature["name"] not in target_columns and feature["is_ignore"] != "true" and feature["is_row_identifier"] != "true" ): valid_data_column_names.append(feature["name"]) return valid_data_column_names @validate_params( { "name": [str, None], "version": [Interval(Integral, 1, None, closed="left"), StrOptions({"active"})], "data_id": [Interval(Integral, 1, None, closed="left"), None], "data_home": [str, os.PathLike, None], "target_column": [str, list, None], "cache": [bool], "return_X_y": [bool], "as_frame": [bool, StrOptions({"auto"})], "n_retries": [Interval(Integral, 1, None, closed="left")], "delay": [Interval(Real, 0.0, None, closed="neither")], "parser": [ StrOptions({"auto", "pandas", "liac-arff"}), ], "read_csv_kwargs": [dict, None], }, prefer_skip_nested_validation=True, ) def fetch_openml( name: Optional[str] = None, *, version: Union[str, int] = "active", data_id: Optional[int] = None, data_home: Optional[Union[str, os.PathLike]] = None, target_column: Optional[Union[str, List]] = "default-target", cache: bool = True, return_X_y: bool = False, as_frame: Union[str, bool] = "auto", n_retries: int = 3, delay: float = 1.0, parser: str = "auto", read_csv_kwargs: Optional[Dict] = None, ): """Fetch dataset from openml by name or dataset id. Datasets are uniquely identified by either an integer ID or by a combination of name and version (i.e. there might be multiple versions of the 'iris' dataset). Please give either name or data_id (not both). In case a name is given, a version can also be provided. Read more in the :ref:`User Guide <openml>`. .. versionadded:: 0.20 .. 
note:: EXPERIMENTAL The API is experimental (particularly the return value structure), and might have small backward-incompatible changes without notice or warning in future releases. Parameters ---------- name : str, default=None String identifier of the dataset. Note that OpenML can have multiple datasets with the same name. version : int or 'active', default='active' Version of the dataset. Can only be provided if also ``name`` is given. If 'active' the oldest version that's still active is used. Since there may be more than one active version of a dataset, and those versions may fundamentally be different from one another, setting an exact version is highly recommended. data_id : int, default=None OpenML ID of the dataset. The most specific way of retrieving a dataset. If data_id is not given, name (and potential version) are used to obtain a dataset. data_home : str or path-like, default=None Specify another download and cache folder for the data sets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. target_column : str, list or None, default='default-target' Specify the column name in the data to use as target. If 'default-target', the standard target column a stored on the server is used. If ``None``, all columns are returned as data and the target is ``None``. If list (of strings), all columns with these names are returned as multi-target (Note: not all scikit-learn classifiers can handle all types of multi-output combinations). cache : bool, default=True Whether to cache the downloaded datasets into `data_home`. return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` objects. as_frame : bool or 'auto', default='auto' If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target_columns. 
The Bunch will contain a ``frame`` attribute with the target and the data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas DataFrames or Series as describe above. If `as_frame` is 'auto', the data and target will be converted to DataFrame or Series as if `as_frame` is set to True, unless the dataset is stored in sparse format. If `as_frame` is False, the data and target will be NumPy arrays and the `data` will only contain numerical values when `parser="liac-arff"` where the categories are provided in the attribute `categories` of the `Bunch` instance. When `parser="pandas"`, no ordinal encoding is made. .. versionchanged:: 0.24 The default value of `as_frame` changed from `False` to `'auto'` in 0.24. n_retries : int, default=3 Number of retries when HTTP errors or network timeouts are encountered. Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. parser : {"auto", "pandas", "liac-arff"}, default="auto" Parser used to load the ARFF file. Two parsers are implemented: - `"pandas"`: this is the most efficient parser. However, it requires pandas to be installed and can only open dense datasets. - `"liac-arff"`: this is a pure Python ARFF parser that is much less memory- and CPU-efficient. It deals with sparse ARFF datasets. If `"auto"`, the parser is chosen automatically such that `"liac-arff"` is selected for sparse ARFF datasets, otherwise `"pandas"` is selected. .. versionadded:: 1.2 .. versionchanged:: 1.4 The default value of `parser` changes from `"liac-arff"` to `"auto"`. read_csv_kwargs : dict, default=None Keyword arguments passed to :func:`pandas.read_csv` when loading the data from an ARFF file and using the pandas parser. It can allow to overwrite some default parameters. .. versionadded:: 1.3 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. 
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame The feature matrix. Categorical features are encoded as ordinals. target : np.array, pandas Series or DataFrame The regression target or classification labels, if applicable. Dtype is float if numeric, and object if categorical. If ``as_frame`` is True, ``target`` is a pandas object. DESCR : str The full description of the dataset. feature_names : list The names of the dataset columns. target_names: list The names of the target columns. .. versionadded:: 0.22 categories : dict or None Maps each categorical feature name to a list of values, such that the value encoded as i is ith in the list. If ``as_frame`` is True, this is None. details : dict More metadata from OpenML. frame : pandas DataFrame Only present when `as_frame=True`. DataFrame with ``data`` and ``target``. (data, target) : tuple if ``return_X_y`` is True .. note:: EXPERIMENTAL This interface is **experimental** and subsequent releases may change attributes without notice (although there should only be minor changes to ``data`` and ``target``).
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_20news.py
sklearn/datasets/tests/test_20news.py
"""Test the 20news downloader, if the data is available, or if specifically requested via environment variable (e.g. for CI jobs).""" from functools import partial from unittest.mock import patch import numpy as np import pytest import scipy.sparse as sp from sklearn.datasets.tests.test_common import ( check_as_frame, check_pandas_dependency_message, check_return_X_y, ) from sklearn.preprocessing import normalize from sklearn.utils._testing import assert_allclose_dense_sparse def test_20news(fetch_20newsgroups_fxt): data = fetch_20newsgroups_fxt(subset="all", shuffle=False) assert data.DESCR.startswith(".. _20newsgroups_dataset:") # Extract a reduced dataset data2cats = fetch_20newsgroups_fxt( subset="all", categories=data.target_names[-1:-3:-1], shuffle=False ) # Check that the ordering of the target_names is the same # as the ordering in the full dataset assert data2cats.target_names == data.target_names[-2:] # Assert that we have only 0 and 1 as labels assert np.unique(data2cats.target).tolist() == [0, 1] # Check that the number of filenames is consistent with data/target assert len(data2cats.filenames) == len(data2cats.target) assert len(data2cats.filenames) == len(data2cats.data) # Check that the first entry of the reduced dataset corresponds to # the first entry of the corresponding category in the full dataset entry1 = data2cats.data[0] category = data2cats.target_names[data2cats.target[0]] label = data.target_names.index(category) entry2 = data.data[np.where(data.target == label)[0][0]] assert entry1 == entry2 # check that return_X_y option X, y = fetch_20newsgroups_fxt(subset="all", shuffle=False, return_X_y=True) assert len(X) == len(data.data) assert y.shape == data.target.shape def test_20news_length_consistency(fetch_20newsgroups_fxt): """Checks the length consistencies within the bunch This is a non-regression test for a bug present in 0.16.1. 
""" # Extract the full dataset data = fetch_20newsgroups_fxt(subset="all") assert len(data["data"]) == len(data.data) assert len(data["target"]) == len(data.target) assert len(data["filenames"]) == len(data.filenames) def test_20news_vectorized(fetch_20newsgroups_vectorized_fxt): # test subset = train bunch = fetch_20newsgroups_vectorized_fxt(subset="train") assert sp.issparse(bunch.data) and bunch.data.format == "csr" assert bunch.data.shape == (11314, 130107) assert bunch.target.shape[0] == 11314 assert bunch.data.dtype == np.float64 assert bunch.DESCR.startswith(".. _20newsgroups_dataset:") # test subset = test bunch = fetch_20newsgroups_vectorized_fxt(subset="test") assert sp.issparse(bunch.data) and bunch.data.format == "csr" assert bunch.data.shape == (7532, 130107) assert bunch.target.shape[0] == 7532 assert bunch.data.dtype == np.float64 assert bunch.DESCR.startswith(".. _20newsgroups_dataset:") # test return_X_y option fetch_func = partial(fetch_20newsgroups_vectorized_fxt, subset="test") check_return_X_y(bunch, fetch_func) # test subset = all bunch = fetch_20newsgroups_vectorized_fxt(subset="all") assert sp.issparse(bunch.data) and bunch.data.format == "csr" assert bunch.data.shape == (11314 + 7532, 130107) assert bunch.target.shape[0] == 11314 + 7532 assert bunch.data.dtype == np.float64 assert bunch.DESCR.startswith(".. 
_20newsgroups_dataset:") def test_20news_normalization(fetch_20newsgroups_vectorized_fxt): X = fetch_20newsgroups_vectorized_fxt(normalize=False) X_ = fetch_20newsgroups_vectorized_fxt(normalize=True) X_norm = X_["data"][:100] X = X["data"][:100] assert_allclose_dense_sparse(X_norm, normalize(X)) assert np.allclose(np.linalg.norm(X_norm.todense(), axis=1), 1) def test_20news_as_frame(fetch_20newsgroups_vectorized_fxt): pd = pytest.importorskip("pandas") bunch = fetch_20newsgroups_vectorized_fxt(as_frame=True) check_as_frame(bunch, fetch_20newsgroups_vectorized_fxt) frame = bunch.frame assert frame.shape == (11314, 130108) assert all([isinstance(col, pd.SparseDtype) for col in bunch.data.dtypes]) # Check a small subset of features for expected_feature in [ "beginner", "beginners", "beginning", "beginnings", "begins", "begley", "begone", ]: assert expected_feature in frame.keys() assert "category_class" in frame.keys() assert bunch.target.name == "category_class" def test_as_frame_no_pandas(fetch_20newsgroups_vectorized_fxt, hide_available_pandas): check_pandas_dependency_message(fetch_20newsgroups_vectorized_fxt) def test_outdated_pickle(fetch_20newsgroups_vectorized_fxt): with patch("os.path.exists") as mock_is_exist: with patch("joblib.load") as mock_load: # mock that the dataset was cached mock_is_exist.return_value = True # mock that we have an outdated pickle with only X and y returned mock_load.return_value = ("X", "y") err_msg = "The cached dataset located in" with pytest.raises(ValueError, match=err_msg): fetch_20newsgroups_vectorized_fxt(as_frame=True)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_samples_generator.py
sklearn/datasets/tests/test_samples_generator.py
import re from collections import defaultdict from functools import partial import numpy as np import pytest import scipy.sparse as sp from sklearn.datasets import ( make_biclusters, make_blobs, make_checkerboard, make_circles, make_classification, make_friedman1, make_friedman2, make_friedman3, make_hastie_10_2, make_low_rank_matrix, make_moons, make_multilabel_classification, make_regression, make_s_curve, make_sparse_coded_signal, make_sparse_spd_matrix, make_sparse_uncorrelated, make_spd_matrix, make_swiss_roll, ) from sklearn.utils._testing import ( assert_allclose, assert_allclose_dense_sparse, assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from sklearn.utils.validation import assert_all_finite def test_make_classification(): weights = [0.1, 0.25] X, y = make_classification( n_samples=100, n_features=20, n_informative=5, n_redundant=1, n_repeated=1, n_classes=3, n_clusters_per_class=1, hypercube=False, shift=None, scale=None, weights=weights, random_state=0, ) assert weights == [0.1, 0.25] assert X.shape == (100, 20), "X shape mismatch" assert y.shape == (100,), "y shape mismatch" assert np.unique(y).shape == (3,), "Unexpected number of classes" assert sum(y == 0) == 10, "Unexpected number of samples in class #0" assert sum(y == 1) == 25, "Unexpected number of samples in class #1" assert sum(y == 2) == 65, "Unexpected number of samples in class #2" # Test for n_features > 30 X, y = make_classification( n_samples=2000, n_features=31, n_informative=31, n_redundant=0, n_repeated=0, hypercube=True, scale=0.5, random_state=0, ) assert X.shape == (2000, 31), "X shape mismatch" assert y.shape == (2000,), "y shape mismatch" assert ( np.unique(X.view([("", X.dtype)] * X.shape[1])) .view(X.dtype) .reshape(-1, X.shape[1]) .shape[0] == 2000 ), "Unexpected number of unique rows" def test_make_classification_informative_features(): """Test the construction of informative features in make_classification Also tests `n_clusters_per_class`, `n_classes`, 
`hypercube` and fully-specified `weights`. """ # Create very separate clusters; check that vertices are unique and # correspond to classes class_sep = 1e6 make = partial( make_classification, class_sep=class_sep, n_redundant=0, n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False, ) for n_informative, weights, n_clusters_per_class in [ (2, [1], 1), (2, [1 / 3] * 3, 1), (2, [1 / 4] * 4, 1), (2, [1 / 2] * 2, 2), (2, [3 / 4, 1 / 4], 2), (10, [1 / 3] * 3, 10), (64, [1], 1), ]: n_classes = len(weights) n_clusters = n_classes * n_clusters_per_class n_samples = n_clusters * 50 for hypercube in (False, True): X, y = make( n_samples=n_samples, n_classes=n_classes, weights=weights, n_features=n_informative, n_informative=n_informative, n_clusters_per_class=n_clusters_per_class, hypercube=hypercube, random_state=0, ) assert X.shape == (n_samples, n_informative) assert y.shape == (n_samples,) # Cluster by sign, viewed as strings to allow uniquing signs = np.sign(X) signs = signs.view(dtype="|S{0}".format(signs.strides[0])).ravel() unique_signs, cluster_index = np.unique(signs, return_inverse=True) assert len(unique_signs) == n_clusters, ( "Wrong number of clusters, or not in distinct quadrants" ) clusters_by_class = defaultdict(set) for cluster, cls in zip(cluster_index, y): clusters_by_class[cls].add(cluster) for clusters in clusters_by_class.values(): assert len(clusters) == n_clusters_per_class, ( "Wrong number of clusters per class" ) assert len(clusters_by_class) == n_classes, "Wrong number of classes" assert_array_almost_equal( np.bincount(y) / len(y) // weights, [1] * n_classes, err_msg="Wrong number of samples per class", ) # Ensure on vertices of hypercube for cluster in range(len(unique_signs)): centroid = X[cluster_index == cluster].mean(axis=0) if hypercube: assert_array_almost_equal( np.abs(centroid) / class_sep, np.ones(n_informative), decimal=5, err_msg="Clusters are not centered on hypercube vertices", ) else: with pytest.raises(AssertionError): 
assert_array_almost_equal( np.abs(centroid) / class_sep, np.ones(n_informative), decimal=5, err_msg=( "Clusters should not be centered on hypercube vertices" ), ) with pytest.raises(ValueError): make(n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1) with pytest.raises(ValueError): make(n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2) def test_make_classification_return_x_y(): """ Test that make_classification returns a Bunch when return_X_y is False. Also that bunch.X is the same as X """ kwargs = { "n_samples": 100, "n_features": 20, "n_informative": 5, "n_redundant": 1, "n_repeated": 1, "n_classes": 3, "n_clusters_per_class": 2, "weights": None, "flip_y": 0.01, "class_sep": 1.0, "hypercube": True, "shift": 0.0, "scale": 1.0, "shuffle": True, "random_state": 42, "return_X_y": True, } X, y = make_classification(**kwargs) kwargs["return_X_y"] = False bunch = make_classification(**kwargs) assert ( hasattr(bunch, "DESCR") and hasattr(bunch, "parameters") and hasattr(bunch, "feature_info") and hasattr(bunch, "X") and hasattr(bunch, "y") ) def count(str_): return bunch.feature_info.count(str_) assert np.array_equal(X, bunch.X) assert np.array_equal(y, bunch.y) assert bunch.DESCR == make_classification.__doc__ assert bunch.parameters == kwargs assert count("informative") == kwargs["n_informative"] assert count("redundant") == kwargs["n_redundant"] assert count("repeated") == kwargs["n_repeated"] @pytest.mark.parametrize( "weights, err_type, err_msg", [ ([], ValueError, "Weights specified but incompatible with number of classes."), ( [0.25, 0.75, 0.1], ValueError, "Weights specified but incompatible with number of classes.", ), ( np.array([]), ValueError, "Weights specified but incompatible with number of classes.", ), ( np.array([0.25, 0.75, 0.1]), ValueError, "Weights specified but incompatible with number of classes.", ), ( np.random.random(3), ValueError, "Weights specified but incompatible with number of classes.", ), ], ) def 
test_make_classification_weights_type(weights, err_type, err_msg): with pytest.raises(err_type, match=err_msg): make_classification(weights=weights) @pytest.mark.parametrize("kwargs", [{}, {"n_classes": 3, "n_informative": 3}]) def test_make_classification_weights_array_or_list_ok(kwargs): X1, y1 = make_classification(weights=[0.1, 0.9], random_state=0, **kwargs) X2, y2 = make_classification(weights=np.array([0.1, 0.9]), random_state=0, **kwargs) assert_almost_equal(X1, X2) assert_almost_equal(y1, y2) def test_make_multilabel_classification_return_sequences(): for allow_unlabeled, min_length in zip((True, False), (0, 1)): X, Y = make_multilabel_classification( n_samples=100, n_features=20, n_classes=3, random_state=0, return_indicator=False, allow_unlabeled=allow_unlabeled, ) assert X.shape == (100, 20), "X shape mismatch" if not allow_unlabeled: assert max([max(y) for y in Y]) == 2 assert min([len(y) for y in Y]) == min_length assert max([len(y) for y in Y]) <= 3 def test_make_multilabel_classification_return_indicator(): for allow_unlabeled, min_length in zip((True, False), (0, 1)): X, Y = make_multilabel_classification( n_samples=25, n_features=20, n_classes=3, random_state=0, allow_unlabeled=allow_unlabeled, ) assert X.shape == (25, 20), "X shape mismatch" assert Y.shape == (25, 3), "Y shape mismatch" assert np.all(np.sum(Y, axis=0) > min_length) # Also test return_distributions and return_indicator with True X2, Y2, p_c, p_w_c = make_multilabel_classification( n_samples=25, n_features=20, n_classes=3, random_state=0, allow_unlabeled=allow_unlabeled, return_distributions=True, ) assert_array_almost_equal(X, X2) assert_array_equal(Y, Y2) assert p_c.shape == (3,) assert_almost_equal(p_c.sum(), 1) assert p_w_c.shape == (20, 3) assert_almost_equal(p_w_c.sum(axis=0), [1] * 3) def test_make_multilabel_classification_return_indicator_sparse(): for allow_unlabeled, min_length in zip((True, False), (0, 1)): X, Y = make_multilabel_classification( n_samples=25, 
n_features=20, n_classes=3, random_state=0, return_indicator="sparse", allow_unlabeled=allow_unlabeled, ) assert X.shape == (25, 20), "X shape mismatch" assert Y.shape == (25, 3), "Y shape mismatch" assert sp.issparse(Y) def test_make_hastie_10_2(): X, y = make_hastie_10_2(n_samples=100, random_state=0) assert X.shape == (100, 10), "X shape mismatch" assert y.shape == (100,), "y shape mismatch" assert np.unique(y).shape == (2,), "Unexpected number of classes" def test_make_regression(global_random_seed): X, y, c = make_regression( n_samples=200, n_features=10, n_informative=3, effective_rank=5, coef=True, bias=0.0, noise=1.0, random_state=global_random_seed, ) assert X.shape == (200, 10), "X shape mismatch" assert y.shape == (200,), "y shape mismatch" assert c.shape == (10,), "coef shape mismatch" assert sum(c != 0.0) == 3, "Unexpected number of informative features" # Test that y ~= np.dot(X, c) + bias + N(0, 1.0). assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) # Test with small number of features. 
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3 assert X.shape == (100, 1) def test_make_regression_multitarget(global_random_seed): X, y, c = make_regression( n_samples=100, n_features=10, n_informative=3, n_targets=3, coef=True, noise=1.0, random_state=global_random_seed, ) assert X.shape == (100, 10), "X shape mismatch" assert y.shape == (100, 3), "y shape mismatch" assert c.shape == (10, 3), "coef shape mismatch" assert_array_equal(sum(c != 0.0), 3, "Unexpected number of informative features") # Test that y ~= np.dot(X, c) + bias + N(0, 1.0) assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) def test_make_blobs(global_random_seed): cluster_stds = np.array([0.05, 0.2, 0.4]) cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) X, y = make_blobs( random_state=global_random_seed, n_samples=50, n_features=2, centers=cluster_centers, cluster_std=cluster_stds, ) assert X.shape == (50, 2), "X shape mismatch" assert y.shape == (50,), "y shape mismatch" assert np.unique(y).shape == (3,), "Unexpected number of blobs" for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)): assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std") def test_make_blobs_n_samples_list(): n_samples = [50, 30, 20] X, y = make_blobs(n_samples=n_samples, n_features=2, random_state=0) assert X.shape == (sum(n_samples), 2), "X shape mismatch" assert all(np.bincount(y, minlength=len(n_samples)) == n_samples), ( "Incorrect number of samples per blob" ) def test_make_blobs_n_samples_list_with_centers(global_random_seed): n_samples = [20, 20, 20] centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) cluster_stds = np.array([0.05, 0.2, 0.4]) X, y = make_blobs( n_samples=n_samples, centers=centers, cluster_std=cluster_stds, random_state=global_random_seed, ) assert X.shape == (sum(n_samples), 2), "X shape mismatch" assert all(np.bincount(y, minlength=len(n_samples)) == n_samples), ( "Incorrect number of samples per blob" ) for i, 
(ctr, std) in enumerate(zip(centers, cluster_stds)): assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std") @pytest.mark.parametrize( "n_samples", [[5, 3, 0], np.array([5, 3, 0]), tuple([5, 3, 0])] ) def test_make_blobs_n_samples_centers_none(n_samples): centers = None X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=0) assert X.shape == (sum(n_samples), 2), "X shape mismatch" assert all(np.bincount(y, minlength=len(n_samples)) == n_samples), ( "Incorrect number of samples per blob" ) def test_make_blobs_return_centers(): n_samples = [10, 20] n_features = 3 X, y, centers = make_blobs( n_samples=n_samples, n_features=n_features, return_centers=True, random_state=0 ) assert centers.shape == (len(n_samples), n_features) def test_make_blobs_error(): n_samples = [20, 20, 20] centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) cluster_stds = np.array([0.05, 0.2, 0.4]) wrong_centers_msg = re.escape( "Length of `n_samples` not consistent with number of centers. " f"Got n_samples = {n_samples} and centers = {centers[:-1]}" ) with pytest.raises(ValueError, match=wrong_centers_msg): make_blobs(n_samples, centers=centers[:-1]) wrong_std_msg = re.escape( "Length of `clusters_std` not consistent with number of centers. " f"Got centers = {centers} and cluster_std = {cluster_stds[:-1]}" ) with pytest.raises(ValueError, match=wrong_std_msg): make_blobs(n_samples, centers=centers, cluster_std=cluster_stds[:-1]) wrong_type_msg = "Parameter `centers` must be array-like. 
Got {!r} instead".format( 3 ) with pytest.raises(ValueError, match=wrong_type_msg): make_blobs(n_samples, centers=3) def test_make_friedman1(global_random_seed): X, y = make_friedman1( n_samples=5, n_features=10, noise=0.0, random_state=global_random_seed ) assert X.shape == (5, 10), "X shape mismatch" assert y.shape == (5,), "y shape mismatch" assert_array_almost_equal( y, 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4], ) def test_make_friedman2(global_random_seed): X, y = make_friedman2(n_samples=5, noise=0.0, random_state=global_random_seed) assert X.shape == (5, 4), "X shape mismatch" assert y.shape == (5,), "y shape mismatch" assert_array_almost_equal( y, (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 ) def test_make_friedman3(global_random_seed): X, y = make_friedman3(n_samples=5, noise=0.0, random_state=global_random_seed) assert X.shape == (5, 4), "X shape mismatch" assert y.shape == (5,), "y shape mismatch" assert_array_almost_equal( y, np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) ) def test_make_low_rank_matrix(): X = make_low_rank_matrix( n_samples=50, n_features=25, effective_rank=5, tail_strength=0.01, random_state=0, ) assert X.shape == (50, 25), "X shape mismatch" from numpy.linalg import svd u, s, v = svd(X) assert sum(s) - 5 < 0.1, "X rank is not approximately 5" def test_make_sparse_coded_signal(global_random_seed): Y, D, X = make_sparse_coded_signal( n_samples=5, n_components=8, n_features=10, n_nonzero_coefs=3, random_state=global_random_seed, ) assert Y.shape == (5, 10), "Y shape mismatch" assert D.shape == (8, 10), "D shape mismatch" assert X.shape == (5, 8), "X shape mismatch" for row in X: assert len(np.flatnonzero(row)) == 3, "Non-zero coefs mismatch" assert_allclose(Y, X @ D) assert_allclose(np.sqrt((D**2).sum(axis=1)), np.ones(D.shape[0])) def test_make_sparse_uncorrelated(): X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, 
random_state=0) assert X.shape == (5, 10), "X shape mismatch" assert y.shape == (5,), "y shape mismatch" def test_make_spd_matrix(global_random_seed): X = make_spd_matrix(n_dim=5, random_state=global_random_seed) assert X.shape == (5, 5), "X shape mismatch" assert_array_almost_equal(X, X.T) from numpy.linalg import eig eigenvalues, _ = eig(X) assert np.all(eigenvalues > 0), "X is not positive-definite" @pytest.mark.parametrize("norm_diag", [True, False]) @pytest.mark.parametrize( "sparse_format", [None, "bsr", "coo", "csc", "csr", "dia", "dok", "lil"] ) def test_make_sparse_spd_matrix(norm_diag, sparse_format, global_random_seed): n_dim = 5 X = make_sparse_spd_matrix( n_dim=n_dim, norm_diag=norm_diag, sparse_format=sparse_format, random_state=global_random_seed, ) assert X.shape == (n_dim, n_dim), "X shape mismatch" if sparse_format is None: assert not sp.issparse(X) assert_allclose(X, X.T) Xarr = X else: assert sp.issparse(X) and X.format == sparse_format assert_allclose_dense_sparse(X, X.T) Xarr = X.toarray() from numpy.linalg import eig # Do not use scipy.sparse.linalg.eigs because it cannot find all eigenvalues eigenvalues, _ = eig(Xarr) assert np.all(eigenvalues > 0), "X is not positive-definite" if norm_diag: # Check that leading diagonal elements are 1 assert_array_almost_equal(Xarr.diagonal(), np.ones(n_dim)) @pytest.mark.parametrize("hole", [False, True]) def test_make_swiss_roll(global_random_seed, hole): X, t = make_swiss_roll( n_samples=5, noise=0.0, random_state=global_random_seed, hole=hole ) assert X.shape == (5, 3) assert t.shape == (5,) assert_array_almost_equal(X[:, 0], t * np.cos(t)) assert_array_almost_equal(X[:, 2], t * np.sin(t)) def test_make_s_curve(global_random_seed): X, t = make_s_curve(n_samples=5, noise=0.0, random_state=global_random_seed) assert X.shape == (5, 3), "X shape mismatch" assert t.shape == (5,), "t shape mismatch" assert_array_almost_equal(X[:, 0], np.sin(t)) assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1)) 
def test_make_biclusters(): X, rows, cols = make_biclusters( shape=(100, 100), n_clusters=4, shuffle=True, random_state=0 ) assert X.shape == (100, 100), "X shape mismatch" assert rows.shape == (4, 100), "rows shape mismatch" assert cols.shape == ( 4, 100, ), "columns shape mismatch" assert_all_finite(X) assert_all_finite(rows) assert_all_finite(cols) X2, _, _ = make_biclusters( shape=(100, 100), n_clusters=4, shuffle=True, random_state=0 ) assert_array_almost_equal(X, X2) def test_make_checkerboard(): X, rows, cols = make_checkerboard( shape=(100, 100), n_clusters=(20, 5), shuffle=True, random_state=0 ) assert X.shape == (100, 100), "X shape mismatch" assert rows.shape == (100, 100), "rows shape mismatch" assert cols.shape == ( 100, 100, ), "columns shape mismatch" X, rows, cols = make_checkerboard( shape=(100, 100), n_clusters=2, shuffle=True, random_state=0 ) assert_all_finite(X) assert_all_finite(rows) assert_all_finite(cols) X1, _, _ = make_checkerboard( shape=(100, 100), n_clusters=2, shuffle=True, random_state=0 ) X2, _, _ = make_checkerboard( shape=(100, 100), n_clusters=2, shuffle=True, random_state=0 ) assert_array_almost_equal(X1, X2) def test_make_moons(global_random_seed): X, y = make_moons(3, shuffle=False, random_state=global_random_seed) for x, label in zip(X, y): center = [0.0, 0.0] if label == 0 else [1.0, 0.5] dist_sqr = ((x - center) ** 2).sum() assert_almost_equal( dist_sqr, 1.0, err_msg="Point is not on expected unit circle" ) def test_make_moons_unbalanced(): X, y = make_moons(n_samples=(7, 5)) assert np.sum(y == 0) == 7 and np.sum(y == 1) == 5, ( "Number of samples in a moon is wrong" ) assert X.shape == (12, 2), "X shape mismatch" assert y.shape == (12,), "y shape mismatch" with pytest.raises( ValueError, match=r"`n_samples` can be either an int or a two-element tuple.", ): make_moons(n_samples=(10,)) def test_make_circles(): factor = 0.3 for n_samples, n_outer, n_inner in [(7, 3, 4), (8, 4, 4)]: # Testing odd and even case, because in the 
past make_circles always # created an even number of samples. X, y = make_circles(n_samples, shuffle=False, noise=None, factor=factor) assert X.shape == (n_samples, 2), "X shape mismatch" assert y.shape == (n_samples,), "y shape mismatch" center = [0.0, 0.0] for x, label in zip(X, y): dist_sqr = ((x - center) ** 2).sum() dist_exp = 1.0 if label == 0 else factor**2 dist_exp = 1.0 if label == 0 else factor**2 assert_almost_equal( dist_sqr, dist_exp, err_msg="Point is not on expected circle" ) assert X[y == 0].shape == ( n_outer, 2, ), "Samples not correctly distributed across circles." assert X[y == 1].shape == ( n_inner, 2, ), "Samples not correctly distributed across circles." def test_make_circles_unbalanced(): X, y = make_circles(n_samples=(2, 8)) assert np.sum(y == 0) == 2, "Number of samples in inner circle is wrong" assert np.sum(y == 1) == 8, "Number of samples in outer circle is wrong" assert X.shape == (10, 2), "X shape mismatch" assert y.shape == (10,), "y shape mismatch" with pytest.raises( ValueError, match="When a tuple, n_samples must have exactly two elements.", ): make_circles(n_samples=(10,))
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_common.py
sklearn/datasets/tests/test_common.py
"""Test loaders for common functionality.""" import inspect import os import numpy as np import pytest import sklearn.datasets def is_pillow_installed(): try: import PIL # noqa: F401 return True except ImportError: return False FETCH_PYTEST_MARKERS = { "return_X_y": { "fetch_20newsgroups": pytest.mark.xfail( reason="X is a list and does not have a shape argument" ), "fetch_openml": pytest.mark.xfail( reason="fetch_opeml requires a dataset name or id" ), "fetch_lfw_people": pytest.mark.skipif( not is_pillow_installed(), reason="pillow is not installed" ), }, "as_frame": { "fetch_openml": pytest.mark.xfail( reason="fetch_opeml requires a dataset name or id" ), }, } def check_pandas_dependency_message(fetch_func): try: import pandas # noqa: F401 pytest.skip("This test requires pandas to not be installed") except ImportError: # Check that pandas is imported lazily and that an informative error # message is raised when pandas is missing: name = fetch_func.__name__ expected_msg = f"{name} with as_frame=True requires pandas" with pytest.raises(ImportError, match=expected_msg): fetch_func(as_frame=True) def check_return_X_y(bunch, dataset_func): X_y_tuple = dataset_func(return_X_y=True) assert isinstance(X_y_tuple, tuple) assert X_y_tuple[0].shape == bunch.data.shape assert X_y_tuple[1].shape == bunch.target.shape def check_as_frame( bunch, dataset_func, expected_data_dtype=None, expected_target_dtype=None ): pd = pytest.importorskip("pandas") frame_bunch = dataset_func(as_frame=True) assert hasattr(frame_bunch, "frame") assert isinstance(frame_bunch.frame, pd.DataFrame) assert isinstance(frame_bunch.data, pd.DataFrame) assert frame_bunch.data.shape == bunch.data.shape if frame_bunch.target.ndim > 1: assert isinstance(frame_bunch.target, pd.DataFrame) else: assert isinstance(frame_bunch.target, pd.Series) assert frame_bunch.target.shape[0] == bunch.target.shape[0] if expected_data_dtype is not None: assert np.all(frame_bunch.data.dtypes == expected_data_dtype) if 
expected_target_dtype is not None: assert np.all(frame_bunch.target.dtypes == expected_target_dtype) # Test for return_X_y and as_frame=True frame_X, frame_y = dataset_func(as_frame=True, return_X_y=True) assert isinstance(frame_X, pd.DataFrame) if frame_y.ndim > 1: assert isinstance(frame_y, pd.DataFrame) else: assert isinstance(frame_y, pd.Series) def _skip_network_tests(): return os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "1" def _generate_func_supporting_param(param, dataset_type=("load", "fetch")): markers_fetch = FETCH_PYTEST_MARKERS.get(param, {}) for name, obj in inspect.getmembers(sklearn.datasets): if not inspect.isfunction(obj): continue is_dataset_type = any([name.startswith(t) for t in dataset_type]) is_support_param = param in inspect.signature(obj).parameters if is_dataset_type and is_support_param: # check if we should skip if we don't have network support marks = [ pytest.mark.skipif( condition=name.startswith("fetch") and _skip_network_tests(), reason="Skip because fetcher requires internet network", ) ] if name in markers_fetch: marks.append(markers_fetch[name]) yield pytest.param(name, obj, marks=marks) @pytest.mark.parametrize( "name, dataset_func", _generate_func_supporting_param("return_X_y") ) def test_common_check_return_X_y(name, dataset_func): bunch = dataset_func() check_return_X_y(bunch, dataset_func) @pytest.mark.parametrize( "name, dataset_func", _generate_func_supporting_param("as_frame") ) def test_common_check_as_frame(name, dataset_func): bunch = dataset_func() check_as_frame(bunch, dataset_func) @pytest.mark.parametrize( "name, dataset_func", _generate_func_supporting_param("as_frame") ) def test_common_check_pandas_dependency(name, dataset_func): check_pandas_dependency_message(dataset_func)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_base.py
sklearn/datasets/tests/test_base.py
import hashlib import io import os import re import shutil import tempfile import warnings from functools import partial from importlib import resources from pathlib import Path from pickle import dumps, loads from unittest.mock import Mock from urllib.error import HTTPError from urllib.parse import urlparse import numpy as np import pytest from sklearn.datasets import ( clear_data_home, fetch_file, get_data_home, load_breast_cancer, load_diabetes, load_digits, load_files, load_iris, load_linnerud, load_sample_image, load_sample_images, load_wine, ) from sklearn.datasets._base import ( RemoteFileMetadata, _derive_folder_and_filename_from_url, _fetch_remote, load_csv_data, load_gzip_compressed_csv_data, ) from sklearn.datasets.tests.test_common import check_as_frame from sklearn.preprocessing import scale from sklearn.utils import Bunch class _DummyPath: """Minimal class that implements the os.PathLike interface.""" def __init__(self, path): self.path = path def __fspath__(self): return self.path def _remove_dir(path): if os.path.isdir(path): shutil.rmtree(path) @pytest.fixture(scope="module") def data_home(tmpdir_factory): tmp_file = str(tmpdir_factory.mktemp("scikit_learn_data_home_test")) yield tmp_file _remove_dir(tmp_file) @pytest.fixture(scope="module") def load_files_root(tmpdir_factory): tmp_file = str(tmpdir_factory.mktemp("scikit_learn_load_files_test")) yield tmp_file _remove_dir(tmp_file) @pytest.fixture def test_category_dir_1(load_files_root): test_category_dir1 = tempfile.mkdtemp(dir=load_files_root) sample_file = tempfile.NamedTemporaryFile(dir=test_category_dir1, delete=False) sample_file.write(b"Hello World!\n") sample_file.close() yield str(test_category_dir1) _remove_dir(test_category_dir1) @pytest.fixture def test_category_dir_2(load_files_root): test_category_dir2 = tempfile.mkdtemp(dir=load_files_root) yield str(test_category_dir2) _remove_dir(test_category_dir2) @pytest.mark.thread_unsafe @pytest.mark.parametrize("path_container", [None, 
Path, _DummyPath]) def test_data_home(path_container, data_home): # get_data_home will point to a pre-existing folder if path_container is not None: data_home = path_container(data_home) data_home = get_data_home(data_home=data_home) assert data_home == data_home assert os.path.exists(data_home) # clear_data_home will delete both the content and the folder it-self if path_container is not None: data_home = path_container(data_home) clear_data_home(data_home=data_home) assert not os.path.exists(data_home) # if the folder is missing it will be created again data_home = get_data_home(data_home=data_home) assert os.path.exists(data_home) def test_default_empty_load_files(load_files_root): res = load_files(load_files_root) assert len(res.filenames) == 0 assert len(res.target_names) == 0 assert res.DESCR is None def test_default_load_files(test_category_dir_1, test_category_dir_2, load_files_root): res = load_files(load_files_root) assert len(res.filenames) == 1 assert len(res.target_names) == 2 assert res.DESCR is None assert res.data == [b"Hello World!\n"] def test_load_files_w_categories_desc_and_encoding( test_category_dir_1, test_category_dir_2, load_files_root ): category = os.path.abspath(test_category_dir_1).split(os.sep).pop() res = load_files( load_files_root, description="test", categories=[category], encoding="utf-8" ) assert len(res.filenames) == 1 assert len(res.target_names) == 1 assert res.DESCR == "test" assert res.data == ["Hello World!\n"] def test_load_files_wo_load_content( test_category_dir_1, test_category_dir_2, load_files_root ): res = load_files(load_files_root, load_content=False) assert len(res.filenames) == 1 assert len(res.target_names) == 2 assert res.DESCR is None assert res.get("data") is None @pytest.mark.parametrize("allowed_extensions", ([".txt"], [".txt", ".json"])) def test_load_files_allowed_extensions(tmp_path, allowed_extensions): """Check the behaviour of `allowed_extension` in `load_files`.""" d = tmp_path / "sub" d.mkdir() 
files = ("file1.txt", "file2.json", "file3.json", "file4.md") paths = [d / f for f in files] for p in paths: p.write_bytes(b"hello") res = load_files(tmp_path, allowed_extensions=allowed_extensions) assert set([str(p) for p in paths if p.suffix in allowed_extensions]) == set( res.filenames ) @pytest.mark.parametrize( "filename, expected_n_samples, expected_n_features, expected_target_names", [ ("wine_data.csv", 178, 13, ["class_0", "class_1", "class_2"]), ("iris.csv", 150, 4, ["setosa", "versicolor", "virginica"]), ("breast_cancer.csv", 569, 30, ["malignant", "benign"]), ], ) def test_load_csv_data( filename, expected_n_samples, expected_n_features, expected_target_names ): actual_data, actual_target, actual_target_names = load_csv_data(filename) assert actual_data.shape[0] == expected_n_samples assert actual_data.shape[1] == expected_n_features assert actual_target.shape[0] == expected_n_samples np.testing.assert_array_equal(actual_target_names, expected_target_names) def test_load_csv_data_with_descr(): data_file_name = "iris.csv" descr_file_name = "iris.rst" res_without_descr = load_csv_data(data_file_name=data_file_name) res_with_descr = load_csv_data( data_file_name=data_file_name, descr_file_name=descr_file_name ) assert len(res_with_descr) == 4 assert len(res_without_descr) == 3 np.testing.assert_array_equal(res_with_descr[0], res_without_descr[0]) np.testing.assert_array_equal(res_with_descr[1], res_without_descr[1]) np.testing.assert_array_equal(res_with_descr[2], res_without_descr[2]) assert res_with_descr[-1].startswith(".. 
_iris_dataset:") @pytest.mark.parametrize( "filename, kwargs, expected_shape", [ ("diabetes_data_raw.csv.gz", {}, [442, 10]), ("diabetes_target.csv.gz", {}, [442]), ("digits.csv.gz", {"delimiter": ","}, [1797, 65]), ], ) def test_load_gzip_compressed_csv_data(filename, kwargs, expected_shape): actual_data = load_gzip_compressed_csv_data(filename, **kwargs) assert actual_data.shape == tuple(expected_shape) def test_load_gzip_compressed_csv_data_with_descr(): data_file_name = "diabetes_target.csv.gz" descr_file_name = "diabetes.rst" expected_data = load_gzip_compressed_csv_data(data_file_name=data_file_name) actual_data, descr = load_gzip_compressed_csv_data( data_file_name=data_file_name, descr_file_name=descr_file_name, ) np.testing.assert_array_equal(actual_data, expected_data) assert descr.startswith(".. _diabetes_dataset:") def test_load_sample_images(): try: res = load_sample_images() assert len(res.images) == 2 assert len(res.filenames) == 2 images = res.images # assert is china image assert np.all(images[0][0, 0, :] == np.array([174, 201, 231], dtype=np.uint8)) # assert is flower image assert np.all(images[1][0, 0, :] == np.array([2, 19, 13], dtype=np.uint8)) assert res.DESCR except ImportError: warnings.warn("Could not load sample images, PIL is not available.") def test_load_sample_image(): try: china = load_sample_image("china.jpg") assert china.dtype == "uint8" assert china.shape == (427, 640, 3) except ImportError: warnings.warn("Could not load sample images, PIL is not available.") def test_load_diabetes_raw(): """Test to check that we load a scaled version by default but that we can get an unscaled version when setting `scaled=False`.""" diabetes_raw = load_diabetes(scaled=False) assert diabetes_raw.data.shape == (442, 10) assert diabetes_raw.target.size == 442 assert len(diabetes_raw.feature_names) == 10 assert diabetes_raw.DESCR diabetes_default = load_diabetes() np.testing.assert_allclose( scale(diabetes_raw.data) / (442**0.5), 
diabetes_default.data, atol=1e-04 ) @pytest.mark.parametrize( "loader_func, data_shape, target_shape, n_target, has_descr, filenames", [ (load_breast_cancer, (569, 30), (569,), 2, True, ["filename"]), (load_wine, (178, 13), (178,), 3, True, []), (load_iris, (150, 4), (150,), 3, True, ["filename"]), ( load_linnerud, (20, 3), (20, 3), 3, True, ["data_filename", "target_filename"], ), (load_diabetes, (442, 10), (442,), None, True, []), (load_digits, (1797, 64), (1797,), 10, True, []), (partial(load_digits, n_class=9), (1617, 64), (1617,), 10, True, []), ], ) def test_loader(loader_func, data_shape, target_shape, n_target, has_descr, filenames): bunch = loader_func() assert isinstance(bunch, Bunch) assert bunch.data.shape == data_shape assert bunch.target.shape == target_shape if hasattr(bunch, "feature_names"): assert len(bunch.feature_names) == data_shape[1] if n_target is not None: assert len(bunch.target_names) == n_target if has_descr: assert bunch.DESCR if filenames: assert "data_module" in bunch assert all( [ f in bunch and (resources.files(bunch["data_module"]) / bunch[f]).is_file() for f in filenames ] ) @pytest.mark.parametrize( "loader_func, data_dtype, target_dtype", [ (load_breast_cancer, np.float64, int), (load_diabetes, np.float64, np.float64), (load_digits, np.float64, int), (load_iris, np.float64, int), (load_linnerud, np.float64, np.float64), (load_wine, np.float64, int), ], ) def test_toy_dataset_frame_dtype(loader_func, data_dtype, target_dtype): default_result = loader_func() check_as_frame( default_result, loader_func, expected_data_dtype=data_dtype, expected_target_dtype=target_dtype, ) def test_loads_dumps_bunch(): bunch = Bunch(x="x") bunch_from_pkl = loads(dumps(bunch)) bunch_from_pkl.x = "y" assert bunch_from_pkl["x"] == bunch_from_pkl.x def test_bunch_pickle_generated_with_0_16_and_read_with_0_17(): bunch = Bunch(key="original") # This reproduces a problem when Bunch pickles have been created # with scikit-learn 0.16 and are read with 0.17. 
Basically there # is a surprising behaviour because reading bunch.key uses # bunch.__dict__ (which is non empty for 0.16 Bunch objects) # whereas assigning into bunch.key uses bunch.__setattr__. See # https://github.com/scikit-learn/scikit-learn/issues/6196 for # more details bunch.__dict__["key"] = "set from __dict__" bunch_from_pkl = loads(dumps(bunch)) # After loading from pickle the __dict__ should have been ignored assert bunch_from_pkl.key == "original" assert bunch_from_pkl["key"] == "original" # Making sure that changing the attr does change the value # associated with __getitem__ as well bunch_from_pkl.key = "changed" assert bunch_from_pkl.key == "changed" assert bunch_from_pkl["key"] == "changed" def test_bunch_dir(): # check that dir (important for autocomplete) shows attributes data = load_iris() assert "data" in dir(data) def test_load_boston_error(): """Check that we raise the ethical warning when trying to import `load_boston`.""" msg = "The Boston housing prices dataset has an ethical problem" with pytest.raises(ImportError, match=msg): from sklearn.datasets import load_boston # noqa: F401 # other non-existing function should raise the usual import error msg = "cannot import name 'non_existing_function' from 'sklearn.datasets'" with pytest.raises(ImportError, match=msg): from sklearn.datasets import non_existing_function # noqa: F401 def test_fetch_remote_raise_warnings_with_invalid_url(monkeypatch): """Check retry mechanism in _fetch_remote.""" url = "https://scikit-learn.org/this_file_does_not_exist.tar.gz" invalid_remote_file = RemoteFileMetadata("invalid_file", url, None) urlretrieve_mock = Mock( side_effect=HTTPError( url=url, code=404, msg="Not Found", hdrs=None, fp=io.BytesIO() ) ) monkeypatch.setattr("sklearn.datasets._base.urlretrieve", urlretrieve_mock) with pytest.warns(UserWarning, match="Retry downloading") as record: with pytest.raises(HTTPError, match="HTTP Error 404"): _fetch_remote(invalid_remote_file, n_retries=3, delay=0) assert 
urlretrieve_mock.call_count == 4 for r in record: assert str(r.message) == f"Retry downloading from url: {url}" assert len(record) == 3 def test_derive_folder_and_filename_from_url(): folder, filename = _derive_folder_and_filename_from_url( "https://example.com/file.tar.gz" ) assert folder == "example.com" assert filename == "file.tar.gz" folder, filename = _derive_folder_and_filename_from_url( "https://example.com/نمونه نماینده.data" ) assert folder == "example.com" assert filename == "نمونه-نماینده.data" folder, filename = _derive_folder_and_filename_from_url( "https://example.com/path/to-/.file.tar.gz" ) assert folder == "example.com/path_to" assert filename == "file.tar.gz" folder, filename = _derive_folder_and_filename_from_url("https://example.com/") assert folder == "example.com" assert filename == "downloaded_file" folder, filename = _derive_folder_and_filename_from_url("https://example.com") assert folder == "example.com" assert filename == "downloaded_file" folder, filename = _derive_folder_and_filename_from_url( "https://example.com/path/@to/data.json?param=value" ) assert folder == "example.com/path_to" assert filename == "data.json" folder, filename = _derive_folder_and_filename_from_url( "https://example.com/path/@@to._/-_.data.json.#anchor" ) assert folder == "example.com/path_to" assert filename == "data.json" folder, filename = _derive_folder_and_filename_from_url( "https://example.com//some_file.txt" ) assert folder == "example.com" assert filename == "some_file.txt" folder, filename = _derive_folder_and_filename_from_url( "http://example/../some_file.txt" ) assert folder == "example" assert filename == "some_file.txt" folder, filename = _derive_folder_and_filename_from_url( "https://example.com/!.'.,/some_file.txt" ) assert folder == "example.com" assert filename == "some_file.txt" folder, filename = _derive_folder_and_filename_from_url( "https://example.com/a/!.'.,/b/some_file.txt" ) assert folder == "example.com/a_b" assert filename == 
"some_file.txt" folder, filename = _derive_folder_and_filename_from_url("https://example.com/!.'.,") assert folder == "example.com" assert filename == "downloaded_file" with pytest.raises(ValueError, match="Invalid URL"): _derive_folder_and_filename_from_url("https:/../") def _mock_urlretrieve(server_side): def _urlretrieve_mock(url, local_path): server_root = Path(server_side) file_path = urlparse(url).path.strip("/") if not (server_root / file_path).exists(): raise HTTPError(url, 404, "Not Found", None, None) shutil.copy(server_root / file_path, local_path) return Mock(side_effect=_urlretrieve_mock) def test_fetch_file_using_data_home(monkeypatch, tmpdir): tmpdir = Path(tmpdir) server_side = tmpdir / "server_side" server_side.mkdir() data_file = server_side / "data.jsonl" server_data = '{"a": 1, "b": 2}\n' data_file.write_text(server_data, encoding="utf-8") server_subfolder = server_side / "subfolder" server_subfolder.mkdir() other_data_file = server_subfolder / "other_file.txt" other_data_file.write_text("Some important text data.", encoding="utf-8") data_home = tmpdir / "data_home" data_home.mkdir() urlretrieve_mock = _mock_urlretrieve(server_side) monkeypatch.setattr("sklearn.datasets._base.urlretrieve", urlretrieve_mock) monkeypatch.setattr( "sklearn.datasets._base.get_data_home", Mock(return_value=data_home) ) fetched_file_path = fetch_file( "https://example.com/data.jsonl", ) assert fetched_file_path == data_home / "example.com" / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data fetched_file_path = fetch_file( "https://example.com/subfolder/other_file.txt", ) assert ( fetched_file_path == data_home / "example.com" / "subfolder" / "other_file.txt" ) assert fetched_file_path.read_text(encoding="utf-8") == other_data_file.read_text( "utf-8" ) expected_warning_msg = re.escape( "Retry downloading from url: https://example.com/subfolder/invalid.txt" ) with pytest.raises(HTTPError): with pytest.warns(match=expected_warning_msg): 
fetch_file( "https://example.com/subfolder/invalid.txt", delay=0, ) local_subfolder = data_home / "example.com" / "subfolder" assert sorted(local_subfolder.iterdir()) == [local_subfolder / "other_file.txt"] def test_fetch_file_without_sha256(monkeypatch, tmpdir): server_side = tmpdir.mkdir("server_side") data_file = Path(server_side / "data.jsonl") server_data = '{"a": 1, "b": 2}\n' data_file.write_text(server_data, encoding="utf-8") client_side = tmpdir.mkdir("client_side") urlretrieve_mock = _mock_urlretrieve(server_side) monkeypatch.setattr("sklearn.datasets._base.urlretrieve", urlretrieve_mock) # The first call should trigger a download: fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 1 # Fetching again the same file to the same folder should do nothing: fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 1 # Deleting and calling again should re-download fetched_file_path.unlink() fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 2 def test_fetch_file_with_sha256(monkeypatch, tmpdir): server_side = tmpdir.mkdir("server_side") data_file = Path(server_side / "data.jsonl") server_data = '{"a": 1, "b": 2}\n' data_file.write_text(server_data, encoding="utf-8") expected_sha256 = hashlib.sha256(data_file.read_bytes()).hexdigest() client_side = tmpdir.mkdir("client_side") urlretrieve_mock = _mock_urlretrieve(server_side) monkeypatch.setattr("sklearn.datasets._base.urlretrieve", 
urlretrieve_mock) # The first call should trigger a download. fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, sha256=expected_sha256 ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 1 # Fetching again the same file to the same folder should do nothing when # the sha256 match: fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, sha256=expected_sha256 ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 1 # Corrupting the local data should yield a warning and trigger a new download: fetched_file_path.write_text("corrupted contents", encoding="utf-8") expected_msg = ( r"SHA256 checksum of existing local file data.jsonl " rf"\(.*\) differs from expected \({expected_sha256}\): " r"re-downloading from https://example.com/data.jsonl \." 
) with pytest.warns(match=expected_msg): fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, sha256=expected_sha256 ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 2 # Calling again should do nothing: fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, sha256=expected_sha256 ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 2 # Deleting the local file and calling again should redownload without warning: fetched_file_path.unlink() fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, sha256=expected_sha256 ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 3 # Calling without a sha256 should also work without redownloading: fetched_file_path = fetch_file( "https://example.com/data.jsonl", folder=client_side, ) assert fetched_file_path == client_side / "data.jsonl" assert fetched_file_path.read_text(encoding="utf-8") == server_data assert urlretrieve_mock.call_count == 3 # Calling with a wrong sha256 should raise an informative exception: non_matching_sha256 = "deadbabecafebeef" expected_warning_msg = "differs from expected" expected_error_msg = re.escape( f"The SHA256 checksum of data.jsonl ({expected_sha256}) differs from " f"expected ({non_matching_sha256})." ) with pytest.raises(OSError, match=expected_error_msg): with pytest.warns(match=expected_warning_msg): fetch_file( "https://example.com/data.jsonl", folder=client_side, sha256=non_matching_sha256, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_rcv1.py
sklearn/datasets/tests/test_rcv1.py
"""Test the rcv1 loader, if the data is available, or if specifically requested via environment variable (e.g. for CI jobs).""" from functools import partial import numpy as np import scipy.sparse as sp from sklearn.datasets.tests.test_common import check_return_X_y from sklearn.utils._testing import assert_almost_equal, assert_array_equal def test_fetch_rcv1(fetch_rcv1_fxt, global_random_seed): data1 = fetch_rcv1_fxt(shuffle=False) X1, Y1 = data1.data, data1.target cat_list, s1 = data1.target_names.tolist(), data1.sample_id # test sparsity assert sp.issparse(X1) assert sp.issparse(Y1) assert 60915113 == X1.data.size assert 2606875 == Y1.data.size # test shapes assert (804414, 47236) == X1.shape assert (804414, 103) == Y1.shape assert (804414,) == s1.shape assert 103 == len(cat_list) # test descr assert data1.DESCR.startswith(".. _rcv1_dataset:") # test ordering of categories first_categories = ["C11", "C12", "C13", "C14", "C15", "C151"] assert_array_equal(first_categories, cat_list[:6]) # test number of sample for some categories some_categories = ("GMIL", "E143", "CCAT") number_non_zero_in_cat = (5, 1206, 381327) for num, cat in zip(number_non_zero_in_cat, some_categories): j = cat_list.index(cat) assert num == Y1[:, j].data.size # test shuffling and subset data2 = fetch_rcv1_fxt( shuffle=True, subset="train", random_state=global_random_seed ) X2, Y2 = data2.data, data2.target s2 = data2.sample_id # test return_X_y option fetch_func = partial(fetch_rcv1_fxt, shuffle=False, subset="train") check_return_X_y(data2, fetch_func) # The first 23149 samples are the training samples assert_array_equal(np.sort(s1[:23149]), np.sort(s2)) # test some precise values some_sample_ids = (2286, 3274, 14042) for sample_id in some_sample_ids: idx1 = s1.tolist().index(sample_id) idx2 = s2.tolist().index(sample_id) feature_values_1 = X1[idx1, :].toarray() feature_values_2 = X2[idx2, :].toarray() assert_almost_equal(feature_values_1, feature_values_2) target_values_1 = Y1[idx1, 
:].toarray() target_values_2 = Y2[idx2, :].toarray() assert_almost_equal(target_values_1, target_values_2)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false