Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_base.py +304 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.pyx +262 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py +8 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/utils.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.pyx +84 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd +20 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.pyx +255 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd +43 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pyx +44 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py +807 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py +146 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.pyx +1191 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_constraints.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py +446 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py +187 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_iforest.py +673 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py +1173 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/meson.build +10 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__init__.py +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py +977 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py +262 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py +1864 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py +1711 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py +393 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py +1019 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py +787 -0
- openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py +639 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h +23 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h +23 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward.h +47 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h +23 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_native.h +21 -0
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_base.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Base class for ensemble-based estimators."""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
from abc import ABCMeta, abstractmethod
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from joblib import effective_n_jobs
|
| 10 |
+
|
| 11 |
+
from ..base import BaseEstimator, MetaEstimatorMixin, clone, is_classifier, is_regressor
|
| 12 |
+
from ..utils import Bunch, check_random_state
|
| 13 |
+
from ..utils._tags import get_tags
|
| 14 |
+
from ..utils._user_interface import _print_elapsed_time
|
| 15 |
+
from ..utils.metadata_routing import _routing_enabled
|
| 16 |
+
from ..utils.metaestimators import _BaseComposition
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _fit_single_estimator(
|
| 20 |
+
estimator, X, y, fit_params, message_clsname=None, message=None
|
| 21 |
+
):
|
| 22 |
+
"""Private function used to fit an estimator within a job."""
|
| 23 |
+
# TODO(SLEP6): remove if-condition for unrouted sample_weight when metadata
|
| 24 |
+
# routing can't be disabled.
|
| 25 |
+
if not _routing_enabled() and "sample_weight" in fit_params:
|
| 26 |
+
try:
|
| 27 |
+
with _print_elapsed_time(message_clsname, message):
|
| 28 |
+
estimator.fit(X, y, sample_weight=fit_params["sample_weight"])
|
| 29 |
+
except TypeError as exc:
|
| 30 |
+
if "unexpected keyword argument 'sample_weight'" in str(exc):
|
| 31 |
+
raise TypeError(
|
| 32 |
+
"Underlying estimator {} does not support sample weights.".format(
|
| 33 |
+
estimator.__class__.__name__
|
| 34 |
+
)
|
| 35 |
+
) from exc
|
| 36 |
+
raise
|
| 37 |
+
else:
|
| 38 |
+
with _print_elapsed_time(message_clsname, message):
|
| 39 |
+
estimator.fit(X, y, **fit_params)
|
| 40 |
+
return estimator
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _set_random_states(estimator, random_state=None):
|
| 44 |
+
"""Set fixed random_state parameters for an estimator.
|
| 45 |
+
|
| 46 |
+
Finds all parameters ending ``random_state`` and sets them to integers
|
| 47 |
+
derived from ``random_state``.
|
| 48 |
+
|
| 49 |
+
Parameters
|
| 50 |
+
----------
|
| 51 |
+
estimator : estimator supporting get/set_params
|
| 52 |
+
Estimator with potential randomness managed by random_state
|
| 53 |
+
parameters.
|
| 54 |
+
|
| 55 |
+
random_state : int, RandomState instance or None, default=None
|
| 56 |
+
Pseudo-random number generator to control the generation of the random
|
| 57 |
+
integers. Pass an int for reproducible output across multiple function
|
| 58 |
+
calls.
|
| 59 |
+
See :term:`Glossary <random_state>`.
|
| 60 |
+
|
| 61 |
+
Notes
|
| 62 |
+
-----
|
| 63 |
+
This does not necessarily set *all* ``random_state`` attributes that
|
| 64 |
+
control an estimator's randomness, only those accessible through
|
| 65 |
+
``estimator.get_params()``. ``random_state``s not controlled include
|
| 66 |
+
those belonging to:
|
| 67 |
+
|
| 68 |
+
* cross-validation splitters
|
| 69 |
+
* ``scipy.stats`` rvs
|
| 70 |
+
"""
|
| 71 |
+
random_state = check_random_state(random_state)
|
| 72 |
+
to_set = {}
|
| 73 |
+
for key in sorted(estimator.get_params(deep=True)):
|
| 74 |
+
if key == "random_state" or key.endswith("__random_state"):
|
| 75 |
+
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
|
| 76 |
+
|
| 77 |
+
if to_set:
|
| 78 |
+
estimator.set_params(**to_set)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
|
| 82 |
+
"""Base class for all ensemble classes.
|
| 83 |
+
|
| 84 |
+
Warning: This class should not be used directly. Use derived classes
|
| 85 |
+
instead.
|
| 86 |
+
|
| 87 |
+
Parameters
|
| 88 |
+
----------
|
| 89 |
+
estimator : object
|
| 90 |
+
The base estimator from which the ensemble is built.
|
| 91 |
+
|
| 92 |
+
n_estimators : int, default=10
|
| 93 |
+
The number of estimators in the ensemble.
|
| 94 |
+
|
| 95 |
+
estimator_params : list of str, default=tuple()
|
| 96 |
+
The list of attributes to use as parameters when instantiating a
|
| 97 |
+
new base estimator. If none are given, default parameters are used.
|
| 98 |
+
|
| 99 |
+
Attributes
|
| 100 |
+
----------
|
| 101 |
+
estimator_ : estimator
|
| 102 |
+
The base estimator from which the ensemble is grown.
|
| 103 |
+
|
| 104 |
+
estimators_ : list of estimators
|
| 105 |
+
The collection of fitted base estimators.
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
@abstractmethod
|
| 109 |
+
def __init__(
|
| 110 |
+
self,
|
| 111 |
+
estimator=None,
|
| 112 |
+
*,
|
| 113 |
+
n_estimators=10,
|
| 114 |
+
estimator_params=tuple(),
|
| 115 |
+
):
|
| 116 |
+
# Set parameters
|
| 117 |
+
self.estimator = estimator
|
| 118 |
+
self.n_estimators = n_estimators
|
| 119 |
+
self.estimator_params = estimator_params
|
| 120 |
+
|
| 121 |
+
# Don't instantiate estimators now! Parameters of estimator might
|
| 122 |
+
# still change. Eg., when grid-searching with the nested object syntax.
|
| 123 |
+
# self.estimators_ needs to be filled by the derived classes in fit.
|
| 124 |
+
|
| 125 |
+
def _validate_estimator(self, default=None):
|
| 126 |
+
"""Check the base estimator.
|
| 127 |
+
|
| 128 |
+
Sets the `estimator_` attributes.
|
| 129 |
+
"""
|
| 130 |
+
if self.estimator is not None:
|
| 131 |
+
self.estimator_ = self.estimator
|
| 132 |
+
else:
|
| 133 |
+
self.estimator_ = default
|
| 134 |
+
|
| 135 |
+
def _make_estimator(self, append=True, random_state=None):
|
| 136 |
+
"""Make and configure a copy of the `estimator_` attribute.
|
| 137 |
+
|
| 138 |
+
Warning: This method should be used to properly instantiate new
|
| 139 |
+
sub-estimators.
|
| 140 |
+
"""
|
| 141 |
+
estimator = clone(self.estimator_)
|
| 142 |
+
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
|
| 143 |
+
|
| 144 |
+
if random_state is not None:
|
| 145 |
+
_set_random_states(estimator, random_state)
|
| 146 |
+
|
| 147 |
+
if append:
|
| 148 |
+
self.estimators_.append(estimator)
|
| 149 |
+
|
| 150 |
+
return estimator
|
| 151 |
+
|
| 152 |
+
def __len__(self):
|
| 153 |
+
"""Return the number of estimators in the ensemble."""
|
| 154 |
+
return len(self.estimators_)
|
| 155 |
+
|
| 156 |
+
def __getitem__(self, index):
|
| 157 |
+
"""Return the index'th estimator in the ensemble."""
|
| 158 |
+
return self.estimators_[index]
|
| 159 |
+
|
| 160 |
+
def __iter__(self):
|
| 161 |
+
"""Return iterator over estimators in the ensemble."""
|
| 162 |
+
return iter(self.estimators_)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def _partition_estimators(n_estimators, n_jobs):
|
| 166 |
+
"""Private function used to partition estimators between jobs."""
|
| 167 |
+
# Compute the number of jobs
|
| 168 |
+
n_jobs = min(effective_n_jobs(n_jobs), n_estimators)
|
| 169 |
+
|
| 170 |
+
# Partition estimators between jobs
|
| 171 |
+
n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
|
| 172 |
+
n_estimators_per_job[: n_estimators % n_jobs] += 1
|
| 173 |
+
starts = np.cumsum(n_estimators_per_job)
|
| 174 |
+
|
| 175 |
+
return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class _BaseHeterogeneousEnsemble(
|
| 179 |
+
MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta
|
| 180 |
+
):
|
| 181 |
+
"""Base class for heterogeneous ensemble of learners.
|
| 182 |
+
|
| 183 |
+
Parameters
|
| 184 |
+
----------
|
| 185 |
+
estimators : list of (str, estimator) tuples
|
| 186 |
+
The ensemble of estimators to use in the ensemble. Each element of the
|
| 187 |
+
list is defined as a tuple of string (i.e. name of the estimator) and
|
| 188 |
+
an estimator instance. An estimator can be set to `'drop'` using
|
| 189 |
+
`set_params`.
|
| 190 |
+
|
| 191 |
+
Attributes
|
| 192 |
+
----------
|
| 193 |
+
estimators_ : list of estimators
|
| 194 |
+
The elements of the estimators parameter, having been fitted on the
|
| 195 |
+
training data. If an estimator has been set to `'drop'`, it will not
|
| 196 |
+
appear in `estimators_`.
|
| 197 |
+
"""
|
| 198 |
+
|
| 199 |
+
@property
|
| 200 |
+
def named_estimators(self):
|
| 201 |
+
"""Dictionary to access any fitted sub-estimators by name.
|
| 202 |
+
|
| 203 |
+
Returns
|
| 204 |
+
-------
|
| 205 |
+
:class:`~sklearn.utils.Bunch`
|
| 206 |
+
"""
|
| 207 |
+
return Bunch(**dict(self.estimators))
|
| 208 |
+
|
| 209 |
+
@abstractmethod
|
| 210 |
+
def __init__(self, estimators):
|
| 211 |
+
self.estimators = estimators
|
| 212 |
+
|
| 213 |
+
def _validate_estimators(self):
|
| 214 |
+
if len(self.estimators) == 0:
|
| 215 |
+
raise ValueError(
|
| 216 |
+
"Invalid 'estimators' attribute, 'estimators' should be a "
|
| 217 |
+
"non-empty list of (string, estimator) tuples."
|
| 218 |
+
)
|
| 219 |
+
names, estimators = zip(*self.estimators)
|
| 220 |
+
# defined by MetaEstimatorMixin
|
| 221 |
+
self._validate_names(names)
|
| 222 |
+
|
| 223 |
+
has_estimator = any(est != "drop" for est in estimators)
|
| 224 |
+
if not has_estimator:
|
| 225 |
+
raise ValueError(
|
| 226 |
+
"All estimators are dropped. At least one is required "
|
| 227 |
+
"to be an estimator."
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
is_estimator_type = is_classifier if is_classifier(self) else is_regressor
|
| 231 |
+
|
| 232 |
+
for est in estimators:
|
| 233 |
+
if est != "drop" and not is_estimator_type(est):
|
| 234 |
+
raise ValueError(
|
| 235 |
+
"The estimator {} should be a {}.".format(
|
| 236 |
+
est.__class__.__name__, is_estimator_type.__name__[3:]
|
| 237 |
+
)
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
return names, estimators
|
| 241 |
+
|
| 242 |
+
def set_params(self, **params):
|
| 243 |
+
"""
|
| 244 |
+
Set the parameters of an estimator from the ensemble.
|
| 245 |
+
|
| 246 |
+
Valid parameter keys can be listed with `get_params()`. Note that you
|
| 247 |
+
can directly set the parameters of the estimators contained in
|
| 248 |
+
`estimators`.
|
| 249 |
+
|
| 250 |
+
Parameters
|
| 251 |
+
----------
|
| 252 |
+
**params : keyword arguments
|
| 253 |
+
Specific parameters using e.g.
|
| 254 |
+
`set_params(parameter_name=new_value)`. In addition, to setting the
|
| 255 |
+
parameters of the estimator, the individual estimator of the
|
| 256 |
+
estimators can also be set, or can be removed by setting them to
|
| 257 |
+
'drop'.
|
| 258 |
+
|
| 259 |
+
Returns
|
| 260 |
+
-------
|
| 261 |
+
self : object
|
| 262 |
+
Estimator instance.
|
| 263 |
+
"""
|
| 264 |
+
super()._set_params("estimators", **params)
|
| 265 |
+
return self
|
| 266 |
+
|
| 267 |
+
def get_params(self, deep=True):
|
| 268 |
+
"""
|
| 269 |
+
Get the parameters of an estimator from the ensemble.
|
| 270 |
+
|
| 271 |
+
Returns the parameters given in the constructor as well as the
|
| 272 |
+
estimators contained within the `estimators` parameter.
|
| 273 |
+
|
| 274 |
+
Parameters
|
| 275 |
+
----------
|
| 276 |
+
deep : bool, default=True
|
| 277 |
+
Setting it to True gets the various estimators and the parameters
|
| 278 |
+
of the estimators as well.
|
| 279 |
+
|
| 280 |
+
Returns
|
| 281 |
+
-------
|
| 282 |
+
params : dict
|
| 283 |
+
Parameter and estimator names mapped to their values or parameter
|
| 284 |
+
names mapped to their values.
|
| 285 |
+
"""
|
| 286 |
+
return super()._get_params("estimators", deep=deep)
|
| 287 |
+
|
| 288 |
+
def __sklearn_tags__(self):
|
| 289 |
+
tags = super().__sklearn_tags__()
|
| 290 |
+
try:
|
| 291 |
+
tags.input_tags.allow_nan = all(
|
| 292 |
+
get_tags(est[1]).input_tags.allow_nan if est[1] != "drop" else True
|
| 293 |
+
for est in self.estimators
|
| 294 |
+
)
|
| 295 |
+
tags.input_tags.sparse = all(
|
| 296 |
+
get_tags(est[1]).input_tags.sparse if est[1] != "drop" else True
|
| 297 |
+
for est in self.estimators
|
| 298 |
+
)
|
| 299 |
+
except Exception:
|
| 300 |
+
# If `estimators` does not comply with our API (list of tuples) then it will
|
| 301 |
+
# fail. In this case, we assume that `allow_nan` and `sparse` are False but
|
| 302 |
+
# the parameter validation will raise an error during `fit`.
|
| 303 |
+
pass # pragma: no cover
|
| 304 |
+
return tags
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.pyx
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
from libc.stdlib cimport free
|
| 5 |
+
from libc.string cimport memset
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from scipy.sparse import issparse
|
| 9 |
+
|
| 10 |
+
from ..utils._typedefs cimport float32_t, float64_t, intp_t, int32_t, uint8_t
|
| 11 |
+
# Note: _tree uses cimport numpy, cnp.import_array, so we need to include
|
| 12 |
+
# numpy headers in the build configuration of this extension
|
| 13 |
+
from ..tree._tree cimport Node
|
| 14 |
+
from ..tree._tree cimport Tree
|
| 15 |
+
from ..tree._utils cimport safe_realloc
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# no namespace lookup for numpy dtype and array creation
|
| 19 |
+
from numpy import zeros as np_zeros
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# constant to mark tree leafs
|
| 23 |
+
cdef intp_t TREE_LEAF = -1
|
| 24 |
+
|
| 25 |
+
cdef void _predict_regression_tree_inplace_fast_dense(
|
| 26 |
+
const float32_t[:, ::1] X,
|
| 27 |
+
Node* root_node,
|
| 28 |
+
double *value,
|
| 29 |
+
double scale,
|
| 30 |
+
Py_ssize_t k,
|
| 31 |
+
float64_t[:, :] out
|
| 32 |
+
) noexcept nogil:
|
| 33 |
+
"""Predicts output for regression tree and stores it in ``out[i, k]``.
|
| 34 |
+
|
| 35 |
+
This function operates directly on the data arrays of the tree
|
| 36 |
+
data structures. This is 5x faster than the variant above because
|
| 37 |
+
it allows us to avoid buffer validation.
|
| 38 |
+
|
| 39 |
+
The function assumes that the ndarray that wraps ``X`` is
|
| 40 |
+
c-continuous.
|
| 41 |
+
|
| 42 |
+
Parameters
|
| 43 |
+
----------
|
| 44 |
+
X : float32_t 2d memory view
|
| 45 |
+
The memory view on the data ndarray of the input ``X``.
|
| 46 |
+
Assumes that the array is c-continuous.
|
| 47 |
+
root_node : tree Node pointer
|
| 48 |
+
Pointer to the main node array of the :class:``sklearn.tree.Tree``.
|
| 49 |
+
value : np.float64_t pointer
|
| 50 |
+
The pointer to the data array of the ``value`` array attribute
|
| 51 |
+
of the :class:``sklearn.tree.Tree``.
|
| 52 |
+
scale : double
|
| 53 |
+
A constant to scale the predictions.
|
| 54 |
+
k : int
|
| 55 |
+
The index of the tree output to be predicted. Must satisfy
|
| 56 |
+
0 <= ``k`` < ``K``.
|
| 57 |
+
out : memory view on array of type np.float64_t
|
| 58 |
+
The data array where the predictions are stored.
|
| 59 |
+
``out`` is assumed to be a two-dimensional array of
|
| 60 |
+
shape ``(n_samples, K)``.
|
| 61 |
+
"""
|
| 62 |
+
cdef intp_t n_samples = X.shape[0]
|
| 63 |
+
cdef Py_ssize_t i
|
| 64 |
+
cdef Node *node
|
| 65 |
+
for i in range(n_samples):
|
| 66 |
+
node = root_node
|
| 67 |
+
# While node not a leaf
|
| 68 |
+
while node.left_child != TREE_LEAF:
|
| 69 |
+
if X[i, node.feature] <= node.threshold:
|
| 70 |
+
node = root_node + node.left_child
|
| 71 |
+
else:
|
| 72 |
+
node = root_node + node.right_child
|
| 73 |
+
out[i, k] += scale * value[node - root_node]
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _predict_regression_tree_stages_sparse(
|
| 77 |
+
object[:, :] estimators,
|
| 78 |
+
object X,
|
| 79 |
+
double scale,
|
| 80 |
+
float64_t[:, :] out
|
| 81 |
+
):
|
| 82 |
+
"""Predicts output for regression tree inplace and adds scaled value to ``out[i, k]``.
|
| 83 |
+
|
| 84 |
+
The function assumes that the ndarray that wraps ``X`` is csr_matrix.
|
| 85 |
+
"""
|
| 86 |
+
cdef const float32_t[::1] X_data = X.data
|
| 87 |
+
cdef const int32_t[::1] X_indices = X.indices
|
| 88 |
+
cdef const int32_t[::1] X_indptr = X.indptr
|
| 89 |
+
|
| 90 |
+
cdef intp_t n_samples = X.shape[0]
|
| 91 |
+
cdef intp_t n_features = X.shape[1]
|
| 92 |
+
cdef intp_t n_stages = estimators.shape[0]
|
| 93 |
+
cdef intp_t n_outputs = estimators.shape[1]
|
| 94 |
+
|
| 95 |
+
# Indices and temporary variables
|
| 96 |
+
cdef intp_t sample_i
|
| 97 |
+
cdef intp_t feature_i
|
| 98 |
+
cdef intp_t stage_i
|
| 99 |
+
cdef intp_t output_i
|
| 100 |
+
cdef Node *root_node = NULL
|
| 101 |
+
cdef Node *node = NULL
|
| 102 |
+
cdef double *value = NULL
|
| 103 |
+
|
| 104 |
+
cdef Tree tree
|
| 105 |
+
cdef Node** nodes = NULL
|
| 106 |
+
cdef double** values = NULL
|
| 107 |
+
safe_realloc(&nodes, n_stages * n_outputs)
|
| 108 |
+
safe_realloc(&values, n_stages * n_outputs)
|
| 109 |
+
for stage_i in range(n_stages):
|
| 110 |
+
for output_i in range(n_outputs):
|
| 111 |
+
tree = estimators[stage_i, output_i].tree_
|
| 112 |
+
nodes[stage_i * n_outputs + output_i] = tree.nodes
|
| 113 |
+
values[stage_i * n_outputs + output_i] = tree.value
|
| 114 |
+
|
| 115 |
+
# Initialize auxiliary data-structure
|
| 116 |
+
cdef float32_t feature_value = 0.
|
| 117 |
+
cdef float32_t* X_sample = NULL
|
| 118 |
+
|
| 119 |
+
# feature_to_sample as a data structure records the last seen sample
|
| 120 |
+
# for each feature; functionally, it is an efficient way to identify
|
| 121 |
+
# which features are nonzero in the present sample.
|
| 122 |
+
cdef intp_t* feature_to_sample = NULL
|
| 123 |
+
|
| 124 |
+
safe_realloc(&X_sample, n_features)
|
| 125 |
+
safe_realloc(&feature_to_sample, n_features)
|
| 126 |
+
|
| 127 |
+
memset(feature_to_sample, -1, n_features * sizeof(intp_t))
|
| 128 |
+
|
| 129 |
+
# Cycle through all samples
|
| 130 |
+
for sample_i in range(n_samples):
|
| 131 |
+
for feature_i in range(X_indptr[sample_i], X_indptr[sample_i + 1]):
|
| 132 |
+
feature_to_sample[X_indices[feature_i]] = sample_i
|
| 133 |
+
X_sample[X_indices[feature_i]] = X_data[feature_i]
|
| 134 |
+
|
| 135 |
+
# Cycle through all stages
|
| 136 |
+
for stage_i in range(n_stages):
|
| 137 |
+
# Cycle through all trees
|
| 138 |
+
for output_i in range(n_outputs):
|
| 139 |
+
root_node = nodes[stage_i * n_outputs + output_i]
|
| 140 |
+
value = values[stage_i * n_outputs + output_i]
|
| 141 |
+
node = root_node
|
| 142 |
+
|
| 143 |
+
# While node not a leaf
|
| 144 |
+
while node.left_child != TREE_LEAF:
|
| 145 |
+
# ... and node.right_child != TREE_LEAF:
|
| 146 |
+
if feature_to_sample[node.feature] == sample_i:
|
| 147 |
+
feature_value = X_sample[node.feature]
|
| 148 |
+
else:
|
| 149 |
+
feature_value = 0.
|
| 150 |
+
|
| 151 |
+
if feature_value <= node.threshold:
|
| 152 |
+
node = root_node + node.left_child
|
| 153 |
+
else:
|
| 154 |
+
node = root_node + node.right_child
|
| 155 |
+
out[sample_i, output_i] += scale * value[node - root_node]
|
| 156 |
+
|
| 157 |
+
# Free auxiliary arrays
|
| 158 |
+
free(X_sample)
|
| 159 |
+
free(feature_to_sample)
|
| 160 |
+
free(nodes)
|
| 161 |
+
free(values)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def predict_stages(
|
| 165 |
+
object[:, :] estimators,
|
| 166 |
+
object X,
|
| 167 |
+
double scale,
|
| 168 |
+
float64_t[:, :] out
|
| 169 |
+
):
|
| 170 |
+
"""Add predictions of ``estimators`` to ``out``.
|
| 171 |
+
|
| 172 |
+
Each estimator is scaled by ``scale`` before its prediction
|
| 173 |
+
is added to ``out``.
|
| 174 |
+
"""
|
| 175 |
+
cdef Py_ssize_t i
|
| 176 |
+
cdef Py_ssize_t k
|
| 177 |
+
cdef Py_ssize_t n_estimators = estimators.shape[0]
|
| 178 |
+
cdef Py_ssize_t K = estimators.shape[1]
|
| 179 |
+
cdef Tree tree
|
| 180 |
+
|
| 181 |
+
if issparse(X):
|
| 182 |
+
if X.format != 'csr':
|
| 183 |
+
raise ValueError("When X is a sparse matrix, a CSR format is"
|
| 184 |
+
" expected, got {!r}".format(type(X)))
|
| 185 |
+
_predict_regression_tree_stages_sparse(
|
| 186 |
+
estimators=estimators, X=X, scale=scale, out=out
|
| 187 |
+
)
|
| 188 |
+
else:
|
| 189 |
+
if not isinstance(X, np.ndarray) or np.isfortran(X):
|
| 190 |
+
raise ValueError(f"X should be C-ordered np.ndarray, got {type(X)}")
|
| 191 |
+
|
| 192 |
+
for i in range(n_estimators):
|
| 193 |
+
for k in range(K):
|
| 194 |
+
tree = estimators[i, k].tree_
|
| 195 |
+
|
| 196 |
+
# avoid buffer validation by casting to ndarray
|
| 197 |
+
# and get data pointer
|
| 198 |
+
# need brackets because of casting operator priority
|
| 199 |
+
_predict_regression_tree_inplace_fast_dense(
|
| 200 |
+
X=X,
|
| 201 |
+
root_node=tree.nodes,
|
| 202 |
+
value=tree.value,
|
| 203 |
+
scale=scale,
|
| 204 |
+
k=k,
|
| 205 |
+
out=out
|
| 206 |
+
)
|
| 207 |
+
# out[:, k] += scale * tree.predict(X).ravel()
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def predict_stage(
|
| 211 |
+
object[:, :] estimators,
|
| 212 |
+
int stage,
|
| 213 |
+
object X,
|
| 214 |
+
double scale,
|
| 215 |
+
float64_t[:, :] out
|
| 216 |
+
):
|
| 217 |
+
"""Add predictions of ``estimators[stage]`` to ``out``.
|
| 218 |
+
|
| 219 |
+
Each estimator in the stage is scaled by ``scale`` before
|
| 220 |
+
its prediction is added to ``out``.
|
| 221 |
+
"""
|
| 222 |
+
return predict_stages(
|
| 223 |
+
estimators=estimators[stage:stage + 1], X=X, scale=scale, out=out
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def _random_sample_mask(
|
| 228 |
+
intp_t n_total_samples,
|
| 229 |
+
intp_t n_total_in_bag,
|
| 230 |
+
random_state
|
| 231 |
+
):
|
| 232 |
+
"""Create a random sample mask where ``n_total_in_bag`` elements are set.
|
| 233 |
+
|
| 234 |
+
Parameters
|
| 235 |
+
----------
|
| 236 |
+
n_total_samples : int
|
| 237 |
+
The length of the resulting mask.
|
| 238 |
+
|
| 239 |
+
n_total_in_bag : int
|
| 240 |
+
The number of elements in the sample mask which are set to 1.
|
| 241 |
+
|
| 242 |
+
random_state : RandomState
|
| 243 |
+
A numpy ``RandomState`` object.
|
| 244 |
+
|
| 245 |
+
Returns
|
| 246 |
+
-------
|
| 247 |
+
sample_mask : np.ndarray, shape=[n_total_samples]
|
| 248 |
+
An ndarray where ``n_total_in_bag`` elements are set to ``True``
|
| 249 |
+
the others are ``False``.
|
| 250 |
+
"""
|
| 251 |
+
cdef float64_t[::1] rand = random_state.uniform(size=n_total_samples)
|
| 252 |
+
cdef uint8_t[::1] sample_mask = np_zeros((n_total_samples,), dtype=bool)
|
| 253 |
+
|
| 254 |
+
cdef intp_t n_bagged = 0
|
| 255 |
+
cdef intp_t i = 0
|
| 256 |
+
|
| 257 |
+
for i in range(n_total_samples):
|
| 258 |
+
if rand[i] * (n_total_samples - i) < (n_total_in_bag - n_bagged):
|
| 259 |
+
sample_mask[i] = 1
|
| 260 |
+
n_bagged += 1
|
| 261 |
+
|
| 262 |
+
return sample_mask.base
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module implements histogram-based gradient boosting estimators.
|
| 2 |
+
|
| 3 |
+
The implementation is a port from pygbm which is itself strongly inspired
|
| 4 |
+
from LightGBM.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
# Authors: The scikit-learn developers
|
| 8 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.49 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.pyx
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Author: Nicolas Hug
|
| 2 |
+
|
| 3 |
+
from cython.parallel import prange
|
| 4 |
+
from libc.math cimport isnan
|
| 5 |
+
|
| 6 |
+
from .common cimport X_DTYPE_C, X_BINNED_DTYPE_C
|
| 7 |
+
from ...utils._typedefs cimport uint8_t
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _map_to_bins(const X_DTYPE_C [:, :] data,
|
| 11 |
+
list binning_thresholds,
|
| 12 |
+
const uint8_t[::1] is_categorical,
|
| 13 |
+
const uint8_t missing_values_bin_idx,
|
| 14 |
+
int n_threads,
|
| 15 |
+
X_BINNED_DTYPE_C [::1, :] binned):
|
| 16 |
+
"""Bin continuous and categorical values to discrete integer-coded levels.
|
| 17 |
+
|
| 18 |
+
A given value x is mapped into bin value i iff
|
| 19 |
+
thresholds[i - 1] < x <= thresholds[i]
|
| 20 |
+
|
| 21 |
+
Parameters
|
| 22 |
+
----------
|
| 23 |
+
data : ndarray, shape (n_samples, n_features)
|
| 24 |
+
The data to bin.
|
| 25 |
+
binning_thresholds : list of arrays
|
| 26 |
+
For each feature, stores the increasing numeric values that are
|
| 27 |
+
used to separate the bins.
|
| 28 |
+
is_categorical : ndarray of uint8_t of shape (n_features,)
|
| 29 |
+
Indicates categorical features.
|
| 30 |
+
n_threads : int
|
| 31 |
+
Number of OpenMP threads to use.
|
| 32 |
+
binned : ndarray, shape (n_samples, n_features)
|
| 33 |
+
Output array, must be fortran aligned.
|
| 34 |
+
"""
|
| 35 |
+
cdef:
|
| 36 |
+
int feature_idx
|
| 37 |
+
|
| 38 |
+
for feature_idx in range(data.shape[1]):
|
| 39 |
+
_map_col_to_bins(
|
| 40 |
+
data[:, feature_idx],
|
| 41 |
+
binning_thresholds[feature_idx],
|
| 42 |
+
is_categorical[feature_idx],
|
| 43 |
+
missing_values_bin_idx,
|
| 44 |
+
n_threads,
|
| 45 |
+
binned[:, feature_idx]
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
cdef void _map_col_to_bins(
|
| 50 |
+
const X_DTYPE_C [:] data,
|
| 51 |
+
const X_DTYPE_C [:] binning_thresholds,
|
| 52 |
+
const uint8_t is_categorical,
|
| 53 |
+
const uint8_t missing_values_bin_idx,
|
| 54 |
+
int n_threads,
|
| 55 |
+
X_BINNED_DTYPE_C [:] binned
|
| 56 |
+
):
|
| 57 |
+
"""Binary search to find the bin index for each value in the data."""
|
| 58 |
+
cdef:
|
| 59 |
+
int i
|
| 60 |
+
int left
|
| 61 |
+
int right
|
| 62 |
+
int middle
|
| 63 |
+
|
| 64 |
+
for i in prange(data.shape[0], schedule='static', nogil=True,
|
| 65 |
+
num_threads=n_threads):
|
| 66 |
+
if (
|
| 67 |
+
isnan(data[i]) or
|
| 68 |
+
# To follow LightGBM's conventions, negative values for
|
| 69 |
+
# categorical features are considered as missing values.
|
| 70 |
+
(is_categorical and data[i] < 0)
|
| 71 |
+
):
|
| 72 |
+
binned[i] = missing_values_bin_idx
|
| 73 |
+
else:
|
| 74 |
+
# for known values, use binary search
|
| 75 |
+
left, right = 0, binning_thresholds.shape[0]
|
| 76 |
+
while left < right:
|
| 77 |
+
# equal to (right + left - 1) // 2 but avoids overflow
|
| 78 |
+
middle = left + (right - left - 1) // 2
|
| 79 |
+
if data[i] <= binning_thresholds[middle]:
|
| 80 |
+
right = middle
|
| 81 |
+
else:
|
| 82 |
+
left = middle + 1
|
| 83 |
+
|
| 84 |
+
binned[i] = left
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .common cimport X_BINNED_DTYPE_C
|
| 2 |
+
from .common cimport BITSET_DTYPE_C
|
| 3 |
+
from .common cimport BITSET_INNER_DTYPE_C
|
| 4 |
+
from .common cimport X_DTYPE_C
|
| 5 |
+
from ...utils._typedefs cimport uint8_t
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
cdef void init_bitset(BITSET_DTYPE_C bitset) noexcept nogil
|
| 9 |
+
|
| 10 |
+
cdef void set_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil
|
| 11 |
+
|
| 12 |
+
cdef uint8_t in_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil
|
| 13 |
+
|
| 14 |
+
cpdef uint8_t in_bitset_memoryview(const BITSET_INNER_DTYPE_C[:] bitset,
|
| 15 |
+
X_BINNED_DTYPE_C val) noexcept nogil
|
| 16 |
+
|
| 17 |
+
cdef uint8_t in_bitset_2d_memoryview(
|
| 18 |
+
const BITSET_INNER_DTYPE_C[:, :] bitset,
|
| 19 |
+
X_BINNED_DTYPE_C val,
|
| 20 |
+
unsigned int row) noexcept nogil
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.pyx
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Author: Nicolas Hug
|
| 2 |
+
|
| 3 |
+
from cython.parallel import prange
|
| 4 |
+
from libc.math cimport isnan
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from ...utils._typedefs cimport intp_t, uint8_t
|
| 8 |
+
from .common cimport X_DTYPE_C
|
| 9 |
+
from .common cimport Y_DTYPE_C
|
| 10 |
+
from .common import Y_DTYPE
|
| 11 |
+
from .common cimport X_BINNED_DTYPE_C
|
| 12 |
+
from .common cimport BITSET_INNER_DTYPE_C
|
| 13 |
+
from .common cimport node_struct
|
| 14 |
+
from ._bitset cimport in_bitset_2d_memoryview
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _predict_from_raw_data( # raw data = non-binned data
|
| 18 |
+
const node_struct [:] nodes,
|
| 19 |
+
const X_DTYPE_C [:, :] numeric_data,
|
| 20 |
+
const BITSET_INNER_DTYPE_C [:, ::1] raw_left_cat_bitsets,
|
| 21 |
+
const BITSET_INNER_DTYPE_C [:, ::1] known_cat_bitsets,
|
| 22 |
+
const unsigned int [::1] f_idx_map,
|
| 23 |
+
int n_threads,
|
| 24 |
+
Y_DTYPE_C [:] out):
|
| 25 |
+
|
| 26 |
+
cdef:
|
| 27 |
+
int i
|
| 28 |
+
|
| 29 |
+
for i in prange(numeric_data.shape[0], schedule='static', nogil=True,
|
| 30 |
+
num_threads=n_threads):
|
| 31 |
+
out[i] = _predict_one_from_raw_data(
|
| 32 |
+
nodes, numeric_data, raw_left_cat_bitsets,
|
| 33 |
+
known_cat_bitsets,
|
| 34 |
+
f_idx_map, i)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
cdef inline Y_DTYPE_C _predict_one_from_raw_data(
|
| 38 |
+
const node_struct [:] nodes,
|
| 39 |
+
const X_DTYPE_C [:, :] numeric_data,
|
| 40 |
+
const BITSET_INNER_DTYPE_C [:, ::1] raw_left_cat_bitsets,
|
| 41 |
+
const BITSET_INNER_DTYPE_C [:, ::1] known_cat_bitsets,
|
| 42 |
+
const unsigned int [::1] f_idx_map,
|
| 43 |
+
const int row) noexcept nogil:
|
| 44 |
+
# Need to pass the whole array and the row index, else prange won't work.
|
| 45 |
+
# See issue Cython #2798
|
| 46 |
+
|
| 47 |
+
cdef:
|
| 48 |
+
node_struct node = nodes[0]
|
| 49 |
+
unsigned int node_idx = 0
|
| 50 |
+
X_DTYPE_C data_val
|
| 51 |
+
|
| 52 |
+
while True:
|
| 53 |
+
if node.is_leaf:
|
| 54 |
+
return node.value
|
| 55 |
+
|
| 56 |
+
data_val = numeric_data[row, node.feature_idx]
|
| 57 |
+
|
| 58 |
+
if isnan(data_val):
|
| 59 |
+
if node.missing_go_to_left:
|
| 60 |
+
node_idx = node.left
|
| 61 |
+
else:
|
| 62 |
+
node_idx = node.right
|
| 63 |
+
elif node.is_categorical:
|
| 64 |
+
if data_val < 0:
|
| 65 |
+
# data_val is not in the accepted range, so it is treated as missing value
|
| 66 |
+
node_idx = node.left if node.missing_go_to_left else node.right
|
| 67 |
+
elif in_bitset_2d_memoryview(
|
| 68 |
+
raw_left_cat_bitsets,
|
| 69 |
+
<X_BINNED_DTYPE_C>data_val,
|
| 70 |
+
node.bitset_idx):
|
| 71 |
+
node_idx = node.left
|
| 72 |
+
elif in_bitset_2d_memoryview(
|
| 73 |
+
known_cat_bitsets,
|
| 74 |
+
<X_BINNED_DTYPE_C>data_val,
|
| 75 |
+
f_idx_map[node.feature_idx]):
|
| 76 |
+
node_idx = node.right
|
| 77 |
+
else:
|
| 78 |
+
# Treat unknown categories as missing.
|
| 79 |
+
node_idx = node.left if node.missing_go_to_left else node.right
|
| 80 |
+
else:
|
| 81 |
+
if data_val <= node.num_threshold:
|
| 82 |
+
node_idx = node.left
|
| 83 |
+
else:
|
| 84 |
+
node_idx = node.right
|
| 85 |
+
node = nodes[node_idx]
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def _predict_from_binned_data(
|
| 89 |
+
node_struct [:] nodes,
|
| 90 |
+
const X_BINNED_DTYPE_C [:, :] binned_data,
|
| 91 |
+
BITSET_INNER_DTYPE_C [:, :] binned_left_cat_bitsets,
|
| 92 |
+
const uint8_t missing_values_bin_idx,
|
| 93 |
+
int n_threads,
|
| 94 |
+
Y_DTYPE_C [:] out):
|
| 95 |
+
|
| 96 |
+
cdef:
|
| 97 |
+
int i
|
| 98 |
+
|
| 99 |
+
for i in prange(binned_data.shape[0], schedule='static', nogil=True,
|
| 100 |
+
num_threads=n_threads):
|
| 101 |
+
out[i] = _predict_one_from_binned_data(nodes,
|
| 102 |
+
binned_data,
|
| 103 |
+
binned_left_cat_bitsets, i,
|
| 104 |
+
missing_values_bin_idx)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
cdef inline Y_DTYPE_C _predict_one_from_binned_data(
|
| 108 |
+
node_struct [:] nodes,
|
| 109 |
+
const X_BINNED_DTYPE_C [:, :] binned_data,
|
| 110 |
+
const BITSET_INNER_DTYPE_C [:, :] binned_left_cat_bitsets,
|
| 111 |
+
const int row,
|
| 112 |
+
const uint8_t missing_values_bin_idx) noexcept nogil:
|
| 113 |
+
# Need to pass the whole array and the row index, else prange won't work.
|
| 114 |
+
# See issue Cython #2798
|
| 115 |
+
|
| 116 |
+
cdef:
|
| 117 |
+
node_struct node = nodes[0]
|
| 118 |
+
unsigned int node_idx = 0
|
| 119 |
+
X_BINNED_DTYPE_C data_val
|
| 120 |
+
|
| 121 |
+
while True:
|
| 122 |
+
if node.is_leaf:
|
| 123 |
+
return node.value
|
| 124 |
+
|
| 125 |
+
data_val = binned_data[row, node.feature_idx]
|
| 126 |
+
|
| 127 |
+
if data_val == missing_values_bin_idx:
|
| 128 |
+
if node.missing_go_to_left:
|
| 129 |
+
node_idx = node.left
|
| 130 |
+
else:
|
| 131 |
+
node_idx = node.right
|
| 132 |
+
elif node.is_categorical:
|
| 133 |
+
if in_bitset_2d_memoryview(
|
| 134 |
+
binned_left_cat_bitsets,
|
| 135 |
+
data_val,
|
| 136 |
+
node.bitset_idx):
|
| 137 |
+
node_idx = node.left
|
| 138 |
+
else:
|
| 139 |
+
node_idx = node.right
|
| 140 |
+
else:
|
| 141 |
+
if data_val <= node.bin_threshold:
|
| 142 |
+
node_idx = node.left
|
| 143 |
+
else:
|
| 144 |
+
node_idx = node.right
|
| 145 |
+
node = nodes[node_idx]
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def _compute_partial_dependence(
|
| 149 |
+
node_struct [:] nodes,
|
| 150 |
+
const X_DTYPE_C [:, ::1] X,
|
| 151 |
+
const intp_t [:] target_features,
|
| 152 |
+
Y_DTYPE_C [:] out
|
| 153 |
+
):
|
| 154 |
+
"""Partial dependence of the response on the ``target_features`` set.
|
| 155 |
+
|
| 156 |
+
For each sample in ``X`` a tree traversal is performed.
|
| 157 |
+
Each traversal starts from the root with weight 1.0.
|
| 158 |
+
|
| 159 |
+
At each non-leaf node that splits on a target feature, either
|
| 160 |
+
the left child or the right child is visited based on the feature
|
| 161 |
+
value of the current sample, and the weight is not modified.
|
| 162 |
+
At each non-leaf node that splits on a complementary feature,
|
| 163 |
+
both children are visited and the weight is multiplied by the fraction
|
| 164 |
+
of training samples which went to each child.
|
| 165 |
+
|
| 166 |
+
At each leaf, the value of the node is multiplied by the current
|
| 167 |
+
weight (weights sum to 1 for all visited terminal nodes).
|
| 168 |
+
|
| 169 |
+
Parameters
|
| 170 |
+
----------
|
| 171 |
+
nodes : view on array of PREDICTOR_RECORD_DTYPE, shape (n_nodes)
|
| 172 |
+
The array representing the predictor tree.
|
| 173 |
+
X : view on 2d ndarray, shape (n_samples, n_target_features)
|
| 174 |
+
The grid points on which the partial dependence should be
|
| 175 |
+
evaluated.
|
| 176 |
+
target_features : view on 1d ndarray of intp_t, shape (n_target_features)
|
| 177 |
+
The set of target features for which the partial dependence
|
| 178 |
+
should be evaluated.
|
| 179 |
+
out : view on 1d ndarray, shape (n_samples)
|
| 180 |
+
The value of the partial dependence function on each grid
|
| 181 |
+
point.
|
| 182 |
+
"""
|
| 183 |
+
|
| 184 |
+
cdef:
|
| 185 |
+
unsigned int current_node_idx
|
| 186 |
+
unsigned int [:] node_idx_stack = np.zeros(shape=nodes.shape[0],
|
| 187 |
+
dtype=np.uint32)
|
| 188 |
+
Y_DTYPE_C [::1] weight_stack = np.zeros(shape=nodes.shape[0],
|
| 189 |
+
dtype=Y_DTYPE)
|
| 190 |
+
node_struct * current_node # pointer to avoid copying attributes
|
| 191 |
+
|
| 192 |
+
unsigned int sample_idx
|
| 193 |
+
intp_t feature_idx
|
| 194 |
+
unsigned stack_size
|
| 195 |
+
Y_DTYPE_C left_sample_frac
|
| 196 |
+
Y_DTYPE_C current_weight
|
| 197 |
+
Y_DTYPE_C total_weight # used for sanity check only
|
| 198 |
+
bint is_target_feature
|
| 199 |
+
|
| 200 |
+
for sample_idx in range(X.shape[0]):
|
| 201 |
+
# init stacks for current sample
|
| 202 |
+
stack_size = 1
|
| 203 |
+
node_idx_stack[0] = 0 # root node
|
| 204 |
+
weight_stack[0] = 1 # all the samples are in the root node
|
| 205 |
+
total_weight = 0
|
| 206 |
+
|
| 207 |
+
while stack_size > 0:
|
| 208 |
+
|
| 209 |
+
# pop the stack
|
| 210 |
+
stack_size -= 1
|
| 211 |
+
current_node_idx = node_idx_stack[stack_size]
|
| 212 |
+
current_node = &nodes[current_node_idx]
|
| 213 |
+
|
| 214 |
+
if current_node.is_leaf:
|
| 215 |
+
out[sample_idx] += (weight_stack[stack_size] *
|
| 216 |
+
current_node.value)
|
| 217 |
+
total_weight += weight_stack[stack_size]
|
| 218 |
+
else:
|
| 219 |
+
# determine if the split feature is a target feature
|
| 220 |
+
is_target_feature = False
|
| 221 |
+
for feature_idx in range(target_features.shape[0]):
|
| 222 |
+
if target_features[feature_idx] == current_node.feature_idx:
|
| 223 |
+
is_target_feature = True
|
| 224 |
+
break
|
| 225 |
+
|
| 226 |
+
if is_target_feature:
|
| 227 |
+
# In this case, we push left or right child on stack
|
| 228 |
+
if X[sample_idx, feature_idx] <= current_node.num_threshold:
|
| 229 |
+
node_idx_stack[stack_size] = current_node.left
|
| 230 |
+
else:
|
| 231 |
+
node_idx_stack[stack_size] = current_node.right
|
| 232 |
+
stack_size += 1
|
| 233 |
+
else:
|
| 234 |
+
# In this case, we push both children onto the stack,
|
| 235 |
+
# and give a weight proportional to the number of
|
| 236 |
+
# samples going through each branch.
|
| 237 |
+
|
| 238 |
+
# push left child
|
| 239 |
+
node_idx_stack[stack_size] = current_node.left
|
| 240 |
+
left_sample_frac = (
|
| 241 |
+
<Y_DTYPE_C> nodes[current_node.left].count /
|
| 242 |
+
current_node.count)
|
| 243 |
+
current_weight = weight_stack[stack_size]
|
| 244 |
+
weight_stack[stack_size] = current_weight * left_sample_frac
|
| 245 |
+
stack_size += 1
|
| 246 |
+
|
| 247 |
+
# push right child
|
| 248 |
+
node_idx_stack[stack_size] = current_node.right
|
| 249 |
+
weight_stack[stack_size] = (
|
| 250 |
+
current_weight * (1 - left_sample_frac))
|
| 251 |
+
stack_size += 1
|
| 252 |
+
|
| 253 |
+
# Sanity check. Should never happen.
|
| 254 |
+
if not (0.999 < total_weight < 1.001):
|
| 255 |
+
raise ValueError("Total weight should be 1.0 but was %.9f" %total_weight)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ...utils._typedefs cimport float32_t, float64_t, intp_t, uint8_t, uint32_t
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
ctypedef float64_t X_DTYPE_C
|
| 5 |
+
ctypedef uint8_t X_BINNED_DTYPE_C
|
| 6 |
+
ctypedef float64_t Y_DTYPE_C
|
| 7 |
+
ctypedef float32_t G_H_DTYPE_C
|
| 8 |
+
ctypedef uint32_t BITSET_INNER_DTYPE_C
|
| 9 |
+
ctypedef BITSET_INNER_DTYPE_C[8] BITSET_DTYPE_C
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
cdef packed struct hist_struct:
|
| 13 |
+
# Same as histogram dtype but we need a struct to declare views. It needs
|
| 14 |
+
# to be packed since by default numpy dtypes aren't aligned
|
| 15 |
+
Y_DTYPE_C sum_gradients
|
| 16 |
+
Y_DTYPE_C sum_hessians
|
| 17 |
+
unsigned int count
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
cdef packed struct node_struct:
|
| 21 |
+
# Equivalent struct to PREDICTOR_RECORD_DTYPE to use in memory views. It
|
| 22 |
+
# needs to be packed since by default numpy dtypes aren't aligned
|
| 23 |
+
Y_DTYPE_C value
|
| 24 |
+
unsigned int count
|
| 25 |
+
intp_t feature_idx
|
| 26 |
+
X_DTYPE_C num_threshold
|
| 27 |
+
uint8_t missing_go_to_left
|
| 28 |
+
unsigned int left
|
| 29 |
+
unsigned int right
|
| 30 |
+
Y_DTYPE_C gain
|
| 31 |
+
unsigned int depth
|
| 32 |
+
uint8_t is_leaf
|
| 33 |
+
X_BINNED_DTYPE_C bin_threshold
|
| 34 |
+
uint8_t is_categorical
|
| 35 |
+
# The index of the corresponding bitsets in the Predictor's bitset arrays.
|
| 36 |
+
# Only used if is_categorical is True
|
| 37 |
+
unsigned int bitset_idx
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
cpdef enum MonotonicConstraint:
|
| 41 |
+
NO_CST = 0
|
| 42 |
+
POS = 1
|
| 43 |
+
NEG = -1
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pyx
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
# Y_DYTPE is the dtype to which the targets y are converted to. This is also
|
| 4 |
+
# dtype for leaf values, gains, and sums of gradients / hessians. The gradients
|
| 5 |
+
# and hessians arrays are stored as floats to avoid using too much memory.
|
| 6 |
+
Y_DTYPE = np.float64
|
| 7 |
+
X_DTYPE = np.float64
|
| 8 |
+
X_BINNED_DTYPE = np.uint8 # hence max_bins == 256
|
| 9 |
+
# dtype for gradients and hessians arrays
|
| 10 |
+
G_H_DTYPE = np.float32
|
| 11 |
+
X_BITSET_INNER_DTYPE = np.uint32
|
| 12 |
+
|
| 13 |
+
# Note that we use Y_DTYPE=float64 to avoid issues with floating point precision when
|
| 14 |
+
# summing gradients and hessians (both float32). Those are difficult to protect via
|
| 15 |
+
# tools like (Kahan-) Neumaier summation as in CPython, see
|
| 16 |
+
# https://github.com/python/cpython/issues/100425, or pairwise summation as numpy, see
|
| 17 |
+
# https://github.com/numpy/numpy/pull/3685, due to the way histograms are summed
|
| 18 |
+
# (number of additions per bin is not known in advance). See also comment in
|
| 19 |
+
# _subtract_histograms.
|
| 20 |
+
HISTOGRAM_DTYPE = np.dtype([
|
| 21 |
+
('sum_gradients', Y_DTYPE), # sum of sample gradients in bin
|
| 22 |
+
('sum_hessians', Y_DTYPE), # sum of sample hessians in bin
|
| 23 |
+
('count', np.uint32), # number of samples in bin
|
| 24 |
+
])
|
| 25 |
+
|
| 26 |
+
PREDICTOR_RECORD_DTYPE = np.dtype([
|
| 27 |
+
('value', Y_DTYPE),
|
| 28 |
+
('count', np.uint32),
|
| 29 |
+
('feature_idx', np.intp),
|
| 30 |
+
('num_threshold', X_DTYPE),
|
| 31 |
+
('missing_go_to_left', np.uint8),
|
| 32 |
+
('left', np.uint32),
|
| 33 |
+
('right', np.uint32),
|
| 34 |
+
('gain', Y_DTYPE),
|
| 35 |
+
('depth', np.uint32),
|
| 36 |
+
('is_leaf', np.uint8),
|
| 37 |
+
('bin_threshold', X_BINNED_DTYPE),
|
| 38 |
+
('is_categorical', np.uint8),
|
| 39 |
+
# The index of the corresponding bitsets in the Predictor's bitset arrays.
|
| 40 |
+
# Only used if is_categorical is True
|
| 41 |
+
('bitset_idx', np.uint32)
|
| 42 |
+
])
|
| 43 |
+
|
| 44 |
+
ALMOST_INF = 1e300 # see LightGBM AvoidInf()
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py
ADDED
|
@@ -0,0 +1,807 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains the TreeGrower class.
|
| 3 |
+
|
| 4 |
+
TreeGrower builds a regression tree fitting a Newton-Raphson step, based on
|
| 5 |
+
the gradients and hessians of the training data.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
# Authors: The scikit-learn developers
|
| 9 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 10 |
+
|
| 11 |
+
import numbers
|
| 12 |
+
from heapq import heappop, heappush
|
| 13 |
+
from timeit import default_timer as time
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
|
| 18 |
+
|
| 19 |
+
from ...utils.arrayfuncs import sum_parallel
|
| 20 |
+
from ._bitset import set_raw_bitset_from_binned_bitset
|
| 21 |
+
from .common import (
|
| 22 |
+
PREDICTOR_RECORD_DTYPE,
|
| 23 |
+
X_BITSET_INNER_DTYPE,
|
| 24 |
+
MonotonicConstraint,
|
| 25 |
+
)
|
| 26 |
+
from .histogram import HistogramBuilder
|
| 27 |
+
from .predictor import TreePredictor
|
| 28 |
+
from .splitting import Splitter
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class TreeNode:
|
| 32 |
+
"""Tree Node class used in TreeGrower.
|
| 33 |
+
|
| 34 |
+
This isn't used for prediction purposes, only for training (see
|
| 35 |
+
TreePredictor).
|
| 36 |
+
|
| 37 |
+
Parameters
|
| 38 |
+
----------
|
| 39 |
+
depth : int
|
| 40 |
+
The depth of the node, i.e. its distance from the root.
|
| 41 |
+
sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
|
| 42 |
+
The indices of the samples at the node.
|
| 43 |
+
partition_start : int
|
| 44 |
+
start position of the node's sample_indices in splitter.partition.
|
| 45 |
+
partition_stop : int
|
| 46 |
+
stop position of the node's sample_indices in splitter.partition.
|
| 47 |
+
sum_gradients : float
|
| 48 |
+
The sum of the gradients of the samples at the node.
|
| 49 |
+
sum_hessians : float
|
| 50 |
+
The sum of the hessians of the samples at the node.
|
| 51 |
+
|
| 52 |
+
Attributes
|
| 53 |
+
----------
|
| 54 |
+
depth : int
|
| 55 |
+
The depth of the node, i.e. its distance from the root.
|
| 56 |
+
sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
|
| 57 |
+
The indices of the samples at the node.
|
| 58 |
+
sum_gradients : float
|
| 59 |
+
The sum of the gradients of the samples at the node.
|
| 60 |
+
sum_hessians : float
|
| 61 |
+
The sum of the hessians of the samples at the node.
|
| 62 |
+
split_info : SplitInfo or None
|
| 63 |
+
The result of the split evaluation.
|
| 64 |
+
is_leaf : bool
|
| 65 |
+
True if node is a leaf
|
| 66 |
+
left_child : TreeNode or None
|
| 67 |
+
The left child of the node. None for leaves.
|
| 68 |
+
right_child : TreeNode or None
|
| 69 |
+
The right child of the node. None for leaves.
|
| 70 |
+
value : float or None
|
| 71 |
+
The value of the leaf, as computed in finalize_leaf(). None for
|
| 72 |
+
non-leaf nodes.
|
| 73 |
+
partition_start : int
|
| 74 |
+
start position of the node's sample_indices in splitter.partition.
|
| 75 |
+
partition_stop : int
|
| 76 |
+
stop position of the node's sample_indices in splitter.partition.
|
| 77 |
+
allowed_features : None or ndarray, dtype=int
|
| 78 |
+
Indices of features allowed to split for children.
|
| 79 |
+
interaction_cst_indices : None or list of ints
|
| 80 |
+
Indices of the interaction sets that have to be applied on splits of
|
| 81 |
+
child nodes. The fewer sets the stronger the constraint as fewer sets
|
| 82 |
+
contain fewer features.
|
| 83 |
+
children_lower_bound : float
|
| 84 |
+
children_upper_bound : float
|
| 85 |
+
"""
|
| 86 |
+
|
| 87 |
+
def __init__(
|
| 88 |
+
self,
|
| 89 |
+
*,
|
| 90 |
+
depth,
|
| 91 |
+
sample_indices,
|
| 92 |
+
partition_start,
|
| 93 |
+
partition_stop,
|
| 94 |
+
sum_gradients,
|
| 95 |
+
sum_hessians,
|
| 96 |
+
value=None,
|
| 97 |
+
):
|
| 98 |
+
self.depth = depth
|
| 99 |
+
self.sample_indices = sample_indices
|
| 100 |
+
self.n_samples = sample_indices.shape[0]
|
| 101 |
+
self.sum_gradients = sum_gradients
|
| 102 |
+
self.sum_hessians = sum_hessians
|
| 103 |
+
self.value = value
|
| 104 |
+
self.is_leaf = False
|
| 105 |
+
self.allowed_features = None
|
| 106 |
+
self.interaction_cst_indices = None
|
| 107 |
+
self.set_children_bounds(float("-inf"), float("+inf"))
|
| 108 |
+
self.split_info = None
|
| 109 |
+
self.left_child = None
|
| 110 |
+
self.right_child = None
|
| 111 |
+
self.histograms = None
|
| 112 |
+
# start and stop indices of the node in the splitter.partition
|
| 113 |
+
# array. Concretely,
|
| 114 |
+
# self.sample_indices = view(self.splitter.partition[start:stop])
|
| 115 |
+
# Please see the comments about splitter.partition and
|
| 116 |
+
# splitter.split_indices for more info about this design.
|
| 117 |
+
# These 2 attributes are only used in _update_raw_prediction, because we
|
| 118 |
+
# need to iterate over the leaves and I don't know how to efficiently
|
| 119 |
+
# store the sample_indices views because they're all of different sizes.
|
| 120 |
+
self.partition_start = partition_start
|
| 121 |
+
self.partition_stop = partition_stop
|
| 122 |
+
|
| 123 |
+
def set_children_bounds(self, lower, upper):
|
| 124 |
+
"""Set children values bounds to respect monotonic constraints."""
|
| 125 |
+
|
| 126 |
+
# These are bounds for the node's *children* values, not the node's
|
| 127 |
+
# value. The bounds are used in the splitter when considering potential
|
| 128 |
+
# left and right child.
|
| 129 |
+
self.children_lower_bound = lower
|
| 130 |
+
self.children_upper_bound = upper
|
| 131 |
+
|
| 132 |
+
def __lt__(self, other_node):
|
| 133 |
+
"""Comparison for priority queue.
|
| 134 |
+
|
| 135 |
+
Nodes with high gain are higher priority than nodes with low gain.
|
| 136 |
+
|
| 137 |
+
heapq.heappush only need the '<' operator.
|
| 138 |
+
heapq.heappop take the smallest item first (smaller is higher
|
| 139 |
+
priority).
|
| 140 |
+
|
| 141 |
+
Parameters
|
| 142 |
+
----------
|
| 143 |
+
other_node : TreeNode
|
| 144 |
+
The node to compare with.
|
| 145 |
+
"""
|
| 146 |
+
return self.split_info.gain > other_node.split_info.gain
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class TreeGrower:
|
| 150 |
+
"""Tree grower class used to build a tree.
|
| 151 |
+
|
| 152 |
+
The tree is fitted to predict the values of a Newton-Raphson step. The
|
| 153 |
+
splits are considered in a best-first fashion, and the quality of a
|
| 154 |
+
split is defined in splitting._split_gain.
|
| 155 |
+
|
| 156 |
+
Parameters
|
| 157 |
+
----------
|
| 158 |
+
X_binned : ndarray of shape (n_samples, n_features), dtype=np.uint8
|
| 159 |
+
The binned input samples. Must be Fortran-aligned.
|
| 160 |
+
gradients : ndarray of shape (n_samples,)
|
| 161 |
+
The gradients of each training sample. Those are the gradients of the
|
| 162 |
+
loss w.r.t the predictions, evaluated at iteration ``i - 1``.
|
| 163 |
+
hessians : ndarray of shape (n_samples,)
|
| 164 |
+
The hessians of each training sample. Those are the hessians of the
|
| 165 |
+
loss w.r.t the predictions, evaluated at iteration ``i - 1``.
|
| 166 |
+
max_leaf_nodes : int, default=None
|
| 167 |
+
The maximum number of leaves for each tree. If None, there is no
|
| 168 |
+
maximum limit.
|
| 169 |
+
max_depth : int, default=None
|
| 170 |
+
The maximum depth of each tree. The depth of a tree is the number of
|
| 171 |
+
edges to go from the root to the deepest leaf.
|
| 172 |
+
Depth isn't constrained by default.
|
| 173 |
+
min_samples_leaf : int, default=20
|
| 174 |
+
The minimum number of samples per leaf.
|
| 175 |
+
min_gain_to_split : float, default=0.
|
| 176 |
+
The minimum gain needed to split a node. Splits with lower gain will
|
| 177 |
+
be ignored.
|
| 178 |
+
min_hessian_to_split : float, default=1e-3
|
| 179 |
+
The minimum sum of hessians needed in each node. Splits that result in
|
| 180 |
+
at least one child having a sum of hessians less than
|
| 181 |
+
``min_hessian_to_split`` are discarded.
|
| 182 |
+
n_bins : int, default=256
|
| 183 |
+
The total number of bins, including the bin for missing values. Used
|
| 184 |
+
to define the shape of the histograms.
|
| 185 |
+
n_bins_non_missing : ndarray, dtype=np.uint32, default=None
|
| 186 |
+
For each feature, gives the number of bins actually used for
|
| 187 |
+
non-missing values. For features with a lot of unique values, this
|
| 188 |
+
is equal to ``n_bins - 1``. If it's an int, all features are
|
| 189 |
+
considered to have the same number of bins. If None, all features
|
| 190 |
+
are considered to have ``n_bins - 1`` bins.
|
| 191 |
+
has_missing_values : bool or ndarray, dtype=bool, default=False
|
| 192 |
+
Whether each feature contains missing values (in the training data).
|
| 193 |
+
If it's a bool, the same value is used for all features.
|
| 194 |
+
is_categorical : ndarray of bool of shape (n_features,), default=None
|
| 195 |
+
Indicates categorical features.
|
| 196 |
+
monotonic_cst : array-like of int of shape (n_features,), dtype=int, default=None
|
| 197 |
+
Indicates the monotonic constraint to enforce on each feature.
|
| 198 |
+
- 1: monotonic increase
|
| 199 |
+
- 0: no constraint
|
| 200 |
+
- -1: monotonic decrease
|
| 201 |
+
|
| 202 |
+
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
|
| 203 |
+
interaction_cst : list of sets of integers, default=None
|
| 204 |
+
List of interaction constraints.
|
| 205 |
+
l2_regularization : float, default=0.
|
| 206 |
+
The L2 regularization parameter penalizing leaves with small hessians.
|
| 207 |
+
Use ``0`` for no regularization (default).
|
| 208 |
+
feature_fraction_per_split : float, default=1
|
| 209 |
+
Proportion of randomly chosen features in each and every node split.
|
| 210 |
+
This is a form of regularization, smaller values make the trees weaker
|
| 211 |
+
learners and might prevent overfitting.
|
| 212 |
+
rng : Generator
|
| 213 |
+
Numpy random Generator used for feature subsampling.
|
| 214 |
+
shrinkage : float, default=1.
|
| 215 |
+
The shrinkage parameter to apply to the leaves values, also known as
|
| 216 |
+
learning rate.
|
| 217 |
+
n_threads : int, default=None
|
| 218 |
+
Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
|
| 219 |
+
to determine the effective number of threads use, which takes cgroups CPU
|
| 220 |
+
quotes into account. See the docstring of `_openmp_effective_n_threads`
|
| 221 |
+
for details.
|
| 222 |
+
|
| 223 |
+
Attributes
|
| 224 |
+
----------
|
| 225 |
+
histogram_builder : HistogramBuilder
|
| 226 |
+
splitter : Splitter
|
| 227 |
+
root : TreeNode
|
| 228 |
+
finalized_leaves : list of TreeNode
|
| 229 |
+
splittable_nodes : list of TreeNode
|
| 230 |
+
missing_values_bin_idx : int
|
| 231 |
+
Equals n_bins - 1
|
| 232 |
+
n_categorical_splits : int
|
| 233 |
+
n_features : int
|
| 234 |
+
n_nodes : int
|
| 235 |
+
total_find_split_time : float
|
| 236 |
+
Time spent finding the best splits
|
| 237 |
+
total_compute_hist_time : float
|
| 238 |
+
Time spent computing histograms
|
| 239 |
+
total_apply_split_time : float
|
| 240 |
+
Time spent splitting nodes
|
| 241 |
+
with_monotonic_cst : bool
|
| 242 |
+
Whether there are monotonic constraints that apply. False iff monotonic_cst is
|
| 243 |
+
None.
|
| 244 |
+
"""
|
| 245 |
+
|
| 246 |
+
def __init__(
    self,
    X_binned,
    gradients,
    hessians,
    max_leaf_nodes=None,
    max_depth=None,
    min_samples_leaf=20,
    min_gain_to_split=0.0,
    min_hessian_to_split=1e-3,
    n_bins=256,
    n_bins_non_missing=None,
    has_missing_values=False,
    is_categorical=None,
    monotonic_cst=None,
    interaction_cst=None,
    l2_regularization=0.0,
    feature_fraction_per_split=1.0,
    # NOTE(review): this default is evaluated once at function-definition
    # time, so every grower built without an explicit `rng` shares the
    # same Generator instance -- confirm this sharing is intended.
    rng=np.random.default_rng(),
    shrinkage=1.0,
    n_threads=None,
):
    """Validate inputs, build the histogram builder and splitter, and
    initialize (possibly immediately finalizing) the root node."""
    # Fail fast on bad dtype/layout and negative thresholds before doing
    # any work.
    self._validate_parameters(
        X_binned,
        min_gain_to_split,
        min_hessian_to_split,
    )
    n_threads = _openmp_effective_n_threads(n_threads)

    # Default: every bin except the reserved missing-values bin is usable.
    if n_bins_non_missing is None:
        n_bins_non_missing = n_bins - 1

    # Broadcast a scalar bin count to one entry per feature.
    if isinstance(n_bins_non_missing, numbers.Integral):
        n_bins_non_missing = np.array(
            [n_bins_non_missing] * X_binned.shape[1], dtype=np.uint32
        )
    else:
        n_bins_non_missing = np.asarray(n_bins_non_missing, dtype=np.uint32)

    # Broadcast a scalar missing-values flag to one entry per feature.
    if isinstance(has_missing_values, bool):
        has_missing_values = [has_missing_values] * X_binned.shape[1]
    has_missing_values = np.asarray(has_missing_values, dtype=np.uint8)

    # `monotonic_cst` validation is done in _validate_monotonic_cst
    # at the estimator level and therefore the following should not be
    # needed when using the public API.
    if monotonic_cst is None:
        monotonic_cst = np.full(
            shape=X_binned.shape[1],
            fill_value=MonotonicConstraint.NO_CST,
            dtype=np.int8,
        )
    else:
        monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8)
    # True iff at least one feature carries a non-trivial constraint;
    # split_next() only does the bounds bookkeeping when this is set.
    self.with_monotonic_cst = np.any(monotonic_cst != MonotonicConstraint.NO_CST)

    if is_categorical is None:
        is_categorical = np.zeros(shape=X_binned.shape[1], dtype=np.uint8)
    else:
        is_categorical = np.asarray(is_categorical, dtype=np.uint8)

    # Monotonic constraints are undefined on unordered categories.
    if np.any(
        np.logical_and(
            is_categorical == 1, monotonic_cst != MonotonicConstraint.NO_CST
        )
    ):
        raise ValueError("Categorical features cannot have monotonic constraints.")

    # A single-element hessians array encodes a constant hessian.
    hessians_are_constant = hessians.shape[0] == 1
    self.histogram_builder = HistogramBuilder(
        X_binned, n_bins, gradients, hessians, hessians_are_constant, n_threads
    )
    # The last bin of each feature is reserved for missing values.
    missing_values_bin_idx = n_bins - 1
    self.splitter = Splitter(
        X_binned=X_binned,
        n_bins_non_missing=n_bins_non_missing,
        missing_values_bin_idx=missing_values_bin_idx,
        has_missing_values=has_missing_values,
        is_categorical=is_categorical,
        monotonic_cst=monotonic_cst,
        l2_regularization=l2_regularization,
        min_hessian_to_split=min_hessian_to_split,
        min_samples_leaf=min_samples_leaf,
        min_gain_to_split=min_gain_to_split,
        hessians_are_constant=hessians_are_constant,
        feature_fraction_per_split=feature_fraction_per_split,
        rng=rng,
        n_threads=n_threads,
    )
    self.X_binned = X_binned
    self.max_leaf_nodes = max_leaf_nodes
    self.max_depth = max_depth
    self.min_samples_leaf = min_samples_leaf
    self.min_gain_to_split = min_gain_to_split
    self.n_bins_non_missing = n_bins_non_missing
    self.missing_values_bin_idx = missing_values_bin_idx
    self.has_missing_values = has_missing_values
    self.is_categorical = is_categorical
    self.monotonic_cst = monotonic_cst
    self.interaction_cst = interaction_cst
    self.l2_regularization = l2_regularization
    self.shrinkage = shrinkage
    self.n_features = X_binned.shape[1]
    self.n_threads = n_threads
    self.splittable_nodes = []
    self.finalized_leaves = []
    self.total_find_split_time = 0.0  # time spent finding the best splits
    self.total_compute_hist_time = 0.0  # time spent computing histograms
    self.total_apply_split_time = 0.0  # time spent splitting nodes
    self.n_categorical_splits = 0
    # Must run last: it uses the splitter, the histogram builder, and the
    # attributes assigned above.
    self._initialize_root(gradients, hessians)
    self.n_nodes = 1
|
| 358 |
+
|
| 359 |
+
def _validate_parameters(
    self,
    X_binned,
    min_gain_to_split,
    min_hessian_to_split,
):
    """Validate parameters passed to __init__.

    Also validate parameters passed to splitter.
    """
    # The histogram kernels index bins as uint8, so the binned matrix
    # must already be uint8.
    if X_binned.dtype != np.uint8:
        raise NotImplementedError("X_binned must be of type uint8.")
    # Column-major layout keeps per-feature histogram scans contiguous.
    if not X_binned.flags.f_contiguous:
        raise ValueError(
            "X_binned should be passed as Fortran contiguous "
            "array for maximum efficiency."
        )
    if min_gain_to_split < 0:
        raise ValueError(
            f"min_gain_to_split={min_gain_to_split} must be positive."
        )
    if min_hessian_to_split < 0:
        raise ValueError(
            f"min_hessian_to_split={min_hessian_to_split} must be positive."
        )
|
| 384 |
+
|
| 385 |
+
def grow(self):
    """Grow the tree from the root until no splittable node remains.

    Leaf values are scaled by the shrinkage factor only once growing is
    finished (see _apply_shrinkage).
    """
    while len(self.splittable_nodes) > 0:
        self.split_next()

    self._apply_shrinkage()
|
| 391 |
+
|
| 392 |
+
def _apply_shrinkage(self):
    """Scale every finalized leaf value by the shrinkage (learning rate).

    This must be done at the very end of the growing process. If it were
    applied during growing, e.g. in finalize_leaf(), a leaf could be
    shrunk while its sibling (still an internal node) is not, which
    would corrupt the 'middle' value used to enforce monotonic
    constraints.
    """
    factor = self.shrinkage
    for node in self.finalized_leaves:
        node.value *= factor
|
| 403 |
+
|
| 404 |
+
def _initialize_root(self, gradients, hessians):
    """Initialize root node and finalize it if needed."""
    n_samples = self.X_binned.shape[0]
    depth = 0
    sum_gradients = sum_parallel(gradients, self.n_threads)
    # With a constant hessian, a single stored value stands for every
    # sample, so the sum is just value * n_samples.
    if self.histogram_builder.hessians_are_constant:
        sum_hessians = hessians[0] * n_samples
    else:
        sum_hessians = sum_parallel(hessians, self.n_threads)
    # The root owns the whole splitter partition [0, n_samples).
    self.root = TreeNode(
        depth=depth,
        sample_indices=self.splitter.partition,
        partition_start=0,
        partition_stop=n_samples,
        sum_gradients=sum_gradients,
        sum_hessians=sum_hessians,
        value=0,
    )

    # Too few samples for any split to satisfy min_samples_leaf.
    if self.root.n_samples < 2 * self.min_samples_leaf:
        # Do not even bother computing any splitting statistics.
        self._finalize_leaf(self.root)
        return
    # Total hessian mass already below the splitting threshold.
    if sum_hessians < self.splitter.min_hessian_to_split:
        self._finalize_leaf(self.root)
        return

    # At the root, every constraint group is still applicable and any
    # feature that appears in some group may be split on.
    if self.interaction_cst is not None:
        self.root.interaction_cst_indices = range(len(self.interaction_cst))
        allowed_features = set().union(*self.interaction_cst)
        self.root.allowed_features = np.fromiter(
            allowed_features, dtype=np.uint32, count=len(allowed_features)
        )

    tic = time()
    # The root histogram has to be computed the brute-force way; children
    # can later use the subtraction trick against it.
    self.root.histograms = self.histogram_builder.compute_histograms_brute(
        self.root.sample_indices, self.root.allowed_features
    )
    self.total_compute_hist_time += time() - tic

    tic = time()
    self._compute_best_split_and_push(self.root)
    self.total_find_split_time += time() - tic
|
| 447 |
+
|
| 448 |
+
def _compute_best_split_and_push(self, node):
    """Compute the best possible split (SplitInfo) of a given node.

    The node is pushed onto the heap of splittable nodes only if the
    best gain is strictly positive. A gain of 0 means either that all
    leaves are pure, or that no split can satisfy the constraints
    (min_hessians_to_split, min_gain_to_split, min_samples_leaf); in
    both cases the node is finalized as a leaf instead.
    """
    best_split = self.splitter.find_node_split(
        n_samples=node.n_samples,
        histograms=node.histograms,
        sum_gradients=node.sum_gradients,
        sum_hessians=node.sum_hessians,
        value=node.value,
        lower_bound=node.children_lower_bound,
        upper_bound=node.children_upper_bound,
        allowed_features=node.allowed_features,
    )
    node.split_info = best_split

    if best_split.gain > 0:
        heappush(self.splittable_nodes, node)
    else:  # no valid split
        self._finalize_leaf(node)
|
| 472 |
+
|
| 473 |
+
def split_next(self):
    """Split the node with highest potential gain.

    Returns
    -------
    left : TreeNode
        The resulting left child.
    right : TreeNode
        The resulting right child.
    """
    # Consider the node with the highest loss reduction (a.k.a. gain)
    node = heappop(self.splittable_nodes)

    tic = time()
    # Partition the node's samples in place; right_child_pos is the
    # offset (within this node's partition slice) where the right
    # child's samples begin.
    (
        sample_indices_left,
        sample_indices_right,
        right_child_pos,
    ) = self.splitter.split_indices(node.split_info, node.sample_indices)
    self.total_apply_split_time += time() - tic

    depth = node.depth + 1
    # Splitting replaces one candidate leaf by two children.
    n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)
    n_leaf_nodes += 2

    left_child_node = TreeNode(
        depth=depth,
        sample_indices=sample_indices_left,
        partition_start=node.partition_start,
        partition_stop=node.partition_start + right_child_pos,
        sum_gradients=node.split_info.sum_gradient_left,
        sum_hessians=node.split_info.sum_hessian_left,
        value=node.split_info.value_left,
    )
    right_child_node = TreeNode(
        depth=depth,
        sample_indices=sample_indices_right,
        partition_start=left_child_node.partition_stop,
        partition_stop=node.partition_stop,
        sum_gradients=node.split_info.sum_gradient_right,
        sum_hessians=node.split_info.sum_hessian_right,
        value=node.split_info.value_right,
    )

    node.right_child = right_child_node
    node.left_child = left_child_node

    # set interaction constraints (the indices of the constraints sets)
    if self.interaction_cst is not None:
        # Calculate allowed_features and interaction_cst_indices only once. Child
        # nodes inherit them before they get split.
        (
            left_child_node.allowed_features,
            left_child_node.interaction_cst_indices,
        ) = self._compute_interactions(node)
        right_child_node.interaction_cst_indices = (
            left_child_node.interaction_cst_indices
        )
        right_child_node.allowed_features = left_child_node.allowed_features

    if not self.has_missing_values[node.split_info.feature_idx]:
        # If no missing values are encountered at fit time, then samples
        # with missing values during predict() will go to whichever child
        # has the most samples.
        node.split_info.missing_go_to_left = (
            left_child_node.n_samples > right_child_node.n_samples
        )

    self.n_nodes += 2
    self.n_categorical_splits += node.split_info.is_categorical

    # Global stopping criterion: reaching max_leaf_nodes finalizes every
    # remaining candidate, not just the two new children.
    if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:
        self._finalize_leaf(left_child_node)
        self._finalize_leaf(right_child_node)
        self._finalize_splittable_nodes()
        return left_child_node, right_child_node

    if self.max_depth is not None and depth == self.max_depth:
        self._finalize_leaf(left_child_node)
        self._finalize_leaf(right_child_node)
        return left_child_node, right_child_node

    # A child with fewer than 2 * min_samples_leaf samples cannot itself
    # be split without violating min_samples_leaf.
    if left_child_node.n_samples < self.min_samples_leaf * 2:
        self._finalize_leaf(left_child_node)
    if right_child_node.n_samples < self.min_samples_leaf * 2:
        self._finalize_leaf(right_child_node)

    if self.with_monotonic_cst:
        # Set value bounds for respecting monotonic constraints
        # See test_nodes_values() for details
        if (
            self.monotonic_cst[node.split_info.feature_idx]
            == MonotonicConstraint.NO_CST
        ):
            lower_left = lower_right = node.children_lower_bound
            upper_left = upper_right = node.children_upper_bound
        else:
            # The midpoint between the two child values separates the
            # bound ranges of the left and right subtrees.
            mid = (left_child_node.value + right_child_node.value) / 2
            if (
                self.monotonic_cst[node.split_info.feature_idx]
                == MonotonicConstraint.POS
            ):
                lower_left, upper_left = node.children_lower_bound, mid
                lower_right, upper_right = mid, node.children_upper_bound
            else:  # NEG
                lower_left, upper_left = mid, node.children_upper_bound
                lower_right, upper_right = node.children_lower_bound, mid
        left_child_node.set_children_bounds(lower_left, upper_left)
        right_child_node.set_children_bounds(lower_right, upper_right)

    # Compute histograms of children, and compute their best possible split
    # (if needed)
    should_split_left = not left_child_node.is_leaf
    should_split_right = not right_child_node.is_leaf
    if should_split_left or should_split_right:
        # We will compute the histograms of both nodes even if one of them
        # is a leaf, since computing the second histogram is very cheap
        # (using histogram subtraction).
        n_samples_left = left_child_node.sample_indices.shape[0]
        n_samples_right = right_child_node.sample_indices.shape[0]
        if n_samples_left < n_samples_right:
            smallest_child = left_child_node
            largest_child = right_child_node
        else:
            smallest_child = right_child_node
            largest_child = left_child_node

        # We use the brute O(n_samples) method on the child that has the
        # smallest number of samples, and the subtraction trick O(n_bins)
        # on the other one.
        # Note that both left and right child have the same allowed_features.
        tic = time()
        smallest_child.histograms = self.histogram_builder.compute_histograms_brute(
            smallest_child.sample_indices, smallest_child.allowed_features
        )
        largest_child.histograms = (
            self.histogram_builder.compute_histograms_subtraction(
                node.histograms,
                smallest_child.histograms,
                smallest_child.allowed_features,
            )
        )
        # node.histograms is reused in largest_child.histograms. To break cyclic
        # memory references and help garbage collection, we set it to None.
        node.histograms = None
        self.total_compute_hist_time += time() - tic

        tic = time()
        if should_split_left:
            self._compute_best_split_and_push(left_child_node)
        if should_split_right:
            self._compute_best_split_and_push(right_child_node)
        self.total_find_split_time += time() - tic

        # Release memory used by histograms as they are no longer needed
        # for leaf nodes since they won't be split.
        for child in (left_child_node, right_child_node):
            if child.is_leaf:
                del child.histograms

    # Release memory used by histograms as they are no longer needed for
    # internal nodes once children histograms have been computed.
    del node.histograms

    return left_child_node, right_child_node
|
| 638 |
+
|
| 639 |
+
def _compute_interactions(self, node):
    r"""Compute features allowed by interactions to be inherited by child nodes.

    Example: Assume constraints [{0, 1}, {1, 2}].
       1      <- Both constraint groups could be applied from now on
      / \
     1   2    <- Left split still fulfills both constraint groups.
    / \ / \      Right split at feature 2 has only group {1, 2} from now on.

    LightGBM uses the same logic for overlapping groups. See
    https://github.com/microsoft/LightGBM/issues/4481 for details.

    Parameters:
    ----------
    node : TreeNode
        A node that might have children. Based on its feature_idx, the interaction
        constraints for possible child nodes are computed.

    Returns
    -------
    allowed_features : ndarray, dtype=uint32
        Indices of features allowed to split for children.
    interaction_cst_indices : list of ints
        Indices of the interaction sets that have to be applied on splits of
        child nodes. The fewer sets the stronger the constraint as fewer sets
        contain fewer features.
    """
    # Note:
    # - Case of no interactions is already captured before function call.
    # - This is for nodes that are already split and have a
    #   node.split_info.feature_idx.
    split_feature = node.split_info.feature_idx
    surviving_features = set()
    surviving_indices = []
    # Keep only the constraint groups containing the feature just split
    # on; their union is what the children may still split on.
    for cst_idx in node.interaction_cst_indices:
        group = self.interaction_cst[cst_idx]
        if split_feature in group:
            surviving_indices.append(cst_idx)
            surviving_features.update(group)
    allowed = np.fromiter(
        surviving_features, dtype=np.uint32, count=len(surviving_features)
    )
    return allowed, surviving_indices
|
| 680 |
+
|
| 681 |
+
def _finalize_leaf(self, node):
    """Mark ``node`` as a leaf and record it among the finalized leaves."""
    self.finalized_leaves.append(node)
    node.is_leaf = True
|
| 686 |
+
|
| 687 |
+
def _finalize_splittable_nodes(self):
    """Turn every remaining splittable node into a leaf.

    Used when a global stopping criterion is met, e.g. the maximum
    number of leaves or the maximum depth."""
    while self.splittable_nodes:
        self._finalize_leaf(self.splittable_nodes.pop())
|
| 695 |
+
|
| 696 |
+
def make_predictor(self, binning_thresholds):
    """Make a TreePredictor object out of the current tree.

    Parameters
    ----------
    binning_thresholds : array-like of floats
        Corresponds to the bin_thresholds_ attribute of the BinMapper.
        For each feature, this stores:

        - the bin frontiers for continuous features
        - the unique raw category values for categorical features

    Returns
    -------
    A TreePredictor object.
    """
    nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE)
    # One 8-word bitset per categorical split, in both binned and raw
    # category space.
    bitset_shape = (self.n_categorical_splits, 8)
    binned_bitsets = np.zeros(bitset_shape, dtype=X_BITSET_INNER_DTYPE)
    raw_bitsets = np.zeros(bitset_shape, dtype=X_BITSET_INNER_DTYPE)
    # Recursively flatten the grown tree into the predictor arrays,
    # starting from the root.
    _fill_predictor_arrays(
        nodes,
        binned_bitsets,
        raw_bitsets,
        self.root,
        binning_thresholds,
        self.n_bins_non_missing,
    )
    return TreePredictor(nodes, binned_bitsets, raw_bitsets)
|
| 730 |
+
|
| 731 |
+
|
| 732 |
+
def _fill_predictor_arrays(
    predictor_nodes,
    binned_left_cat_bitsets,
    raw_left_cat_bitsets,
    grower_node,
    binning_thresholds,
    n_bins_non_missing,
    next_free_node_idx=0,
    next_free_bitset_idx=0,
):
    """Helper used in make_predictor to set the TreePredictor fields.

    Recursively writes ``grower_node`` and its subtree into the flat
    ``predictor_nodes`` record array (pre-order: node, left subtree,
    right subtree), filling the categorical bitset arrays along the way.
    Returns the updated ``(next_free_node_idx, next_free_bitset_idx)``
    so the caller can continue allocating slots.
    """
    node = predictor_nodes[next_free_node_idx]
    node["count"] = grower_node.n_samples
    node["depth"] = grower_node.depth
    # Nodes finalized without ever computing a split have no split_info.
    if grower_node.split_info is not None:
        node["gain"] = grower_node.split_info.gain
    else:
        node["gain"] = -1

    node["value"] = grower_node.value

    if grower_node.is_leaf:
        # Leaf node
        node["is_leaf"] = True
        return next_free_node_idx + 1, next_free_bitset_idx

    split_info = grower_node.split_info
    feature_idx, bin_idx = split_info.feature_idx, split_info.bin_idx
    node["feature_idx"] = feature_idx
    node["bin_threshold"] = bin_idx
    node["missing_go_to_left"] = split_info.missing_go_to_left
    node["is_categorical"] = split_info.is_categorical

    if split_info.bin_idx == n_bins_non_missing[feature_idx] - 1:
        # Split is on the last non-missing bin: it's a "split on nans".
        # All nans go to the right, the rest go to the left.
        # Note: for categorical splits, bin_idx is 0 and we rely on the bitset
        node["num_threshold"] = np.inf
    elif split_info.is_categorical:
        categories = binning_thresholds[feature_idx]
        # Allocate the next bitset slot and record the left-going
        # categories, both in binned space and in raw category space.
        node["bitset_idx"] = next_free_bitset_idx
        binned_left_cat_bitsets[next_free_bitset_idx] = split_info.left_cat_bitset
        set_raw_bitset_from_binned_bitset(
            raw_left_cat_bitsets[next_free_bitset_idx],
            split_info.left_cat_bitset,
            categories,
        )
        next_free_bitset_idx += 1
    else:
        # Numerical split: map the bin index back to a raw threshold.
        node["num_threshold"] = binning_thresholds[feature_idx][bin_idx]

    next_free_node_idx += 1

    # Left subtree occupies the slots starting right after this node.
    node["left"] = next_free_node_idx
    next_free_node_idx, next_free_bitset_idx = _fill_predictor_arrays(
        predictor_nodes,
        binned_left_cat_bitsets,
        raw_left_cat_bitsets,
        grower_node.left_child,
        binning_thresholds=binning_thresholds,
        n_bins_non_missing=n_bins_non_missing,
        next_free_node_idx=next_free_node_idx,
        next_free_bitset_idx=next_free_bitset_idx,
    )

    # Right subtree starts where the left subtree ended.
    node["right"] = next_free_node_idx
    return _fill_predictor_arrays(
        predictor_nodes,
        binned_left_cat_bitsets,
        raw_left_cat_bitsets,
        grower_node.right_child,
        binning_thresholds=binning_thresholds,
        n_bins_non_missing=n_bins_non_missing,
        next_free_node_idx=next_free_node_idx,
        next_free_bitset_idx=next_free_bitset_idx,
    )
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains the TreePredictor class which is used for prediction.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Authors: The scikit-learn developers
|
| 6 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from ._predictor import (
|
| 11 |
+
_compute_partial_dependence,
|
| 12 |
+
_predict_from_binned_data,
|
| 13 |
+
_predict_from_raw_data,
|
| 14 |
+
)
|
| 15 |
+
from .common import PREDICTOR_RECORD_DTYPE, Y_DTYPE
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TreePredictor:
|
| 19 |
+
"""Tree class used for predictions.
|
| 20 |
+
|
| 21 |
+
Parameters
|
| 22 |
+
----------
|
| 23 |
+
nodes : ndarray of PREDICTOR_RECORD_DTYPE
|
| 24 |
+
The nodes of the tree.
|
| 25 |
+
binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
|
| 26 |
+
Array of bitsets for binned categories used in predict_binned when a
|
| 27 |
+
split is categorical.
|
| 28 |
+
raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
|
| 29 |
+
Array of bitsets for raw categories used in predict when a split is
|
| 30 |
+
categorical.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets):
    # Store the flattened tree and the categorical-split bitsets.
    self.raw_left_cat_bitsets = raw_left_cat_bitsets
    self.binned_left_cat_bitsets = binned_left_cat_bitsets
    self.nodes = nodes
|
| 37 |
+
|
| 38 |
+
def get_n_leaf_nodes(self):
|
| 39 |
+
"""Return number of leaves."""
|
| 40 |
+
return int(self.nodes["is_leaf"].sum())
|
| 41 |
+
|
| 42 |
+
def get_max_depth(self):
|
| 43 |
+
"""Return maximum depth among all leaves."""
|
| 44 |
+
return int(self.nodes["depth"].max())
|
| 45 |
+
|
| 46 |
+
def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
|
| 47 |
+
"""Predict raw values for non-binned data.
|
| 48 |
+
|
| 49 |
+
Parameters
|
| 50 |
+
----------
|
| 51 |
+
X : ndarray, shape (n_samples, n_features)
|
| 52 |
+
The input samples.
|
| 53 |
+
|
| 54 |
+
known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
|
| 55 |
+
Array of bitsets of known categories, for each categorical feature.
|
| 56 |
+
|
| 57 |
+
f_idx_map : ndarray of shape (n_features,)
|
| 58 |
+
Map from original feature index to the corresponding index in the
|
| 59 |
+
known_cat_bitsets array.
|
| 60 |
+
|
| 61 |
+
n_threads : int
|
| 62 |
+
Number of OpenMP threads to use.
|
| 63 |
+
|
| 64 |
+
Returns
|
| 65 |
+
-------
|
| 66 |
+
y : ndarray, shape (n_samples,)
|
| 67 |
+
The raw predicted values.
|
| 68 |
+
"""
|
| 69 |
+
out = np.empty(X.shape[0], dtype=Y_DTYPE)
|
| 70 |
+
|
| 71 |
+
_predict_from_raw_data(
|
| 72 |
+
self.nodes,
|
| 73 |
+
X,
|
| 74 |
+
self.raw_left_cat_bitsets,
|
| 75 |
+
known_cat_bitsets,
|
| 76 |
+
f_idx_map,
|
| 77 |
+
n_threads,
|
| 78 |
+
out,
|
| 79 |
+
)
|
| 80 |
+
return out
|
| 81 |
+
|
| 82 |
+
def predict_binned(self, X, missing_values_bin_idx, n_threads):
|
| 83 |
+
"""Predict raw values for binned data.
|
| 84 |
+
|
| 85 |
+
Parameters
|
| 86 |
+
----------
|
| 87 |
+
X : ndarray, shape (n_samples, n_features)
|
| 88 |
+
The input samples.
|
| 89 |
+
missing_values_bin_idx : uint8
|
| 90 |
+
Index of the bin that is used for missing values. This is the
|
| 91 |
+
index of the last bin and is always equal to max_bins (as passed
|
| 92 |
+
to the GBDT classes), or equivalently to n_bins - 1.
|
| 93 |
+
n_threads : int
|
| 94 |
+
Number of OpenMP threads to use.
|
| 95 |
+
|
| 96 |
+
Returns
|
| 97 |
+
-------
|
| 98 |
+
y : ndarray, shape (n_samples,)
|
| 99 |
+
The raw predicted values.
|
| 100 |
+
"""
|
| 101 |
+
out = np.empty(X.shape[0], dtype=Y_DTYPE)
|
| 102 |
+
_predict_from_binned_data(
|
| 103 |
+
self.nodes,
|
| 104 |
+
X,
|
| 105 |
+
self.binned_left_cat_bitsets,
|
| 106 |
+
missing_values_bin_idx,
|
| 107 |
+
n_threads,
|
| 108 |
+
out,
|
| 109 |
+
)
|
| 110 |
+
return out
|
| 111 |
+
|
| 112 |
+
def compute_partial_dependence(self, grid, target_features, out):
|
| 113 |
+
"""Fast partial dependence computation.
|
| 114 |
+
|
| 115 |
+
Parameters
|
| 116 |
+
----------
|
| 117 |
+
grid : ndarray, shape (n_samples, n_target_features)
|
| 118 |
+
The grid points on which the partial dependence should be
|
| 119 |
+
evaluated.
|
| 120 |
+
target_features : ndarray, shape (n_target_features)
|
| 121 |
+
The set of target features for which the partial dependence
|
| 122 |
+
should be evaluated.
|
| 123 |
+
out : ndarray, shape (n_samples)
|
| 124 |
+
The value of the partial dependence function on each grid
|
| 125 |
+
point.
|
| 126 |
+
"""
|
| 127 |
+
_compute_partial_dependence(self.nodes, grid, target_features, out)
|
| 128 |
+
|
| 129 |
+
def __setstate__(self, state):
|
| 130 |
+
try:
|
| 131 |
+
super().__setstate__(state)
|
| 132 |
+
except AttributeError:
|
| 133 |
+
self.__dict__.update(state)
|
| 134 |
+
|
| 135 |
+
# The dtype of feature_idx is np.intp which is platform dependent. Here, we
|
| 136 |
+
# make sure that saving and loading on different bitness systems works without
|
| 137 |
+
# errors. For instance, on a 64 bit Python runtime, np.intp = np.int64,
|
| 138 |
+
# while on 32 bit np.intp = np.int32.
|
| 139 |
+
#
|
| 140 |
+
# TODO: consider always using platform agnostic dtypes for fitted
|
| 141 |
+
# estimator attributes. For this particular estimator, this would
|
| 142 |
+
# mean replacing the intp field of PREDICTOR_RECORD_DTYPE by an int32
|
| 143 |
+
# field. Ideally this should be done consistently throughout
|
| 144 |
+
# scikit-learn along with a common test.
|
| 145 |
+
if self.nodes.dtype != PREDICTOR_RECORD_DTYPE:
|
| 146 |
+
self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind")
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.pyx
ADDED
|
@@ -0,0 +1,1191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module contains routines and data structures to:
|
| 2 |
+
|
| 3 |
+
- Find the best possible split of a node. For a given node, a split is
|
| 4 |
+
characterized by a feature and a bin.
|
| 5 |
+
- Apply a split to a node, i.e. split the indices of the samples at the node
|
| 6 |
+
into the newly created left and right children.
|
| 7 |
+
"""
|
| 8 |
+
# Author: Nicolas Hug
|
| 9 |
+
|
| 10 |
+
cimport cython
|
| 11 |
+
from cython.parallel import prange
|
| 12 |
+
import numpy as np
|
| 13 |
+
from libc.math cimport INFINITY, ceil
|
| 14 |
+
from libc.stdlib cimport malloc, free, qsort
|
| 15 |
+
from libc.string cimport memcpy
|
| 16 |
+
|
| 17 |
+
from ...utils._typedefs cimport uint8_t
|
| 18 |
+
from .common cimport X_BINNED_DTYPE_C
|
| 19 |
+
from .common cimport Y_DTYPE_C
|
| 20 |
+
from .common cimport hist_struct
|
| 21 |
+
from .common cimport BITSET_INNER_DTYPE_C
|
| 22 |
+
from .common cimport BITSET_DTYPE_C
|
| 23 |
+
from .common cimport MonotonicConstraint
|
| 24 |
+
from ._bitset cimport init_bitset
|
| 25 |
+
from ._bitset cimport set_bitset
|
| 26 |
+
from ._bitset cimport in_bitset
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
cdef struct split_info_struct:
|
| 30 |
+
# Same as the SplitInfo class, but we need a C struct to use it in the
|
| 31 |
+
# nogil sections and to use in arrays.
|
| 32 |
+
Y_DTYPE_C gain
|
| 33 |
+
int feature_idx
|
| 34 |
+
unsigned int bin_idx
|
| 35 |
+
uint8_t missing_go_to_left
|
| 36 |
+
Y_DTYPE_C sum_gradient_left
|
| 37 |
+
Y_DTYPE_C sum_gradient_right
|
| 38 |
+
Y_DTYPE_C sum_hessian_left
|
| 39 |
+
Y_DTYPE_C sum_hessian_right
|
| 40 |
+
unsigned int n_samples_left
|
| 41 |
+
unsigned int n_samples_right
|
| 42 |
+
Y_DTYPE_C value_left
|
| 43 |
+
Y_DTYPE_C value_right
|
| 44 |
+
uint8_t is_categorical
|
| 45 |
+
BITSET_DTYPE_C left_cat_bitset
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# used in categorical splits for sorting categories by increasing values of
|
| 49 |
+
# sum_gradients / sum_hessians
|
| 50 |
+
cdef struct categorical_info:
|
| 51 |
+
X_BINNED_DTYPE_C bin_idx
|
| 52 |
+
Y_DTYPE_C value
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class SplitInfo:
|
| 56 |
+
"""Pure data class to store information about a potential split.
|
| 57 |
+
|
| 58 |
+
Parameters
|
| 59 |
+
----------
|
| 60 |
+
gain : float
|
| 61 |
+
The gain of the split.
|
| 62 |
+
feature_idx : int
|
| 63 |
+
The index of the feature to be split.
|
| 64 |
+
bin_idx : int
|
| 65 |
+
The index of the bin on which the split is made. Should be ignored if
|
| 66 |
+
`is_categorical` is True: `left_cat_bitset` will be used to determine
|
| 67 |
+
the split.
|
| 68 |
+
missing_go_to_left : bool
|
| 69 |
+
Whether missing values should go to the left child. This is used
|
| 70 |
+
whether the split is categorical or not.
|
| 71 |
+
sum_gradient_left : float
|
| 72 |
+
The sum of the gradients of all the samples in the left child.
|
| 73 |
+
sum_hessian_left : float
|
| 74 |
+
The sum of the hessians of all the samples in the left child.
|
| 75 |
+
sum_gradient_right : float
|
| 76 |
+
The sum of the gradients of all the samples in the right child.
|
| 77 |
+
sum_hessian_right : float
|
| 78 |
+
The sum of the hessians of all the samples in the right child.
|
| 79 |
+
n_samples_left : int, default=0
|
| 80 |
+
The number of samples in the left child.
|
| 81 |
+
n_samples_right : int
|
| 82 |
+
The number of samples in the right child.
|
| 83 |
+
is_categorical : bool
|
| 84 |
+
Whether the split is done on a categorical feature.
|
| 85 |
+
left_cat_bitset : ndarray of shape=(8,), dtype=uint32 or None
|
| 86 |
+
Bitset representing the categories that go to the left. This is used
|
| 87 |
+
only when `is_categorical` is True.
|
| 88 |
+
Note that missing values are part of that bitset if there are missing
|
| 89 |
+
values in the training data. For missing values, we rely on that
|
| 90 |
+
bitset for splitting, but at prediction time, we rely on
|
| 91 |
+
missing_go_to_left.
|
| 92 |
+
"""
|
| 93 |
+
def __init__(self, gain, feature_idx, bin_idx,
|
| 94 |
+
missing_go_to_left, sum_gradient_left, sum_hessian_left,
|
| 95 |
+
sum_gradient_right, sum_hessian_right, n_samples_left,
|
| 96 |
+
n_samples_right, value_left, value_right,
|
| 97 |
+
is_categorical, left_cat_bitset):
|
| 98 |
+
self.gain = gain
|
| 99 |
+
self.feature_idx = feature_idx
|
| 100 |
+
self.bin_idx = bin_idx
|
| 101 |
+
self.missing_go_to_left = missing_go_to_left
|
| 102 |
+
self.sum_gradient_left = sum_gradient_left
|
| 103 |
+
self.sum_hessian_left = sum_hessian_left
|
| 104 |
+
self.sum_gradient_right = sum_gradient_right
|
| 105 |
+
self.sum_hessian_right = sum_hessian_right
|
| 106 |
+
self.n_samples_left = n_samples_left
|
| 107 |
+
self.n_samples_right = n_samples_right
|
| 108 |
+
self.value_left = value_left
|
| 109 |
+
self.value_right = value_right
|
| 110 |
+
self.is_categorical = is_categorical
|
| 111 |
+
self.left_cat_bitset = left_cat_bitset
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@cython.final
|
| 115 |
+
cdef class Splitter:
|
| 116 |
+
"""Splitter used to find the best possible split at each node.
|
| 117 |
+
|
| 118 |
+
A split (see SplitInfo) is characterized by a feature and a bin.
|
| 119 |
+
|
| 120 |
+
The Splitter is also responsible for partitioning the samples among the
|
| 121 |
+
leaves of the tree (see split_indices() and the partition attribute).
|
| 122 |
+
|
| 123 |
+
Parameters
|
| 124 |
+
----------
|
| 125 |
+
X_binned : ndarray of int, shape (n_samples, n_features)
|
| 126 |
+
The binned input samples. Must be Fortran-aligned.
|
| 127 |
+
n_bins_non_missing : ndarray, shape (n_features,)
|
| 128 |
+
For each feature, gives the number of bins actually used for
|
| 129 |
+
non-missing values.
|
| 130 |
+
missing_values_bin_idx : uint8
|
| 131 |
+
Index of the bin that is used for missing values. This is the index of
|
| 132 |
+
the last bin and is always equal to max_bins (as passed to the GBDT
|
| 133 |
+
classes), or equivalently to n_bins - 1.
|
| 134 |
+
has_missing_values : ndarray, shape (n_features,)
|
| 135 |
+
Whether missing values were observed in the training data, for each
|
| 136 |
+
feature.
|
| 137 |
+
is_categorical : ndarray of bool of shape (n_features,)
|
| 138 |
+
Indicates categorical features.
|
| 139 |
+
monotonic_cst : ndarray of int of shape (n_features,), dtype=int
|
| 140 |
+
Indicates the monotonic constraint to enforce on each feature.
|
| 141 |
+
- 1: monotonic increase
|
| 142 |
+
- 0: no constraint
|
| 143 |
+
- -1: monotonic decrease
|
| 144 |
+
|
| 145 |
+
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
|
| 146 |
+
l2_regularization : float
|
| 147 |
+
The L2 regularization parameter.
|
| 148 |
+
min_hessian_to_split : float, default=1e-3
|
| 149 |
+
The minimum sum of hessians needed in each node. Splits that result in
|
| 150 |
+
at least one child having a sum of hessians less than
|
| 151 |
+
min_hessian_to_split are discarded.
|
| 152 |
+
min_samples_leaf : int, default=20
|
| 153 |
+
The minimum number of samples per leaf.
|
| 154 |
+
min_gain_to_split : float, default=0.0
|
| 155 |
+
The minimum gain needed to split a node. Splits with lower gain will
|
| 156 |
+
be ignored.
|
| 157 |
+
hessians_are_constant: bool, default is False
|
| 158 |
+
Whether hessians are constant.
|
| 159 |
+
feature_fraction_per_split : float, default=1
|
| 160 |
+
Proportion of randomly chosen features in each and every node split.
|
| 161 |
+
This is a form of regularization, smaller values make the trees weaker
|
| 162 |
+
learners and might prevent overfitting.
|
| 163 |
+
rng : Generator
|
| 164 |
+
n_threads : int, default=1
|
| 165 |
+
Number of OpenMP threads to use.
|
| 166 |
+
"""
|
| 167 |
+
cdef public:
|
| 168 |
+
const X_BINNED_DTYPE_C [::1, :] X_binned
|
| 169 |
+
unsigned int n_features
|
| 170 |
+
const unsigned int [::1] n_bins_non_missing
|
| 171 |
+
uint8_t missing_values_bin_idx
|
| 172 |
+
const uint8_t [::1] has_missing_values
|
| 173 |
+
const uint8_t [::1] is_categorical
|
| 174 |
+
const signed char [::1] monotonic_cst
|
| 175 |
+
uint8_t hessians_are_constant
|
| 176 |
+
Y_DTYPE_C l2_regularization
|
| 177 |
+
Y_DTYPE_C min_hessian_to_split
|
| 178 |
+
unsigned int min_samples_leaf
|
| 179 |
+
Y_DTYPE_C min_gain_to_split
|
| 180 |
+
Y_DTYPE_C feature_fraction_per_split
|
| 181 |
+
rng
|
| 182 |
+
|
| 183 |
+
unsigned int [::1] partition
|
| 184 |
+
unsigned int [::1] left_indices_buffer
|
| 185 |
+
unsigned int [::1] right_indices_buffer
|
| 186 |
+
int n_threads
|
| 187 |
+
|
| 188 |
+
def __init__(self,
|
| 189 |
+
const X_BINNED_DTYPE_C [::1, :] X_binned,
|
| 190 |
+
const unsigned int [::1] n_bins_non_missing,
|
| 191 |
+
const uint8_t missing_values_bin_idx,
|
| 192 |
+
const uint8_t [::1] has_missing_values,
|
| 193 |
+
const uint8_t [::1] is_categorical,
|
| 194 |
+
const signed char [::1] monotonic_cst,
|
| 195 |
+
Y_DTYPE_C l2_regularization,
|
| 196 |
+
Y_DTYPE_C min_hessian_to_split=1e-3,
|
| 197 |
+
unsigned int min_samples_leaf=20,
|
| 198 |
+
Y_DTYPE_C min_gain_to_split=0.,
|
| 199 |
+
uint8_t hessians_are_constant=False,
|
| 200 |
+
Y_DTYPE_C feature_fraction_per_split=1.0,
|
| 201 |
+
rng=np.random.RandomState(),
|
| 202 |
+
unsigned int n_threads=1):
|
| 203 |
+
|
| 204 |
+
self.X_binned = X_binned
|
| 205 |
+
self.n_features = X_binned.shape[1]
|
| 206 |
+
self.n_bins_non_missing = n_bins_non_missing
|
| 207 |
+
self.missing_values_bin_idx = missing_values_bin_idx
|
| 208 |
+
self.has_missing_values = has_missing_values
|
| 209 |
+
self.is_categorical = is_categorical
|
| 210 |
+
self.monotonic_cst = monotonic_cst
|
| 211 |
+
self.l2_regularization = l2_regularization
|
| 212 |
+
self.min_hessian_to_split = min_hessian_to_split
|
| 213 |
+
self.min_samples_leaf = min_samples_leaf
|
| 214 |
+
self.min_gain_to_split = min_gain_to_split
|
| 215 |
+
self.hessians_are_constant = hessians_are_constant
|
| 216 |
+
self.feature_fraction_per_split = feature_fraction_per_split
|
| 217 |
+
self.rng = rng
|
| 218 |
+
self.n_threads = n_threads
|
| 219 |
+
|
| 220 |
+
# The partition array maps each sample index into the leaves of the
|
| 221 |
+
# tree (a leaf in this context is a node that isn't split yet, not
|
| 222 |
+
# necessarily a 'finalized' leaf). Initially, the root contains all
|
| 223 |
+
# the indices, e.g.:
|
| 224 |
+
# partition = [abcdefghijkl]
|
| 225 |
+
# After a call to split_indices, it may look e.g. like this:
|
| 226 |
+
# partition = [cef|abdghijkl]
|
| 227 |
+
# we have 2 leaves, the left one is at position 0 and the second one at
|
| 228 |
+
# position 3. The order of the samples is irrelevant.
|
| 229 |
+
self.partition = np.arange(X_binned.shape[0], dtype=np.uint32)
|
| 230 |
+
# buffers used in split_indices to support parallel splitting.
|
| 231 |
+
self.left_indices_buffer = np.empty_like(self.partition)
|
| 232 |
+
self.right_indices_buffer = np.empty_like(self.partition)
|
| 233 |
+
|
| 234 |
+
def split_indices(Splitter self, split_info, unsigned int [::1]
|
| 235 |
+
sample_indices):
|
| 236 |
+
"""Split samples into left and right arrays.
|
| 237 |
+
|
| 238 |
+
The split is performed according to the best possible split
|
| 239 |
+
(split_info).
|
| 240 |
+
|
| 241 |
+
Ultimately, this is nothing but a partition of the sample_indices
|
| 242 |
+
array with a given pivot, exactly like a quicksort subroutine.
|
| 243 |
+
|
| 244 |
+
Parameters
|
| 245 |
+
----------
|
| 246 |
+
split_info : SplitInfo
|
| 247 |
+
The SplitInfo of the node to split.
|
| 248 |
+
sample_indices : ndarray of unsigned int, shape (n_samples_at_node,)
|
| 249 |
+
The indices of the samples at the node to split. This is a view
|
| 250 |
+
on self.partition, and it is modified inplace by placing the
|
| 251 |
+
indices of the left child at the beginning, and the indices of
|
| 252 |
+
the right child at the end.
|
| 253 |
+
|
| 254 |
+
Returns
|
| 255 |
+
-------
|
| 256 |
+
left_indices : ndarray of int, shape (n_left_samples,)
|
| 257 |
+
The indices of the samples in the left child. This is a view on
|
| 258 |
+
self.partition.
|
| 259 |
+
right_indices : ndarray of int, shape (n_right_samples,)
|
| 260 |
+
The indices of the samples in the right child. This is a view on
|
| 261 |
+
self.partition.
|
| 262 |
+
right_child_position : int
|
| 263 |
+
The position of the right child in ``sample_indices``.
|
| 264 |
+
"""
|
| 265 |
+
# This is a multi-threaded implementation inspired by lightgbm. Here
|
| 266 |
+
# is a quick break down. Let's suppose we want to split a node with 24
|
| 267 |
+
# samples named from a to x. self.partition looks like this (the * are
|
| 268 |
+
# indices in other leaves that we don't care about):
|
| 269 |
+
# partition = [*************abcdefghijklmnopqrstuvwx****************]
|
| 270 |
+
# ^ ^
|
| 271 |
+
# node_position node_position + node.n_samples
|
| 272 |
+
|
| 273 |
+
# Ultimately, we want to reorder the samples inside the boundaries of
|
| 274 |
+
# the leaf (which becomes a node) to now represent the samples in its
|
| 275 |
+
# left and right child. For example:
|
| 276 |
+
# partition = [*************abefilmnopqrtuxcdghjksvw*****************]
|
| 277 |
+
# ^ ^
|
| 278 |
+
# left_child_pos right_child_pos
|
| 279 |
+
# Note that left_child_pos always takes the value of node_position,
|
| 280 |
+
# and right_child_pos = left_child_pos + left_child.n_samples. The
|
| 281 |
+
# order of the samples inside a leaf is irrelevant.
|
| 282 |
+
|
| 283 |
+
# 1. sample_indices is a view on this region a..x. We conceptually
|
| 284 |
+
# divide it into n_threads regions. Each thread will be responsible
|
| 285 |
+
# for its own region. Here is an example with 4 threads:
|
| 286 |
+
# sample_indices = [abcdef|ghijkl|mnopqr|stuvwx]
|
| 287 |
+
# 2. Each thread processes 6 = 24 // 4 entries and maps them into
|
| 288 |
+
# left_indices_buffer or right_indices_buffer. For example, we could
|
| 289 |
+
# have the following mapping ('.' denotes an undefined entry):
|
| 290 |
+
# - left_indices_buffer = [abef..|il....|mnopqr|tux...]
|
| 291 |
+
# - right_indices_buffer = [cd....|ghjk..|......|svw...]
|
| 292 |
+
# 3. We keep track of the start positions of the regions (the '|') in
|
| 293 |
+
# ``offset_in_buffers`` as well as the size of each region. We also
|
| 294 |
+
# keep track of the number of samples put into the left/right child
|
| 295 |
+
# by each thread. Concretely:
|
| 296 |
+
# - left_counts = [4, 2, 6, 3]
|
| 297 |
+
# - right_counts = [2, 4, 0, 3]
|
| 298 |
+
# 4. Finally, we put left/right_indices_buffer back into the
|
| 299 |
+
# sample_indices, without any undefined entries and the partition
|
| 300 |
+
# looks as expected
|
| 301 |
+
# partition = [*************abefilmnopqrtuxcdghjksvw***************]
|
| 302 |
+
|
| 303 |
+
# Note: We here show left/right_indices_buffer as being the same size
|
| 304 |
+
# as sample_indices for simplicity, but in reality they are of the
|
| 305 |
+
# same size as partition.
|
| 306 |
+
|
| 307 |
+
cdef:
|
| 308 |
+
int n_samples = sample_indices.shape[0]
|
| 309 |
+
X_BINNED_DTYPE_C bin_idx = split_info.bin_idx
|
| 310 |
+
uint8_t missing_go_to_left = split_info.missing_go_to_left
|
| 311 |
+
uint8_t missing_values_bin_idx = self.missing_values_bin_idx
|
| 312 |
+
int feature_idx = split_info.feature_idx
|
| 313 |
+
const X_BINNED_DTYPE_C [::1] X_binned = \
|
| 314 |
+
self.X_binned[:, feature_idx]
|
| 315 |
+
unsigned int [::1] left_indices_buffer = self.left_indices_buffer
|
| 316 |
+
unsigned int [::1] right_indices_buffer = self.right_indices_buffer
|
| 317 |
+
uint8_t is_categorical = split_info.is_categorical
|
| 318 |
+
# Cython is unhappy if we set left_cat_bitset to
|
| 319 |
+
# split_info.left_cat_bitset directly, so we need a tmp var
|
| 320 |
+
BITSET_INNER_DTYPE_C [:] cat_bitset_tmp = split_info.left_cat_bitset
|
| 321 |
+
BITSET_DTYPE_C left_cat_bitset
|
| 322 |
+
int n_threads = self.n_threads
|
| 323 |
+
|
| 324 |
+
int [:] sizes = np.full(n_threads, n_samples // n_threads,
|
| 325 |
+
dtype=np.int32)
|
| 326 |
+
int [:] offset_in_buffers = np.zeros(n_threads, dtype=np.int32)
|
| 327 |
+
int [:] left_counts = np.empty(n_threads, dtype=np.int32)
|
| 328 |
+
int [:] right_counts = np.empty(n_threads, dtype=np.int32)
|
| 329 |
+
int left_count
|
| 330 |
+
int right_count
|
| 331 |
+
int start
|
| 332 |
+
int stop
|
| 333 |
+
int i
|
| 334 |
+
int thread_idx
|
| 335 |
+
int sample_idx
|
| 336 |
+
int right_child_position
|
| 337 |
+
uint8_t turn_left
|
| 338 |
+
int [:] left_offset = np.zeros(n_threads, dtype=np.int32)
|
| 339 |
+
int [:] right_offset = np.zeros(n_threads, dtype=np.int32)
|
| 340 |
+
|
| 341 |
+
# only set left_cat_bitset when is_categorical is True
|
| 342 |
+
if is_categorical:
|
| 343 |
+
left_cat_bitset = &cat_bitset_tmp[0]
|
| 344 |
+
|
| 345 |
+
with nogil:
|
| 346 |
+
for thread_idx in range(n_samples % n_threads):
|
| 347 |
+
sizes[thread_idx] += 1
|
| 348 |
+
|
| 349 |
+
for thread_idx in range(1, n_threads):
|
| 350 |
+
offset_in_buffers[thread_idx] = \
|
| 351 |
+
offset_in_buffers[thread_idx - 1] + sizes[thread_idx - 1]
|
| 352 |
+
|
| 353 |
+
# map indices from sample_indices to left/right_indices_buffer
|
| 354 |
+
for thread_idx in prange(n_threads, schedule='static',
|
| 355 |
+
chunksize=1, num_threads=n_threads):
|
| 356 |
+
left_count = 0
|
| 357 |
+
right_count = 0
|
| 358 |
+
|
| 359 |
+
start = offset_in_buffers[thread_idx]
|
| 360 |
+
stop = start + sizes[thread_idx]
|
| 361 |
+
for i in range(start, stop):
|
| 362 |
+
sample_idx = sample_indices[i]
|
| 363 |
+
turn_left = sample_goes_left(
|
| 364 |
+
missing_go_to_left,
|
| 365 |
+
missing_values_bin_idx, bin_idx,
|
| 366 |
+
X_binned[sample_idx], is_categorical,
|
| 367 |
+
left_cat_bitset)
|
| 368 |
+
|
| 369 |
+
if turn_left:
|
| 370 |
+
left_indices_buffer[start + left_count] = sample_idx
|
| 371 |
+
left_count = left_count + 1
|
| 372 |
+
else:
|
| 373 |
+
right_indices_buffer[start + right_count] = sample_idx
|
| 374 |
+
right_count = right_count + 1
|
| 375 |
+
|
| 376 |
+
left_counts[thread_idx] = left_count
|
| 377 |
+
right_counts[thread_idx] = right_count
|
| 378 |
+
|
| 379 |
+
# position of right child = just after the left child
|
| 380 |
+
right_child_position = 0
|
| 381 |
+
for thread_idx in range(n_threads):
|
| 382 |
+
right_child_position += left_counts[thread_idx]
|
| 383 |
+
|
| 384 |
+
# offset of each thread in sample_indices for left and right
|
| 385 |
+
# child, i.e. where each thread will start to write.
|
| 386 |
+
right_offset[0] = right_child_position
|
| 387 |
+
for thread_idx in range(1, n_threads):
|
| 388 |
+
left_offset[thread_idx] = \
|
| 389 |
+
left_offset[thread_idx - 1] + left_counts[thread_idx - 1]
|
| 390 |
+
right_offset[thread_idx] = \
|
| 391 |
+
right_offset[thread_idx - 1] + right_counts[thread_idx - 1]
|
| 392 |
+
|
| 393 |
+
# map indices in left/right_indices_buffer back into
|
| 394 |
+
# sample_indices. This also updates self.partition since
|
| 395 |
+
# sample_indices is a view.
|
| 396 |
+
for thread_idx in prange(n_threads, schedule='static',
|
| 397 |
+
chunksize=1, num_threads=n_threads):
|
| 398 |
+
memcpy(
|
| 399 |
+
&sample_indices[left_offset[thread_idx]],
|
| 400 |
+
&left_indices_buffer[offset_in_buffers[thread_idx]],
|
| 401 |
+
sizeof(unsigned int) * left_counts[thread_idx]
|
| 402 |
+
)
|
| 403 |
+
if right_counts[thread_idx] > 0:
|
| 404 |
+
# If we're splitting the rightmost node of the tree, i.e. the
|
| 405 |
+
# rightmost node in the partition array, and if n_threads >= 2, one
|
| 406 |
+
# might have right_counts[-1] = 0 and right_offset[-1] = len(sample_indices)
|
| 407 |
+
# leading to evaluating
|
| 408 |
+
#
|
| 409 |
+
# &sample_indices[right_offset[-1]] = &samples_indices[n_samples_at_node]
|
| 410 |
+
# = &partition[n_samples_in_tree]
|
| 411 |
+
#
|
| 412 |
+
# which is an out-of-bounds read access that can cause a segmentation fault.
|
| 413 |
+
# When boundscheck=True, removing this check produces this exception:
|
| 414 |
+
#
|
| 415 |
+
# IndexError: Out of bounds on buffer access
|
| 416 |
+
#
|
| 417 |
+
memcpy(
|
| 418 |
+
&sample_indices[right_offset[thread_idx]],
|
| 419 |
+
&right_indices_buffer[offset_in_buffers[thread_idx]],
|
| 420 |
+
sizeof(unsigned int) * right_counts[thread_idx]
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
return (sample_indices[:right_child_position],
|
| 424 |
+
sample_indices[right_child_position:],
|
| 425 |
+
right_child_position)
|
| 426 |
+
|
| 427 |
+
def find_node_split(
|
| 428 |
+
Splitter self,
|
| 429 |
+
unsigned int n_samples,
|
| 430 |
+
hist_struct [:, ::1] histograms, # IN
|
| 431 |
+
const Y_DTYPE_C sum_gradients,
|
| 432 |
+
const Y_DTYPE_C sum_hessians,
|
| 433 |
+
const Y_DTYPE_C value,
|
| 434 |
+
const Y_DTYPE_C lower_bound=-INFINITY,
|
| 435 |
+
const Y_DTYPE_C upper_bound=INFINITY,
|
| 436 |
+
const unsigned int [:] allowed_features=None,
|
| 437 |
+
):
|
| 438 |
+
"""For each feature, find the best bin to split on at a given node.
|
| 439 |
+
|
| 440 |
+
Return the best split info among all features.
|
| 441 |
+
|
| 442 |
+
Parameters
|
| 443 |
+
----------
|
| 444 |
+
n_samples : int
|
| 445 |
+
The number of samples at the node.
|
| 446 |
+
histograms : ndarray of HISTOGRAM_DTYPE of \
|
| 447 |
+
shape (n_features, max_bins)
|
| 448 |
+
The histograms of the current node.
|
| 449 |
+
sum_gradients : float
|
| 450 |
+
The sum of the gradients for each sample at the node.
|
| 451 |
+
sum_hessians : float
|
| 452 |
+
The sum of the hessians for each sample at the node.
|
| 453 |
+
value : float
|
| 454 |
+
The bounded value of the current node. We directly pass the value
|
| 455 |
+
instead of re-computing it from sum_gradients and sum_hessians,
|
| 456 |
+
because we need to compute the loss and the gain based on the
|
| 457 |
+
*bounded* value: computing the value from
|
| 458 |
+
sum_gradients / sum_hessians would give the unbounded value, and
|
| 459 |
+
the interaction with min_gain_to_split would not be correct
|
| 460 |
+
anymore. Side note: we can't use the lower_bound / upper_bound
|
| 461 |
+
parameters either because these refer to the bounds of the
|
| 462 |
+
children, not the bounds of the current node.
|
| 463 |
+
lower_bound : float
|
| 464 |
+
Lower bound for the children values for respecting the monotonic
|
| 465 |
+
constraints.
|
| 466 |
+
upper_bound : float
|
| 467 |
+
Upper bound for the children values for respecting the monotonic
|
| 468 |
+
constraints.
|
| 469 |
+
allowed_features : None or ndarray, dtype=np.uint32
|
| 470 |
+
Indices of the features that are allowed by interaction constraints to be
|
| 471 |
+
split.
|
| 472 |
+
|
| 473 |
+
Returns
|
| 474 |
+
-------
|
| 475 |
+
best_split_info : SplitInfo
|
| 476 |
+
The info about the best possible split among all features.
|
| 477 |
+
"""
|
| 478 |
+
cdef:
|
| 479 |
+
int feature_idx
|
| 480 |
+
int split_info_idx
|
| 481 |
+
int best_split_info_idx
|
| 482 |
+
int n_allowed_features
|
| 483 |
+
split_info_struct split_info
|
| 484 |
+
split_info_struct * split_infos
|
| 485 |
+
const uint8_t [::1] has_missing_values = self.has_missing_values
|
| 486 |
+
const uint8_t [::1] is_categorical = self.is_categorical
|
| 487 |
+
const signed char [::1] monotonic_cst = self.monotonic_cst
|
| 488 |
+
int n_threads = self.n_threads
|
| 489 |
+
bint has_interaction_cst = False
|
| 490 |
+
Y_DTYPE_C feature_fraction_per_split = self.feature_fraction_per_split
|
| 491 |
+
uint8_t [:] subsample_mask # same as npy_bool
|
| 492 |
+
int n_subsampled_features
|
| 493 |
+
|
| 494 |
+
has_interaction_cst = allowed_features is not None
|
| 495 |
+
if has_interaction_cst:
|
| 496 |
+
n_allowed_features = allowed_features.shape[0]
|
| 497 |
+
else:
|
| 498 |
+
n_allowed_features = self.n_features
|
| 499 |
+
|
| 500 |
+
if feature_fraction_per_split < 1.0:
|
| 501 |
+
# We do all random sampling before the nogil and make sure that we sample
|
| 502 |
+
# exactly n_subsampled_features >= 1 features.
|
| 503 |
+
n_subsampled_features = max(
|
| 504 |
+
1,
|
| 505 |
+
int(ceil(feature_fraction_per_split * n_allowed_features)),
|
| 506 |
+
)
|
| 507 |
+
subsample_mask_arr = np.full(n_allowed_features, False)
|
| 508 |
+
subsample_mask_arr[:n_subsampled_features] = True
|
| 509 |
+
self.rng.shuffle(subsample_mask_arr)
|
| 510 |
+
# https://github.com/numpy/numpy/issues/18273
|
| 511 |
+
subsample_mask = subsample_mask_arr
|
| 512 |
+
|
| 513 |
+
with nogil:
|
| 514 |
+
|
| 515 |
+
split_infos = <split_info_struct *> malloc(
|
| 516 |
+
n_allowed_features * sizeof(split_info_struct))
|
| 517 |
+
|
| 518 |
+
# split_info_idx is index of split_infos of size n_allowed_features.
|
| 519 |
+
# features_idx is the index of the feature column in X.
|
| 520 |
+
for split_info_idx in prange(n_allowed_features, schedule='static',
|
| 521 |
+
num_threads=n_threads):
|
| 522 |
+
if has_interaction_cst:
|
| 523 |
+
feature_idx = allowed_features[split_info_idx]
|
| 524 |
+
else:
|
| 525 |
+
feature_idx = split_info_idx
|
| 526 |
+
|
| 527 |
+
split_infos[split_info_idx].feature_idx = feature_idx
|
| 528 |
+
|
| 529 |
+
# For each feature, find best bin to split on
|
| 530 |
+
# Start with a gain of -1 if no better split is found, that
|
| 531 |
+
# means one of the constraints isn't respected
|
| 532 |
+
# (min_samples_leaf, etc.) and the grower will later turn the
|
| 533 |
+
# node into a leaf.
|
| 534 |
+
split_infos[split_info_idx].gain = -1
|
| 535 |
+
split_infos[split_info_idx].is_categorical = is_categorical[feature_idx]
|
| 536 |
+
|
| 537 |
+
# Note that subsample_mask is indexed by split_info_idx and not by
|
| 538 |
+
# feature_idx because we only need to exclude the same features again
|
| 539 |
+
# and again. We do NOT need to access the features directly by using
|
| 540 |
+
# allowed_features.
|
| 541 |
+
if feature_fraction_per_split < 1.0 and not subsample_mask[split_info_idx]:
|
| 542 |
+
continue
|
| 543 |
+
|
| 544 |
+
if is_categorical[feature_idx]:
|
| 545 |
+
self._find_best_bin_to_split_category(
|
| 546 |
+
feature_idx, has_missing_values[feature_idx],
|
| 547 |
+
histograms, n_samples, sum_gradients, sum_hessians,
|
| 548 |
+
value, monotonic_cst[feature_idx], lower_bound,
|
| 549 |
+
upper_bound, &split_infos[split_info_idx])
|
| 550 |
+
else:
|
| 551 |
+
# We will scan bins from left to right (in all cases), and
|
| 552 |
+
# if there are any missing values, we will also scan bins
|
| 553 |
+
# from right to left. This way, we can consider whichever
|
| 554 |
+
# case yields the best gain: either missing values go to
|
| 555 |
+
# the right (left to right scan) or to the left (right to
|
| 556 |
+
# left case). See algo 3 from the XGBoost paper
|
| 557 |
+
# https://arxiv.org/abs/1603.02754
|
| 558 |
+
# Note: for the categorical features above, this isn't
|
| 559 |
+
# needed since missing values are considered a native
|
| 560 |
+
# category.
|
| 561 |
+
self._find_best_bin_to_split_left_to_right(
|
| 562 |
+
feature_idx, has_missing_values[feature_idx],
|
| 563 |
+
histograms, n_samples, sum_gradients, sum_hessians,
|
| 564 |
+
value, monotonic_cst[feature_idx],
|
| 565 |
+
lower_bound, upper_bound, &split_infos[split_info_idx])
|
| 566 |
+
|
| 567 |
+
if has_missing_values[feature_idx]:
|
| 568 |
+
# We need to explore both directions to check whether
|
| 569 |
+
# sending the nans to the left child would lead to a higher
|
| 570 |
+
# gain
|
| 571 |
+
self._find_best_bin_to_split_right_to_left(
|
| 572 |
+
feature_idx, histograms, n_samples,
|
| 573 |
+
sum_gradients, sum_hessians,
|
| 574 |
+
value, monotonic_cst[feature_idx],
|
| 575 |
+
lower_bound, upper_bound, &split_infos[split_info_idx])
|
| 576 |
+
|
| 577 |
+
# then compute best possible split among all features
|
| 578 |
+
# split_info is set to the best of split_infos
|
| 579 |
+
best_split_info_idx = self._find_best_feature_to_split_helper(
|
| 580 |
+
split_infos, n_allowed_features
|
| 581 |
+
)
|
| 582 |
+
split_info = split_infos[best_split_info_idx]
|
| 583 |
+
|
| 584 |
+
out = SplitInfo(
|
| 585 |
+
split_info.gain,
|
| 586 |
+
split_info.feature_idx,
|
| 587 |
+
split_info.bin_idx,
|
| 588 |
+
split_info.missing_go_to_left,
|
| 589 |
+
split_info.sum_gradient_left,
|
| 590 |
+
split_info.sum_hessian_left,
|
| 591 |
+
split_info.sum_gradient_right,
|
| 592 |
+
split_info.sum_hessian_right,
|
| 593 |
+
split_info.n_samples_left,
|
| 594 |
+
split_info.n_samples_right,
|
| 595 |
+
split_info.value_left,
|
| 596 |
+
split_info.value_right,
|
| 597 |
+
split_info.is_categorical,
|
| 598 |
+
None, # left_cat_bitset will only be set if the split is categorical
|
| 599 |
+
)
|
| 600 |
+
# Only set bitset if the split is categorical
|
| 601 |
+
if split_info.is_categorical:
|
| 602 |
+
out.left_cat_bitset = np.asarray(split_info.left_cat_bitset, dtype=np.uint32)
|
| 603 |
+
|
| 604 |
+
free(split_infos)
|
| 605 |
+
return out
|
| 606 |
+
|
| 607 |
+
cdef int _find_best_feature_to_split_helper(
|
| 608 |
+
self,
|
| 609 |
+
split_info_struct * split_infos, # IN
|
| 610 |
+
int n_allowed_features,
|
| 611 |
+
) noexcept nogil:
|
| 612 |
+
"""Return the index of split_infos with the best feature split."""
|
| 613 |
+
cdef:
|
| 614 |
+
int split_info_idx
|
| 615 |
+
int best_split_info_idx = 0
|
| 616 |
+
|
| 617 |
+
for split_info_idx in range(1, n_allowed_features):
|
| 618 |
+
if (split_infos[split_info_idx].gain > split_infos[best_split_info_idx].gain):
|
| 619 |
+
best_split_info_idx = split_info_idx
|
| 620 |
+
return best_split_info_idx
|
| 621 |
+
|
| 622 |
+
cdef void _find_best_bin_to_split_left_to_right(
|
| 623 |
+
Splitter self,
|
| 624 |
+
unsigned int feature_idx,
|
| 625 |
+
uint8_t has_missing_values,
|
| 626 |
+
const hist_struct [:, ::1] histograms, # IN
|
| 627 |
+
unsigned int n_samples,
|
| 628 |
+
Y_DTYPE_C sum_gradients,
|
| 629 |
+
Y_DTYPE_C sum_hessians,
|
| 630 |
+
Y_DTYPE_C value,
|
| 631 |
+
signed char monotonic_cst,
|
| 632 |
+
Y_DTYPE_C lower_bound,
|
| 633 |
+
Y_DTYPE_C upper_bound,
|
| 634 |
+
split_info_struct * split_info) noexcept nogil: # OUT
|
| 635 |
+
"""Find best bin to split on for a given feature.
|
| 636 |
+
|
| 637 |
+
Splits that do not satisfy the splitting constraints
|
| 638 |
+
(min_gain_to_split, etc.) are discarded here.
|
| 639 |
+
|
| 640 |
+
We scan node from left to right. This version is called whether there
|
| 641 |
+
are missing values or not. If any, missing values are assigned to the
|
| 642 |
+
right node.
|
| 643 |
+
"""
|
| 644 |
+
cdef:
|
| 645 |
+
unsigned int bin_idx
|
| 646 |
+
unsigned int n_samples_left
|
| 647 |
+
unsigned int n_samples_right
|
| 648 |
+
unsigned int n_samples_ = n_samples
|
| 649 |
+
# We set the 'end' variable such that the last non-missing-values
|
| 650 |
+
# bin never goes to the left child (which would result in and
|
| 651 |
+
# empty right child), unless there are missing values, since these
|
| 652 |
+
# would go to the right child.
|
| 653 |
+
unsigned int end = \
|
| 654 |
+
self.n_bins_non_missing[feature_idx] - 1 + has_missing_values
|
| 655 |
+
Y_DTYPE_C sum_hessian_left
|
| 656 |
+
Y_DTYPE_C sum_hessian_right
|
| 657 |
+
Y_DTYPE_C sum_gradient_left
|
| 658 |
+
Y_DTYPE_C sum_gradient_right
|
| 659 |
+
Y_DTYPE_C loss_current_node
|
| 660 |
+
Y_DTYPE_C gain
|
| 661 |
+
uint8_t found_better_split = False
|
| 662 |
+
|
| 663 |
+
Y_DTYPE_C best_sum_hessian_left
|
| 664 |
+
Y_DTYPE_C best_sum_gradient_left
|
| 665 |
+
unsigned int best_bin_idx
|
| 666 |
+
unsigned int best_n_samples_left
|
| 667 |
+
Y_DTYPE_C best_gain = -1
|
| 668 |
+
|
| 669 |
+
sum_gradient_left, sum_hessian_left = 0., 0.
|
| 670 |
+
n_samples_left = 0
|
| 671 |
+
|
| 672 |
+
loss_current_node = _loss_from_value(value, sum_gradients)
|
| 673 |
+
|
| 674 |
+
for bin_idx in range(end):
|
| 675 |
+
n_samples_left += histograms[feature_idx, bin_idx].count
|
| 676 |
+
n_samples_right = n_samples_ - n_samples_left
|
| 677 |
+
|
| 678 |
+
if self.hessians_are_constant:
|
| 679 |
+
sum_hessian_left += histograms[feature_idx, bin_idx].count
|
| 680 |
+
else:
|
| 681 |
+
sum_hessian_left += \
|
| 682 |
+
histograms[feature_idx, bin_idx].sum_hessians
|
| 683 |
+
sum_hessian_right = sum_hessians - sum_hessian_left
|
| 684 |
+
|
| 685 |
+
sum_gradient_left += histograms[feature_idx, bin_idx].sum_gradients
|
| 686 |
+
sum_gradient_right = sum_gradients - sum_gradient_left
|
| 687 |
+
|
| 688 |
+
if n_samples_left < self.min_samples_leaf:
|
| 689 |
+
continue
|
| 690 |
+
if n_samples_right < self.min_samples_leaf:
|
| 691 |
+
# won't get any better
|
| 692 |
+
break
|
| 693 |
+
|
| 694 |
+
if sum_hessian_left < self.min_hessian_to_split:
|
| 695 |
+
continue
|
| 696 |
+
if sum_hessian_right < self.min_hessian_to_split:
|
| 697 |
+
# won't get any better (hessians are > 0 since loss is convex)
|
| 698 |
+
break
|
| 699 |
+
|
| 700 |
+
gain = _split_gain(sum_gradient_left, sum_hessian_left,
|
| 701 |
+
sum_gradient_right, sum_hessian_right,
|
| 702 |
+
loss_current_node,
|
| 703 |
+
monotonic_cst,
|
| 704 |
+
lower_bound,
|
| 705 |
+
upper_bound,
|
| 706 |
+
self.l2_regularization)
|
| 707 |
+
|
| 708 |
+
if gain > best_gain and gain > self.min_gain_to_split:
|
| 709 |
+
found_better_split = True
|
| 710 |
+
best_gain = gain
|
| 711 |
+
best_bin_idx = bin_idx
|
| 712 |
+
best_sum_gradient_left = sum_gradient_left
|
| 713 |
+
best_sum_hessian_left = sum_hessian_left
|
| 714 |
+
best_n_samples_left = n_samples_left
|
| 715 |
+
|
| 716 |
+
if found_better_split:
|
| 717 |
+
split_info.gain = best_gain
|
| 718 |
+
split_info.bin_idx = best_bin_idx
|
| 719 |
+
# we scan from left to right so missing values go to the right
|
| 720 |
+
split_info.missing_go_to_left = False
|
| 721 |
+
split_info.sum_gradient_left = best_sum_gradient_left
|
| 722 |
+
split_info.sum_gradient_right = sum_gradients - best_sum_gradient_left
|
| 723 |
+
split_info.sum_hessian_left = best_sum_hessian_left
|
| 724 |
+
split_info.sum_hessian_right = sum_hessians - best_sum_hessian_left
|
| 725 |
+
split_info.n_samples_left = best_n_samples_left
|
| 726 |
+
split_info.n_samples_right = n_samples - best_n_samples_left
|
| 727 |
+
|
| 728 |
+
# We recompute best values here but it's cheap
|
| 729 |
+
split_info.value_left = compute_node_value(
|
| 730 |
+
split_info.sum_gradient_left, split_info.sum_hessian_left,
|
| 731 |
+
lower_bound, upper_bound, self.l2_regularization)
|
| 732 |
+
|
| 733 |
+
split_info.value_right = compute_node_value(
|
| 734 |
+
split_info.sum_gradient_right, split_info.sum_hessian_right,
|
| 735 |
+
lower_bound, upper_bound, self.l2_regularization)
|
| 736 |
+
|
| 737 |
+
cdef void _find_best_bin_to_split_right_to_left(
|
| 738 |
+
self,
|
| 739 |
+
unsigned int feature_idx,
|
| 740 |
+
const hist_struct [:, ::1] histograms, # IN
|
| 741 |
+
unsigned int n_samples,
|
| 742 |
+
Y_DTYPE_C sum_gradients,
|
| 743 |
+
Y_DTYPE_C sum_hessians,
|
| 744 |
+
Y_DTYPE_C value,
|
| 745 |
+
signed char monotonic_cst,
|
| 746 |
+
Y_DTYPE_C lower_bound,
|
| 747 |
+
Y_DTYPE_C upper_bound,
|
| 748 |
+
split_info_struct * split_info) noexcept nogil: # OUT
|
| 749 |
+
"""Find best bin to split on for a given feature.
|
| 750 |
+
|
| 751 |
+
Splits that do not satisfy the splitting constraints
|
| 752 |
+
(min_gain_to_split, etc.) are discarded here.
|
| 753 |
+
|
| 754 |
+
We scan node from right to left. This version is only called when
|
| 755 |
+
there are missing values. Missing values are assigned to the left
|
| 756 |
+
child.
|
| 757 |
+
|
| 758 |
+
If no missing value are present in the data this method isn't called
|
| 759 |
+
since only calling _find_best_bin_to_split_left_to_right is enough.
|
| 760 |
+
"""
|
| 761 |
+
|
| 762 |
+
cdef:
|
| 763 |
+
unsigned int bin_idx
|
| 764 |
+
unsigned int n_samples_left
|
| 765 |
+
unsigned int n_samples_right
|
| 766 |
+
unsigned int n_samples_ = n_samples
|
| 767 |
+
Y_DTYPE_C sum_hessian_left
|
| 768 |
+
Y_DTYPE_C sum_hessian_right
|
| 769 |
+
Y_DTYPE_C sum_gradient_left
|
| 770 |
+
Y_DTYPE_C sum_gradient_right
|
| 771 |
+
Y_DTYPE_C loss_current_node
|
| 772 |
+
Y_DTYPE_C gain
|
| 773 |
+
unsigned int start = self.n_bins_non_missing[feature_idx] - 2
|
| 774 |
+
uint8_t found_better_split = False
|
| 775 |
+
|
| 776 |
+
Y_DTYPE_C best_sum_hessian_left
|
| 777 |
+
Y_DTYPE_C best_sum_gradient_left
|
| 778 |
+
unsigned int best_bin_idx
|
| 779 |
+
unsigned int best_n_samples_left
|
| 780 |
+
Y_DTYPE_C best_gain = split_info.gain # computed during previous scan
|
| 781 |
+
|
| 782 |
+
sum_gradient_right, sum_hessian_right = 0., 0.
|
| 783 |
+
n_samples_right = 0
|
| 784 |
+
|
| 785 |
+
loss_current_node = _loss_from_value(value, sum_gradients)
|
| 786 |
+
|
| 787 |
+
for bin_idx in range(start, -1, -1):
|
| 788 |
+
n_samples_right += histograms[feature_idx, bin_idx + 1].count
|
| 789 |
+
n_samples_left = n_samples_ - n_samples_right
|
| 790 |
+
|
| 791 |
+
if self.hessians_are_constant:
|
| 792 |
+
sum_hessian_right += histograms[feature_idx, bin_idx + 1].count
|
| 793 |
+
else:
|
| 794 |
+
sum_hessian_right += \
|
| 795 |
+
histograms[feature_idx, bin_idx + 1].sum_hessians
|
| 796 |
+
sum_hessian_left = sum_hessians - sum_hessian_right
|
| 797 |
+
|
| 798 |
+
sum_gradient_right += \
|
| 799 |
+
histograms[feature_idx, bin_idx + 1].sum_gradients
|
| 800 |
+
sum_gradient_left = sum_gradients - sum_gradient_right
|
| 801 |
+
|
| 802 |
+
if n_samples_right < self.min_samples_leaf:
|
| 803 |
+
continue
|
| 804 |
+
if n_samples_left < self.min_samples_leaf:
|
| 805 |
+
# won't get any better
|
| 806 |
+
break
|
| 807 |
+
|
| 808 |
+
if sum_hessian_right < self.min_hessian_to_split:
|
| 809 |
+
continue
|
| 810 |
+
if sum_hessian_left < self.min_hessian_to_split:
|
| 811 |
+
# won't get any better (hessians are > 0 since loss is convex)
|
| 812 |
+
break
|
| 813 |
+
|
| 814 |
+
gain = _split_gain(sum_gradient_left, sum_hessian_left,
|
| 815 |
+
sum_gradient_right, sum_hessian_right,
|
| 816 |
+
loss_current_node,
|
| 817 |
+
monotonic_cst,
|
| 818 |
+
lower_bound,
|
| 819 |
+
upper_bound,
|
| 820 |
+
self.l2_regularization)
|
| 821 |
+
|
| 822 |
+
if gain > best_gain and gain > self.min_gain_to_split:
|
| 823 |
+
found_better_split = True
|
| 824 |
+
best_gain = gain
|
| 825 |
+
best_bin_idx = bin_idx
|
| 826 |
+
best_sum_gradient_left = sum_gradient_left
|
| 827 |
+
best_sum_hessian_left = sum_hessian_left
|
| 828 |
+
best_n_samples_left = n_samples_left
|
| 829 |
+
|
| 830 |
+
if found_better_split:
|
| 831 |
+
split_info.gain = best_gain
|
| 832 |
+
split_info.bin_idx = best_bin_idx
|
| 833 |
+
# we scan from right to left so missing values go to the left
|
| 834 |
+
split_info.missing_go_to_left = True
|
| 835 |
+
split_info.sum_gradient_left = best_sum_gradient_left
|
| 836 |
+
split_info.sum_gradient_right = sum_gradients - best_sum_gradient_left
|
| 837 |
+
split_info.sum_hessian_left = best_sum_hessian_left
|
| 838 |
+
split_info.sum_hessian_right = sum_hessians - best_sum_hessian_left
|
| 839 |
+
split_info.n_samples_left = best_n_samples_left
|
| 840 |
+
split_info.n_samples_right = n_samples - best_n_samples_left
|
| 841 |
+
|
| 842 |
+
# We recompute best values here but it's cheap
|
| 843 |
+
split_info.value_left = compute_node_value(
|
| 844 |
+
split_info.sum_gradient_left, split_info.sum_hessian_left,
|
| 845 |
+
lower_bound, upper_bound, self.l2_regularization)
|
| 846 |
+
|
| 847 |
+
split_info.value_right = compute_node_value(
|
| 848 |
+
split_info.sum_gradient_right, split_info.sum_hessian_right,
|
| 849 |
+
lower_bound, upper_bound, self.l2_regularization)
|
| 850 |
+
|
| 851 |
+
cdef void _find_best_bin_to_split_category(
|
| 852 |
+
self,
|
| 853 |
+
unsigned int feature_idx,
|
| 854 |
+
uint8_t has_missing_values,
|
| 855 |
+
const hist_struct [:, ::1] histograms, # IN
|
| 856 |
+
unsigned int n_samples,
|
| 857 |
+
Y_DTYPE_C sum_gradients,
|
| 858 |
+
Y_DTYPE_C sum_hessians,
|
| 859 |
+
Y_DTYPE_C value,
|
| 860 |
+
char monotonic_cst,
|
| 861 |
+
Y_DTYPE_C lower_bound,
|
| 862 |
+
Y_DTYPE_C upper_bound,
|
| 863 |
+
split_info_struct * split_info) noexcept nogil: # OUT
|
| 864 |
+
"""Find best split for categorical features.
|
| 865 |
+
|
| 866 |
+
Categories are first sorted according to their variance, and then
|
| 867 |
+
a scan is performed as if categories were ordered quantities.
|
| 868 |
+
|
| 869 |
+
Ref: "On Grouping for Maximum Homogeneity", Walter D. Fisher
|
| 870 |
+
"""
|
| 871 |
+
|
| 872 |
+
cdef:
|
| 873 |
+
unsigned int bin_idx
|
| 874 |
+
unsigned int n_bins_non_missing = self.n_bins_non_missing[feature_idx]
|
| 875 |
+
unsigned int missing_values_bin_idx = self.missing_values_bin_idx
|
| 876 |
+
categorical_info * cat_infos
|
| 877 |
+
unsigned int sorted_cat_idx
|
| 878 |
+
unsigned int n_used_bins = 0
|
| 879 |
+
int [2] scan_direction
|
| 880 |
+
int direction = 0
|
| 881 |
+
int best_direction = 0
|
| 882 |
+
unsigned int middle
|
| 883 |
+
unsigned int i
|
| 884 |
+
const hist_struct[::1] feature_hist = histograms[feature_idx, :]
|
| 885 |
+
Y_DTYPE_C sum_gradients_bin
|
| 886 |
+
Y_DTYPE_C sum_hessians_bin
|
| 887 |
+
Y_DTYPE_C loss_current_node
|
| 888 |
+
Y_DTYPE_C sum_gradient_left, sum_hessian_left
|
| 889 |
+
Y_DTYPE_C sum_gradient_right, sum_hessian_right
|
| 890 |
+
unsigned int n_samples_left, n_samples_right
|
| 891 |
+
Y_DTYPE_C gain
|
| 892 |
+
Y_DTYPE_C best_gain = -1.0
|
| 893 |
+
uint8_t found_better_split = False
|
| 894 |
+
Y_DTYPE_C best_sum_hessian_left
|
| 895 |
+
Y_DTYPE_C best_sum_gradient_left
|
| 896 |
+
unsigned int best_n_samples_left
|
| 897 |
+
unsigned int best_cat_infos_thresh
|
| 898 |
+
# Reduces the effect of noises in categorical features,
|
| 899 |
+
# especially for categories with few data. Called cat_smooth in
|
| 900 |
+
# LightGBM. TODO: Make this user adjustable?
|
| 901 |
+
Y_DTYPE_C MIN_CAT_SUPPORT = 10.
|
| 902 |
+
# this is equal to 1 for losses where hessians are constant
|
| 903 |
+
Y_DTYPE_C support_factor = n_samples / sum_hessians
|
| 904 |
+
|
| 905 |
+
# Details on the split finding:
|
| 906 |
+
# We first order categories by their sum_gradients / sum_hessians
|
| 907 |
+
# values, and we exclude categories that don't respect MIN_CAT_SUPPORT
|
| 908 |
+
# from this sorted array. Missing values are treated just like any
|
| 909 |
+
# other category. The low-support categories will always be mapped to
|
| 910 |
+
# the right child. We scan the sorted categories array from left to
|
| 911 |
+
# right and from right to left, and we stop at the middle.
|
| 912 |
+
|
| 913 |
+
# Considering ordered categories A B C D, with E being a low-support
|
| 914 |
+
# category: A B C D
|
| 915 |
+
# ^
|
| 916 |
+
# midpoint
|
| 917 |
+
# The scans will consider the following split-points:
|
| 918 |
+
# * left to right:
|
| 919 |
+
# A - B C D E
|
| 920 |
+
# A B - C D E
|
| 921 |
+
# * right to left:
|
| 922 |
+
# D - A B C E
|
| 923 |
+
# C D - A B E
|
| 924 |
+
|
| 925 |
+
# Note that since we stop at the middle and since low-support
|
| 926 |
+
# categories (E) are always mapped to the right, the following splits
|
| 927 |
+
# aren't considered:
|
| 928 |
+
# A E - B C D
|
| 929 |
+
# D E - A B C
|
| 930 |
+
# Basically, we're forcing E to always be mapped to the child that has
|
| 931 |
+
# *at least half of the categories* (and this child is always the right
|
| 932 |
+
# child, by convention).
|
| 933 |
+
|
| 934 |
+
# Also note that if we scanned in only one direction (e.g. left to
|
| 935 |
+
# right), we would only consider the following splits:
|
| 936 |
+
# A - B C D E
|
| 937 |
+
# A B - C D E
|
| 938 |
+
# A B C - D E
|
| 939 |
+
# and thus we would be missing on D - A B C E and on C D - A B E
|
| 940 |
+
|
| 941 |
+
cat_infos = <categorical_info *> malloc(
|
| 942 |
+
(n_bins_non_missing + has_missing_values) * sizeof(categorical_info))
|
| 943 |
+
|
| 944 |
+
# fill cat_infos while filtering out categories based on MIN_CAT_SUPPORT
|
| 945 |
+
for bin_idx in range(n_bins_non_missing):
|
| 946 |
+
if self.hessians_are_constant:
|
| 947 |
+
sum_hessians_bin = feature_hist[bin_idx].count
|
| 948 |
+
else:
|
| 949 |
+
sum_hessians_bin = feature_hist[bin_idx].sum_hessians
|
| 950 |
+
if sum_hessians_bin * support_factor >= MIN_CAT_SUPPORT:
|
| 951 |
+
cat_infos[n_used_bins].bin_idx = bin_idx
|
| 952 |
+
sum_gradients_bin = feature_hist[bin_idx].sum_gradients
|
| 953 |
+
|
| 954 |
+
cat_infos[n_used_bins].value = (
|
| 955 |
+
sum_gradients_bin / (sum_hessians_bin + MIN_CAT_SUPPORT)
|
| 956 |
+
)
|
| 957 |
+
n_used_bins += 1
|
| 958 |
+
|
| 959 |
+
# Also add missing values bin so that nans are considered as a category
|
| 960 |
+
if has_missing_values:
|
| 961 |
+
if self.hessians_are_constant:
|
| 962 |
+
sum_hessians_bin = feature_hist[missing_values_bin_idx].count
|
| 963 |
+
else:
|
| 964 |
+
sum_hessians_bin = feature_hist[missing_values_bin_idx].sum_hessians
|
| 965 |
+
if sum_hessians_bin * support_factor >= MIN_CAT_SUPPORT:
|
| 966 |
+
cat_infos[n_used_bins].bin_idx = missing_values_bin_idx
|
| 967 |
+
sum_gradients_bin = (
|
| 968 |
+
feature_hist[missing_values_bin_idx].sum_gradients
|
| 969 |
+
)
|
| 970 |
+
|
| 971 |
+
cat_infos[n_used_bins].value = (
|
| 972 |
+
sum_gradients_bin / (sum_hessians_bin + MIN_CAT_SUPPORT)
|
| 973 |
+
)
|
| 974 |
+
n_used_bins += 1
|
| 975 |
+
|
| 976 |
+
# not enough categories to form a split
|
| 977 |
+
if n_used_bins <= 1:
|
| 978 |
+
free(cat_infos)
|
| 979 |
+
return
|
| 980 |
+
|
| 981 |
+
qsort(cat_infos, n_used_bins, sizeof(categorical_info),
|
| 982 |
+
compare_cat_infos)
|
| 983 |
+
|
| 984 |
+
loss_current_node = _loss_from_value(value, sum_gradients)
|
| 985 |
+
|
| 986 |
+
scan_direction[0], scan_direction[1] = 1, -1
|
| 987 |
+
for direction in scan_direction:
|
| 988 |
+
if direction == 1:
|
| 989 |
+
middle = (n_used_bins + 1) // 2
|
| 990 |
+
else:
|
| 991 |
+
middle = (n_used_bins + 1) // 2 - 1
|
| 992 |
+
|
| 993 |
+
# The categories we'll consider will go to the left child
|
| 994 |
+
sum_gradient_left, sum_hessian_left = 0., 0.
|
| 995 |
+
n_samples_left = 0
|
| 996 |
+
|
| 997 |
+
for i in range(middle):
|
| 998 |
+
sorted_cat_idx = i if direction == 1 else n_used_bins - 1 - i
|
| 999 |
+
bin_idx = cat_infos[sorted_cat_idx].bin_idx
|
| 1000 |
+
|
| 1001 |
+
n_samples_left += feature_hist[bin_idx].count
|
| 1002 |
+
n_samples_right = n_samples - n_samples_left
|
| 1003 |
+
|
| 1004 |
+
if self.hessians_are_constant:
|
| 1005 |
+
sum_hessian_left += feature_hist[bin_idx].count
|
| 1006 |
+
else:
|
| 1007 |
+
sum_hessian_left += feature_hist[bin_idx].sum_hessians
|
| 1008 |
+
sum_hessian_right = sum_hessians - sum_hessian_left
|
| 1009 |
+
|
| 1010 |
+
sum_gradient_left += feature_hist[bin_idx].sum_gradients
|
| 1011 |
+
sum_gradient_right = sum_gradients - sum_gradient_left
|
| 1012 |
+
|
| 1013 |
+
if (
|
| 1014 |
+
n_samples_left < self.min_samples_leaf or
|
| 1015 |
+
sum_hessian_left < self.min_hessian_to_split
|
| 1016 |
+
):
|
| 1017 |
+
continue
|
| 1018 |
+
if (
|
| 1019 |
+
n_samples_right < self.min_samples_leaf or
|
| 1020 |
+
sum_hessian_right < self.min_hessian_to_split
|
| 1021 |
+
):
|
| 1022 |
+
break
|
| 1023 |
+
|
| 1024 |
+
gain = _split_gain(sum_gradient_left, sum_hessian_left,
|
| 1025 |
+
sum_gradient_right, sum_hessian_right,
|
| 1026 |
+
loss_current_node, monotonic_cst,
|
| 1027 |
+
lower_bound, upper_bound,
|
| 1028 |
+
self.l2_regularization)
|
| 1029 |
+
if gain > best_gain and gain > self.min_gain_to_split:
|
| 1030 |
+
found_better_split = True
|
| 1031 |
+
best_gain = gain
|
| 1032 |
+
best_cat_infos_thresh = sorted_cat_idx
|
| 1033 |
+
best_sum_gradient_left = sum_gradient_left
|
| 1034 |
+
best_sum_hessian_left = sum_hessian_left
|
| 1035 |
+
best_n_samples_left = n_samples_left
|
| 1036 |
+
best_direction = direction
|
| 1037 |
+
|
| 1038 |
+
if found_better_split:
|
| 1039 |
+
split_info.gain = best_gain
|
| 1040 |
+
|
| 1041 |
+
# split_info.bin_idx is unused for categorical splits: left_cat_bitset
|
| 1042 |
+
# is used instead and set below
|
| 1043 |
+
split_info.bin_idx = 0
|
| 1044 |
+
|
| 1045 |
+
split_info.sum_gradient_left = best_sum_gradient_left
|
| 1046 |
+
split_info.sum_gradient_right = sum_gradients - best_sum_gradient_left
|
| 1047 |
+
split_info.sum_hessian_left = best_sum_hessian_left
|
| 1048 |
+
split_info.sum_hessian_right = sum_hessians - best_sum_hessian_left
|
| 1049 |
+
split_info.n_samples_left = best_n_samples_left
|
| 1050 |
+
split_info.n_samples_right = n_samples - best_n_samples_left
|
| 1051 |
+
|
| 1052 |
+
# We recompute best values here but it's cheap
|
| 1053 |
+
split_info.value_left = compute_node_value(
|
| 1054 |
+
split_info.sum_gradient_left, split_info.sum_hessian_left,
|
| 1055 |
+
lower_bound, upper_bound, self.l2_regularization)
|
| 1056 |
+
|
| 1057 |
+
split_info.value_right = compute_node_value(
|
| 1058 |
+
split_info.sum_gradient_right, split_info.sum_hessian_right,
|
| 1059 |
+
lower_bound, upper_bound, self.l2_regularization)
|
| 1060 |
+
|
| 1061 |
+
# create bitset with values from best_cat_infos_thresh
|
| 1062 |
+
init_bitset(split_info.left_cat_bitset)
|
| 1063 |
+
if best_direction == 1:
|
| 1064 |
+
for sorted_cat_idx in range(best_cat_infos_thresh + 1):
|
| 1065 |
+
bin_idx = cat_infos[sorted_cat_idx].bin_idx
|
| 1066 |
+
set_bitset(split_info.left_cat_bitset, bin_idx)
|
| 1067 |
+
else:
|
| 1068 |
+
for sorted_cat_idx in range(n_used_bins - 1, best_cat_infos_thresh - 1, -1):
|
| 1069 |
+
bin_idx = cat_infos[sorted_cat_idx].bin_idx
|
| 1070 |
+
set_bitset(split_info.left_cat_bitset, bin_idx)
|
| 1071 |
+
|
| 1072 |
+
if has_missing_values:
|
| 1073 |
+
split_info.missing_go_to_left = in_bitset(
|
| 1074 |
+
split_info.left_cat_bitset, missing_values_bin_idx)
|
| 1075 |
+
|
| 1076 |
+
free(cat_infos)
|
| 1077 |
+
|
| 1078 |
+
|
| 1079 |
+
cdef int compare_cat_infos(const void * a, const void * b) noexcept nogil:
|
| 1080 |
+
return -1 if (<categorical_info *>a).value < (<categorical_info *>b).value else 1
|
| 1081 |
+
|
| 1082 |
+
cdef inline Y_DTYPE_C _split_gain(
|
| 1083 |
+
Y_DTYPE_C sum_gradient_left,
|
| 1084 |
+
Y_DTYPE_C sum_hessian_left,
|
| 1085 |
+
Y_DTYPE_C sum_gradient_right,
|
| 1086 |
+
Y_DTYPE_C sum_hessian_right,
|
| 1087 |
+
Y_DTYPE_C loss_current_node,
|
| 1088 |
+
signed char monotonic_cst,
|
| 1089 |
+
Y_DTYPE_C lower_bound,
|
| 1090 |
+
Y_DTYPE_C upper_bound,
|
| 1091 |
+
Y_DTYPE_C l2_regularization) noexcept nogil:
|
| 1092 |
+
"""Loss reduction
|
| 1093 |
+
|
| 1094 |
+
Compute the reduction in loss after taking a split, compared to keeping
|
| 1095 |
+
the node a leaf of the tree.
|
| 1096 |
+
|
| 1097 |
+
See Equation 7 of:
|
| 1098 |
+
:arxiv:`T. Chen, C. Guestrin, (2016) XGBoost: A Scalable Tree Boosting System,
|
| 1099 |
+
<1603.02754>.`
|
| 1100 |
+
"""
|
| 1101 |
+
cdef:
|
| 1102 |
+
Y_DTYPE_C gain
|
| 1103 |
+
Y_DTYPE_C value_left
|
| 1104 |
+
Y_DTYPE_C value_right
|
| 1105 |
+
|
| 1106 |
+
# Compute values of potential left and right children
|
| 1107 |
+
value_left = compute_node_value(sum_gradient_left, sum_hessian_left,
|
| 1108 |
+
lower_bound, upper_bound,
|
| 1109 |
+
l2_regularization)
|
| 1110 |
+
value_right = compute_node_value(sum_gradient_right, sum_hessian_right,
|
| 1111 |
+
lower_bound, upper_bound,
|
| 1112 |
+
l2_regularization)
|
| 1113 |
+
|
| 1114 |
+
if ((monotonic_cst == MonotonicConstraint.POS and value_left > value_right) or
|
| 1115 |
+
(monotonic_cst == MonotonicConstraint.NEG and value_left < value_right)):
|
| 1116 |
+
# don't consider this split since it does not respect the monotonic
|
| 1117 |
+
# constraints. Note that these comparisons need to be done on values
|
| 1118 |
+
# that have already been clipped to take the monotonic constraints into
|
| 1119 |
+
# account (if any).
|
| 1120 |
+
return -1
|
| 1121 |
+
|
| 1122 |
+
gain = loss_current_node
|
| 1123 |
+
gain -= _loss_from_value(value_left, sum_gradient_left)
|
| 1124 |
+
gain -= _loss_from_value(value_right, sum_gradient_right)
|
| 1125 |
+
# Note that for the gain to be correct (and for min_gain_to_split to work
|
| 1126 |
+
# as expected), we need all values to be bounded (current node, left child
|
| 1127 |
+
# and right child).
|
| 1128 |
+
|
| 1129 |
+
return gain
|
| 1130 |
+
|
| 1131 |
+
cdef inline Y_DTYPE_C _loss_from_value(
|
| 1132 |
+
Y_DTYPE_C value,
|
| 1133 |
+
Y_DTYPE_C sum_gradient) noexcept nogil:
|
| 1134 |
+
"""Return loss of a node from its (bounded) value
|
| 1135 |
+
|
| 1136 |
+
See Equation 6 of:
|
| 1137 |
+
:arxiv:`T. Chen, C. Guestrin, (2016) XGBoost: A Scalable Tree Boosting System,
|
| 1138 |
+
<1603.02754>.`
|
| 1139 |
+
"""
|
| 1140 |
+
return sum_gradient * value
|
| 1141 |
+
|
| 1142 |
+
cdef inline uint8_t sample_goes_left(
|
| 1143 |
+
uint8_t missing_go_to_left,
|
| 1144 |
+
uint8_t missing_values_bin_idx,
|
| 1145 |
+
X_BINNED_DTYPE_C split_bin_idx,
|
| 1146 |
+
X_BINNED_DTYPE_C bin_value,
|
| 1147 |
+
uint8_t is_categorical,
|
| 1148 |
+
BITSET_DTYPE_C left_cat_bitset) noexcept nogil:
|
| 1149 |
+
"""Helper to decide whether sample should go to left or right child."""
|
| 1150 |
+
|
| 1151 |
+
if is_categorical:
|
| 1152 |
+
# note: if any, missing values are encoded in left_cat_bitset
|
| 1153 |
+
return in_bitset(left_cat_bitset, bin_value)
|
| 1154 |
+
else:
|
| 1155 |
+
return (
|
| 1156 |
+
(
|
| 1157 |
+
missing_go_to_left and
|
| 1158 |
+
bin_value == missing_values_bin_idx
|
| 1159 |
+
)
|
| 1160 |
+
or (
|
| 1161 |
+
bin_value <= split_bin_idx
|
| 1162 |
+
))
|
| 1163 |
+
|
| 1164 |
+
|
| 1165 |
+
cpdef inline Y_DTYPE_C compute_node_value(
|
| 1166 |
+
Y_DTYPE_C sum_gradient,
|
| 1167 |
+
Y_DTYPE_C sum_hessian,
|
| 1168 |
+
Y_DTYPE_C lower_bound,
|
| 1169 |
+
Y_DTYPE_C upper_bound,
|
| 1170 |
+
Y_DTYPE_C l2_regularization) noexcept nogil:
|
| 1171 |
+
"""Compute a node's value.
|
| 1172 |
+
|
| 1173 |
+
The value is capped in the [lower_bound, upper_bound] interval to respect
|
| 1174 |
+
monotonic constraints. Shrinkage is ignored.
|
| 1175 |
+
|
| 1176 |
+
See Equation 5 of:
|
| 1177 |
+
:arxiv:`T. Chen, C. Guestrin, (2016) XGBoost: A Scalable Tree Boosting System,
|
| 1178 |
+
<1603.02754>.`
|
| 1179 |
+
"""
|
| 1180 |
+
|
| 1181 |
+
cdef:
|
| 1182 |
+
Y_DTYPE_C value
|
| 1183 |
+
|
| 1184 |
+
value = -sum_gradient / (sum_hessian + l2_regularization + 1e-15)
|
| 1185 |
+
|
| 1186 |
+
if value < lower_bound:
|
| 1187 |
+
value = lower_bound
|
| 1188 |
+
elif value > upper_bound:
|
| 1189 |
+
value = upper_bound
|
| 1190 |
+
|
| 1191 |
+
return value
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (204 Bytes). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc
ADDED
|
Binary file (1.86 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc
ADDED
|
Binary file (13.8 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc
ADDED
|
Binary file (4.78 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_constraints.cpython-310.pyc
ADDED
|
Binary file (9.05 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc
ADDED
|
Binary file (4.51 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc
ADDED
|
Binary file (15.6 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc
ADDED
|
Binary file (4.69 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py
ADDED
|
@@ -0,0 +1,446 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from sklearn.ensemble import (
|
| 7 |
+
HistGradientBoostingClassifier,
|
| 8 |
+
HistGradientBoostingRegressor,
|
| 9 |
+
)
|
| 10 |
+
from sklearn.ensemble._hist_gradient_boosting.common import (
|
| 11 |
+
G_H_DTYPE,
|
| 12 |
+
X_BINNED_DTYPE,
|
| 13 |
+
MonotonicConstraint,
|
| 14 |
+
)
|
| 15 |
+
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
|
| 16 |
+
from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder
|
| 17 |
+
from sklearn.ensemble._hist_gradient_boosting.splitting import (
|
| 18 |
+
Splitter,
|
| 19 |
+
compute_node_value,
|
| 20 |
+
)
|
| 21 |
+
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
|
| 22 |
+
from sklearn.utils._testing import _convert_container
|
| 23 |
+
|
| 24 |
+
n_threads = _openmp_effective_n_threads()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def is_increasing(a):
|
| 28 |
+
return (np.diff(a) >= 0.0).all()
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def is_decreasing(a):
|
| 32 |
+
return (np.diff(a) <= 0.0).all()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def assert_leaves_values_monotonic(predictor, monotonic_cst):
|
| 36 |
+
# make sure leaves values (from left to right) are either all increasing
|
| 37 |
+
# or all decreasing (or neither) depending on the monotonic constraint.
|
| 38 |
+
nodes = predictor.nodes
|
| 39 |
+
|
| 40 |
+
def get_leaves_values():
|
| 41 |
+
"""get leaves values from left to right"""
|
| 42 |
+
values = []
|
| 43 |
+
|
| 44 |
+
def depth_first_collect_leaf_values(node_idx):
|
| 45 |
+
node = nodes[node_idx]
|
| 46 |
+
if node["is_leaf"]:
|
| 47 |
+
values.append(node["value"])
|
| 48 |
+
return
|
| 49 |
+
depth_first_collect_leaf_values(node["left"])
|
| 50 |
+
depth_first_collect_leaf_values(node["right"])
|
| 51 |
+
|
| 52 |
+
depth_first_collect_leaf_values(0) # start at root (0)
|
| 53 |
+
return values
|
| 54 |
+
|
| 55 |
+
values = get_leaves_values()
|
| 56 |
+
|
| 57 |
+
if monotonic_cst == MonotonicConstraint.NO_CST:
|
| 58 |
+
# some increasing, some decreasing
|
| 59 |
+
assert not is_increasing(values) and not is_decreasing(values)
|
| 60 |
+
elif monotonic_cst == MonotonicConstraint.POS:
|
| 61 |
+
# all increasing
|
| 62 |
+
assert is_increasing(values)
|
| 63 |
+
else: # NEG
|
| 64 |
+
# all decreasing
|
| 65 |
+
assert is_decreasing(values)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def assert_children_values_monotonic(predictor, monotonic_cst):
|
| 69 |
+
# Make sure siblings values respect the monotonic constraints. Left should
|
| 70 |
+
# be lower (resp greater) than right child if constraint is POS (resp.
|
| 71 |
+
# NEG).
|
| 72 |
+
# Note that this property alone isn't enough to ensure full monotonicity,
|
| 73 |
+
# since we also need to guanrantee that all the descendents of the left
|
| 74 |
+
# child won't be greater (resp. lower) than the right child, or its
|
| 75 |
+
# descendents. That's why we need to bound the predicted values (this is
|
| 76 |
+
# tested in assert_children_values_bounded)
|
| 77 |
+
nodes = predictor.nodes
|
| 78 |
+
left_lower = []
|
| 79 |
+
left_greater = []
|
| 80 |
+
for node in nodes:
|
| 81 |
+
if node["is_leaf"]:
|
| 82 |
+
continue
|
| 83 |
+
|
| 84 |
+
left_idx = node["left"]
|
| 85 |
+
right_idx = node["right"]
|
| 86 |
+
|
| 87 |
+
if nodes[left_idx]["value"] < nodes[right_idx]["value"]:
|
| 88 |
+
left_lower.append(node)
|
| 89 |
+
elif nodes[left_idx]["value"] > nodes[right_idx]["value"]:
|
| 90 |
+
left_greater.append(node)
|
| 91 |
+
|
| 92 |
+
if monotonic_cst == MonotonicConstraint.NO_CST:
|
| 93 |
+
assert left_lower and left_greater
|
| 94 |
+
elif monotonic_cst == MonotonicConstraint.POS:
|
| 95 |
+
assert left_lower and not left_greater
|
| 96 |
+
else: # NEG
|
| 97 |
+
assert not left_lower and left_greater
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def assert_children_values_bounded(grower, monotonic_cst):
|
| 101 |
+
# Make sure that the values of the children of a node are bounded by the
|
| 102 |
+
# middle value between that node and its sibling (if there is a monotonic
|
| 103 |
+
# constraint).
|
| 104 |
+
# As a bonus, we also check that the siblings values are properly ordered
|
| 105 |
+
# which is slightly redundant with assert_children_values_monotonic (but
|
| 106 |
+
# this check is done on the grower nodes whereas
|
| 107 |
+
# assert_children_values_monotonic is done on the predictor nodes)
|
| 108 |
+
|
| 109 |
+
if monotonic_cst == MonotonicConstraint.NO_CST:
|
| 110 |
+
return
|
| 111 |
+
|
| 112 |
+
def recursively_check_children_node_values(node, right_sibling=None):
|
| 113 |
+
if node.is_leaf:
|
| 114 |
+
return
|
| 115 |
+
if right_sibling is not None:
|
| 116 |
+
middle = (node.value + right_sibling.value) / 2
|
| 117 |
+
if monotonic_cst == MonotonicConstraint.POS:
|
| 118 |
+
assert node.left_child.value <= node.right_child.value <= middle
|
| 119 |
+
if not right_sibling.is_leaf:
|
| 120 |
+
assert (
|
| 121 |
+
middle
|
| 122 |
+
<= right_sibling.left_child.value
|
| 123 |
+
<= right_sibling.right_child.value
|
| 124 |
+
)
|
| 125 |
+
else: # NEG
|
| 126 |
+
assert node.left_child.value >= node.right_child.value >= middle
|
| 127 |
+
if not right_sibling.is_leaf:
|
| 128 |
+
assert (
|
| 129 |
+
middle
|
| 130 |
+
>= right_sibling.left_child.value
|
| 131 |
+
>= right_sibling.right_child.value
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
recursively_check_children_node_values(
|
| 135 |
+
node.left_child, right_sibling=node.right_child
|
| 136 |
+
)
|
| 137 |
+
recursively_check_children_node_values(node.right_child)
|
| 138 |
+
|
| 139 |
+
recursively_check_children_node_values(grower.root)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@pytest.mark.parametrize("seed", range(3))
|
| 143 |
+
@pytest.mark.parametrize(
|
| 144 |
+
"monotonic_cst",
|
| 145 |
+
(
|
| 146 |
+
MonotonicConstraint.NO_CST,
|
| 147 |
+
MonotonicConstraint.POS,
|
| 148 |
+
MonotonicConstraint.NEG,
|
| 149 |
+
),
|
| 150 |
+
)
|
| 151 |
+
def test_nodes_values(monotonic_cst, seed):
|
| 152 |
+
# Build a single tree with only one feature, and make sure the nodes
|
| 153 |
+
# values respect the monotonic constraints.
|
| 154 |
+
|
| 155 |
+
# Considering the following tree with a monotonic POS constraint, we
|
| 156 |
+
# should have:
|
| 157 |
+
#
|
| 158 |
+
# root
|
| 159 |
+
# / \
|
| 160 |
+
# 5 10 # middle = 7.5
|
| 161 |
+
# / \ / \
|
| 162 |
+
# a b c d
|
| 163 |
+
#
|
| 164 |
+
# a <= b and c <= d (assert_children_values_monotonic)
|
| 165 |
+
# a, b <= middle <= c, d (assert_children_values_bounded)
|
| 166 |
+
# a <= b <= c <= d (assert_leaves_values_monotonic)
|
| 167 |
+
#
|
| 168 |
+
# The last one is a consequence of the others, but can't hurt to check
|
| 169 |
+
|
| 170 |
+
rng = np.random.RandomState(seed)
|
| 171 |
+
n_samples = 1000
|
| 172 |
+
n_features = 1
|
| 173 |
+
X_binned = rng.randint(0, 255, size=(n_samples, n_features), dtype=np.uint8)
|
| 174 |
+
X_binned = np.asfortranarray(X_binned)
|
| 175 |
+
|
| 176 |
+
gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
|
| 177 |
+
hessians = np.ones(shape=1, dtype=G_H_DTYPE)
|
| 178 |
+
|
| 179 |
+
grower = TreeGrower(
|
| 180 |
+
X_binned, gradients, hessians, monotonic_cst=[monotonic_cst], shrinkage=0.1
|
| 181 |
+
)
|
| 182 |
+
grower.grow()
|
| 183 |
+
|
| 184 |
+
# grow() will shrink the leaves values at the very end. For our comparison
|
| 185 |
+
# tests, we need to revert the shrinkage of the leaves, else we would
|
| 186 |
+
# compare the value of a leaf (shrunk) with a node (not shrunk) and the
|
| 187 |
+
# test would not be correct.
|
| 188 |
+
for leave in grower.finalized_leaves:
|
| 189 |
+
leave.value /= grower.shrinkage
|
| 190 |
+
|
| 191 |
+
# We pass undefined binning_thresholds because we won't use predict anyway
|
| 192 |
+
predictor = grower.make_predictor(
|
| 193 |
+
binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1))
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
# The consistency of the bounds can only be checked on the tree grower
|
| 197 |
+
# as the node bounds are not copied into the predictor tree. The
|
| 198 |
+
# consistency checks on the values of node children and leaves can be
|
| 199 |
+
# done either on the grower tree or on the predictor tree. We only
|
| 200 |
+
# do those checks on the predictor tree as the latter is derived from
|
| 201 |
+
# the former.
|
| 202 |
+
assert_children_values_monotonic(predictor, monotonic_cst)
|
| 203 |
+
assert_children_values_bounded(grower, monotonic_cst)
|
| 204 |
+
assert_leaves_values_monotonic(predictor, monotonic_cst)
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
@pytest.mark.parametrize("use_feature_names", (True, False))
|
| 208 |
+
def test_predictions(global_random_seed, use_feature_names):
|
| 209 |
+
# Train a model with a POS constraint on the first non-categorical feature
|
| 210 |
+
# and a NEG constraint on the second non-categorical feature, and make sure
|
| 211 |
+
# the constraints are respected by checking the predictions.
|
| 212 |
+
# test adapted from lightgbm's test_monotone_constraint(), itself inspired
|
| 213 |
+
# by https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html
|
| 214 |
+
|
| 215 |
+
rng = np.random.RandomState(global_random_seed)
|
| 216 |
+
|
| 217 |
+
n_samples = 1000
|
| 218 |
+
f_0 = rng.rand(n_samples) # positive correlation with y
|
| 219 |
+
f_1 = rng.rand(n_samples) # negative correlation with y
|
| 220 |
+
|
| 221 |
+
# extra categorical features, no correlation with y,
|
| 222 |
+
# to check the correctness of monotonicity constraint remapping, see issue #28898
|
| 223 |
+
f_a = rng.randint(low=0, high=9, size=n_samples)
|
| 224 |
+
f_b = rng.randint(low=0, high=9, size=n_samples)
|
| 225 |
+
f_c = rng.randint(low=0, high=9, size=n_samples)
|
| 226 |
+
|
| 227 |
+
X = np.c_[f_a, f_0, f_b, f_1, f_c]
|
| 228 |
+
columns_name = ["f_a", "f_0", "f_b", "f_1", "f_c"]
|
| 229 |
+
constructor_name = "dataframe" if use_feature_names else "array"
|
| 230 |
+
X = _convert_container(X, constructor_name, columns_name=columns_name)
|
| 231 |
+
|
| 232 |
+
noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
|
| 233 |
+
y = 5 * f_0 + np.sin(10 * np.pi * f_0) - 5 * f_1 - np.cos(10 * np.pi * f_1) + noise
|
| 234 |
+
|
| 235 |
+
if use_feature_names:
|
| 236 |
+
monotonic_cst = {"f_0": +1, "f_1": -1}
|
| 237 |
+
categorical_features = ["f_a", "f_b", "f_c"]
|
| 238 |
+
else:
|
| 239 |
+
monotonic_cst = [0, +1, 0, -1, 0]
|
| 240 |
+
categorical_features = [0, 2, 4]
|
| 241 |
+
|
| 242 |
+
gbdt = HistGradientBoostingRegressor(
|
| 243 |
+
monotonic_cst=monotonic_cst, categorical_features=categorical_features
|
| 244 |
+
)
|
| 245 |
+
gbdt.fit(X, y)
|
| 246 |
+
|
| 247 |
+
linspace = np.linspace(0, 1, 100)
|
| 248 |
+
sin = np.sin(linspace)
|
| 249 |
+
constant = np.full_like(linspace, fill_value=0.5)
|
| 250 |
+
|
| 251 |
+
# We now assert the predictions properly respect the constraints, on each
|
| 252 |
+
# feature. When testing for a feature we need to set the other one to a
|
| 253 |
+
# constant, because the monotonic constraints are only a "all else being
|
| 254 |
+
# equal" type of constraints:
|
| 255 |
+
# a constraint on the first feature only means that
|
| 256 |
+
# x0 < x0' => f(x0, x1) < f(x0', x1)
|
| 257 |
+
# while x1 stays constant.
|
| 258 |
+
# The constraint does not guanrantee that
|
| 259 |
+
# x0 < x0' => f(x0, x1) < f(x0', x1')
|
| 260 |
+
|
| 261 |
+
# First non-categorical feature (POS)
|
| 262 |
+
# assert pred is all increasing when f_0 is all increasing
|
| 263 |
+
X = np.c_[constant, linspace, constant, constant, constant]
|
| 264 |
+
X = _convert_container(X, constructor_name, columns_name=columns_name)
|
| 265 |
+
pred = gbdt.predict(X)
|
| 266 |
+
assert is_increasing(pred)
|
| 267 |
+
# assert pred actually follows the variations of f_0
|
| 268 |
+
X = np.c_[constant, sin, constant, constant, constant]
|
| 269 |
+
X = _convert_container(X, constructor_name, columns_name=columns_name)
|
| 270 |
+
pred = gbdt.predict(X)
|
| 271 |
+
assert np.all((np.diff(pred) >= 0) == (np.diff(sin) >= 0))
|
| 272 |
+
|
| 273 |
+
# Second non-categorical feature (NEG)
|
| 274 |
+
# assert pred is all decreasing when f_1 is all increasing
|
| 275 |
+
X = np.c_[constant, constant, constant, linspace, constant]
|
| 276 |
+
X = _convert_container(X, constructor_name, columns_name=columns_name)
|
| 277 |
+
pred = gbdt.predict(X)
|
| 278 |
+
assert is_decreasing(pred)
|
| 279 |
+
# assert pred actually follows the inverse variations of f_1
|
| 280 |
+
X = np.c_[constant, constant, constant, sin, constant]
|
| 281 |
+
X = _convert_container(X, constructor_name, columns_name=columns_name)
|
| 282 |
+
pred = gbdt.predict(X)
|
| 283 |
+
assert ((np.diff(pred) <= 0) == (np.diff(sin) >= 0)).all()
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def test_input_error():
|
| 287 |
+
X = [[1, 2], [2, 3], [3, 4]]
|
| 288 |
+
y = [0, 1, 2]
|
| 289 |
+
|
| 290 |
+
gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, 0, -1])
|
| 291 |
+
with pytest.raises(
|
| 292 |
+
ValueError, match=re.escape("monotonic_cst has shape (3,) but the input data")
|
| 293 |
+
):
|
| 294 |
+
gbdt.fit(X, y)
|
| 295 |
+
|
| 296 |
+
for monotonic_cst in ([1, 3], [1, -3], [0.3, -0.7]):
|
| 297 |
+
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
|
| 298 |
+
expected_msg = re.escape(
|
| 299 |
+
"must be an array-like of -1, 0 or 1. Observed values:"
|
| 300 |
+
)
|
| 301 |
+
with pytest.raises(ValueError, match=expected_msg):
|
| 302 |
+
gbdt.fit(X, y)
|
| 303 |
+
|
| 304 |
+
gbdt = HistGradientBoostingClassifier(monotonic_cst=[0, 1])
|
| 305 |
+
with pytest.raises(
|
| 306 |
+
ValueError,
|
| 307 |
+
match="monotonic constraints are not supported for multiclass classification",
|
| 308 |
+
):
|
| 309 |
+
gbdt.fit(X, y)
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def test_input_error_related_to_feature_names():
|
| 313 |
+
pd = pytest.importorskip("pandas")
|
| 314 |
+
X = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]})
|
| 315 |
+
y = np.array([0, 1, 0])
|
| 316 |
+
|
| 317 |
+
monotonic_cst = {"d": 1, "a": 1, "c": -1}
|
| 318 |
+
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
|
| 319 |
+
expected_msg = re.escape(
|
| 320 |
+
"monotonic_cst contains 2 unexpected feature names: ['c', 'd']."
|
| 321 |
+
)
|
| 322 |
+
with pytest.raises(ValueError, match=expected_msg):
|
| 323 |
+
gbdt.fit(X, y)
|
| 324 |
+
|
| 325 |
+
monotonic_cst = {k: 1 for k in "abcdefghijklmnopqrstuvwxyz"}
|
| 326 |
+
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
|
| 327 |
+
expected_msg = re.escape(
|
| 328 |
+
"monotonic_cst contains 24 unexpected feature names: "
|
| 329 |
+
"['c', 'd', 'e', 'f', 'g', '...']."
|
| 330 |
+
)
|
| 331 |
+
with pytest.raises(ValueError, match=expected_msg):
|
| 332 |
+
gbdt.fit(X, y)
|
| 333 |
+
|
| 334 |
+
monotonic_cst = {"a": 1}
|
| 335 |
+
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
|
| 336 |
+
expected_msg = re.escape(
|
| 337 |
+
"HistGradientBoostingRegressor was not fitted on data with feature "
|
| 338 |
+
"names. Pass monotonic_cst as an integer array instead."
|
| 339 |
+
)
|
| 340 |
+
with pytest.raises(ValueError, match=expected_msg):
|
| 341 |
+
gbdt.fit(X.values, y)
|
| 342 |
+
|
| 343 |
+
monotonic_cst = {"b": -1, "a": "+"}
|
| 344 |
+
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
|
| 345 |
+
expected_msg = re.escape("monotonic_cst['a'] must be either -1, 0 or 1. Got '+'.")
|
| 346 |
+
with pytest.raises(ValueError, match=expected_msg):
|
| 347 |
+
gbdt.fit(X, y)
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def test_bounded_value_min_gain_to_split():
|
| 351 |
+
# The purpose of this test is to show that when computing the gain at a
|
| 352 |
+
# given split, the value of the current node should be properly bounded to
|
| 353 |
+
# respect the monotonic constraints, because it strongly interacts with
|
| 354 |
+
# min_gain_to_split. We build a simple example where gradients are [1, 1,
|
| 355 |
+
# 100, 1, 1] (hessians are all ones). The best split happens on the 3rd
|
| 356 |
+
# bin, and depending on whether the value of the node is bounded or not,
|
| 357 |
+
# the min_gain_to_split constraint is or isn't satisfied.
|
| 358 |
+
l2_regularization = 0
|
| 359 |
+
min_hessian_to_split = 0
|
| 360 |
+
min_samples_leaf = 1
|
| 361 |
+
n_bins = n_samples = 5
|
| 362 |
+
X_binned = np.arange(n_samples).reshape(-1, 1).astype(X_BINNED_DTYPE)
|
| 363 |
+
sample_indices = np.arange(n_samples, dtype=np.uint32)
|
| 364 |
+
all_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
|
| 365 |
+
all_gradients = np.array([1, 1, 100, 1, 1], dtype=G_H_DTYPE)
|
| 366 |
+
sum_gradients = all_gradients.sum()
|
| 367 |
+
sum_hessians = all_hessians.sum()
|
| 368 |
+
hessians_are_constant = False
|
| 369 |
+
|
| 370 |
+
builder = HistogramBuilder(
|
| 371 |
+
X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
|
| 372 |
+
)
|
| 373 |
+
n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32)
|
| 374 |
+
has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
|
| 375 |
+
monotonic_cst = np.array(
|
| 376 |
+
[MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
|
| 377 |
+
)
|
| 378 |
+
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
|
| 379 |
+
missing_values_bin_idx = n_bins - 1
|
| 380 |
+
children_lower_bound, children_upper_bound = -np.inf, np.inf
|
| 381 |
+
|
| 382 |
+
min_gain_to_split = 2000
|
| 383 |
+
splitter = Splitter(
|
| 384 |
+
X_binned,
|
| 385 |
+
n_bins_non_missing,
|
| 386 |
+
missing_values_bin_idx,
|
| 387 |
+
has_missing_values,
|
| 388 |
+
is_categorical,
|
| 389 |
+
monotonic_cst,
|
| 390 |
+
l2_regularization,
|
| 391 |
+
min_hessian_to_split,
|
| 392 |
+
min_samples_leaf,
|
| 393 |
+
min_gain_to_split,
|
| 394 |
+
hessians_are_constant,
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
histograms = builder.compute_histograms_brute(sample_indices)
|
| 398 |
+
|
| 399 |
+
# Since the gradient array is [1, 1, 100, 1, 1]
|
| 400 |
+
# the max possible gain happens on the 3rd bin (or equivalently in the 2nd)
|
| 401 |
+
# and is equal to about 1307, which less than min_gain_to_split = 2000, so
|
| 402 |
+
# the node is considered unsplittable (gain = -1)
|
| 403 |
+
current_lower_bound, current_upper_bound = -np.inf, np.inf
|
| 404 |
+
value = compute_node_value(
|
| 405 |
+
sum_gradients,
|
| 406 |
+
sum_hessians,
|
| 407 |
+
current_lower_bound,
|
| 408 |
+
current_upper_bound,
|
| 409 |
+
l2_regularization,
|
| 410 |
+
)
|
| 411 |
+
# the unbounded value is equal to -sum_gradients / sum_hessians
|
| 412 |
+
assert value == pytest.approx(-104 / 5)
|
| 413 |
+
split_info = splitter.find_node_split(
|
| 414 |
+
n_samples,
|
| 415 |
+
histograms,
|
| 416 |
+
sum_gradients,
|
| 417 |
+
sum_hessians,
|
| 418 |
+
value,
|
| 419 |
+
lower_bound=children_lower_bound,
|
| 420 |
+
upper_bound=children_upper_bound,
|
| 421 |
+
)
|
| 422 |
+
assert split_info.gain == -1 # min_gain_to_split not respected
|
| 423 |
+
|
| 424 |
+
# here again the max possible gain is on the 3rd bin but we now cap the
|
| 425 |
+
# value of the node into [-10, inf].
|
| 426 |
+
# This means the gain is now about 2430 which is more than the
|
| 427 |
+
# min_gain_to_split constraint.
|
| 428 |
+
current_lower_bound, current_upper_bound = -10, np.inf
|
| 429 |
+
value = compute_node_value(
|
| 430 |
+
sum_gradients,
|
| 431 |
+
sum_hessians,
|
| 432 |
+
current_lower_bound,
|
| 433 |
+
current_upper_bound,
|
| 434 |
+
l2_regularization,
|
| 435 |
+
)
|
| 436 |
+
assert value == -10
|
| 437 |
+
split_info = splitter.find_node_split(
|
| 438 |
+
n_samples,
|
| 439 |
+
histograms,
|
| 440 |
+
sum_gradients,
|
| 441 |
+
sum_hessians,
|
| 442 |
+
value,
|
| 443 |
+
lower_bound=children_lower_bound,
|
| 444 |
+
upper_bound=children_upper_bound,
|
| 445 |
+
)
|
| 446 |
+
assert split_info.gain > min_gain_to_split
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
from numpy.testing import assert_allclose
|
| 4 |
+
|
| 5 |
+
from sklearn.datasets import make_regression
|
| 6 |
+
from sklearn.ensemble._hist_gradient_boosting._bitset import (
|
| 7 |
+
set_bitset_memoryview,
|
| 8 |
+
set_raw_bitset_from_binned_bitset,
|
| 9 |
+
)
|
| 10 |
+
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
|
| 11 |
+
from sklearn.ensemble._hist_gradient_boosting.common import (
|
| 12 |
+
ALMOST_INF,
|
| 13 |
+
G_H_DTYPE,
|
| 14 |
+
PREDICTOR_RECORD_DTYPE,
|
| 15 |
+
X_BINNED_DTYPE,
|
| 16 |
+
X_BITSET_INNER_DTYPE,
|
| 17 |
+
X_DTYPE,
|
| 18 |
+
)
|
| 19 |
+
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
|
| 20 |
+
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
|
| 21 |
+
from sklearn.metrics import r2_score
|
| 22 |
+
from sklearn.model_selection import train_test_split
|
| 23 |
+
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
|
| 24 |
+
|
| 25 |
+
n_threads = _openmp_effective_n_threads()
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@pytest.mark.parametrize("n_bins", [200, 256])
|
| 29 |
+
def test_regression_dataset(n_bins):
|
| 30 |
+
X, y = make_regression(
|
| 31 |
+
n_samples=500, n_features=10, n_informative=5, random_state=42
|
| 32 |
+
)
|
| 33 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
|
| 34 |
+
|
| 35 |
+
mapper = _BinMapper(n_bins=n_bins, random_state=42)
|
| 36 |
+
X_train_binned = mapper.fit_transform(X_train)
|
| 37 |
+
|
| 38 |
+
# Init gradients and hessians to that of least squares loss
|
| 39 |
+
gradients = -y_train.astype(G_H_DTYPE)
|
| 40 |
+
hessians = np.ones(1, dtype=G_H_DTYPE)
|
| 41 |
+
|
| 42 |
+
min_samples_leaf = 10
|
| 43 |
+
max_leaf_nodes = 30
|
| 44 |
+
grower = TreeGrower(
|
| 45 |
+
X_train_binned,
|
| 46 |
+
gradients,
|
| 47 |
+
hessians,
|
| 48 |
+
min_samples_leaf=min_samples_leaf,
|
| 49 |
+
max_leaf_nodes=max_leaf_nodes,
|
| 50 |
+
n_bins=n_bins,
|
| 51 |
+
n_bins_non_missing=mapper.n_bins_non_missing_,
|
| 52 |
+
)
|
| 53 |
+
grower.grow()
|
| 54 |
+
|
| 55 |
+
predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_)
|
| 56 |
+
|
| 57 |
+
known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
|
| 58 |
+
f_idx_map = np.zeros(0, dtype=np.uint32)
|
| 59 |
+
|
| 60 |
+
y_pred_train = predictor.predict(X_train, known_cat_bitsets, f_idx_map, n_threads)
|
| 61 |
+
assert r2_score(y_train, y_pred_train) > 0.82
|
| 62 |
+
|
| 63 |
+
y_pred_test = predictor.predict(X_test, known_cat_bitsets, f_idx_map, n_threads)
|
| 64 |
+
assert r2_score(y_test, y_pred_test) > 0.67
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@pytest.mark.parametrize(
|
| 68 |
+
"num_threshold, expected_predictions",
|
| 69 |
+
[
|
| 70 |
+
(-np.inf, [0, 1, 1, 1]),
|
| 71 |
+
(10, [0, 0, 1, 1]),
|
| 72 |
+
(20, [0, 0, 0, 1]),
|
| 73 |
+
(ALMOST_INF, [0, 0, 0, 1]),
|
| 74 |
+
(np.inf, [0, 0, 0, 0]),
|
| 75 |
+
],
|
| 76 |
+
)
|
| 77 |
+
def test_infinite_values_and_thresholds(num_threshold, expected_predictions):
|
| 78 |
+
# Make sure infinite values and infinite thresholds are handled properly.
|
| 79 |
+
# In particular, if a value is +inf and the threshold is ALMOST_INF the
|
| 80 |
+
# sample should go to the right child. If the threshold is inf (split on
|
| 81 |
+
# nan), the +inf sample will go to the left child.
|
| 82 |
+
|
| 83 |
+
X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1)
|
| 84 |
+
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
|
| 85 |
+
|
| 86 |
+
# We just construct a simple tree with 1 root and 2 children
|
| 87 |
+
# parent node
|
| 88 |
+
nodes[0]["left"] = 1
|
| 89 |
+
nodes[0]["right"] = 2
|
| 90 |
+
nodes[0]["feature_idx"] = 0
|
| 91 |
+
nodes[0]["num_threshold"] = num_threshold
|
| 92 |
+
|
| 93 |
+
# left child
|
| 94 |
+
nodes[1]["is_leaf"] = True
|
| 95 |
+
nodes[1]["value"] = 0
|
| 96 |
+
|
| 97 |
+
# right child
|
| 98 |
+
nodes[2]["is_leaf"] = True
|
| 99 |
+
nodes[2]["value"] = 1
|
| 100 |
+
|
| 101 |
+
binned_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
|
| 102 |
+
raw_categorical_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
|
| 103 |
+
known_cat_bitset = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
|
| 104 |
+
f_idx_map = np.zeros(0, dtype=np.uint32)
|
| 105 |
+
|
| 106 |
+
predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
|
| 107 |
+
predictions = predictor.predict(X, known_cat_bitset, f_idx_map, n_threads)
|
| 108 |
+
|
| 109 |
+
assert np.all(predictions == expected_predictions)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@pytest.mark.parametrize(
|
| 113 |
+
"bins_go_left, expected_predictions",
|
| 114 |
+
[
|
| 115 |
+
([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]),
|
| 116 |
+
([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]),
|
| 117 |
+
([3, 5, 6], [0, 0, 0, 1, 0, 1]),
|
| 118 |
+
],
|
| 119 |
+
)
|
| 120 |
+
def test_categorical_predictor(bins_go_left, expected_predictions):
|
| 121 |
+
# Test predictor outputs are correct with categorical features
|
| 122 |
+
|
| 123 |
+
X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T
|
| 124 |
+
categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE)
|
| 125 |
+
|
| 126 |
+
bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE)
|
| 127 |
+
|
| 128 |
+
# We just construct a simple tree with 1 root and 2 children
|
| 129 |
+
# parent node
|
| 130 |
+
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
|
| 131 |
+
nodes[0]["left"] = 1
|
| 132 |
+
nodes[0]["right"] = 2
|
| 133 |
+
nodes[0]["feature_idx"] = 0
|
| 134 |
+
nodes[0]["is_categorical"] = True
|
| 135 |
+
nodes[0]["missing_go_to_left"] = True
|
| 136 |
+
|
| 137 |
+
# left child
|
| 138 |
+
nodes[1]["is_leaf"] = True
|
| 139 |
+
nodes[1]["value"] = 1
|
| 140 |
+
|
| 141 |
+
# right child
|
| 142 |
+
nodes[2]["is_leaf"] = True
|
| 143 |
+
nodes[2]["value"] = 0
|
| 144 |
+
|
| 145 |
+
binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
|
| 146 |
+
raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
|
| 147 |
+
for go_left in bins_go_left:
|
| 148 |
+
set_bitset_memoryview(binned_cat_bitsets[0], go_left)
|
| 149 |
+
|
| 150 |
+
set_raw_bitset_from_binned_bitset(
|
| 151 |
+
raw_categorical_bitsets[0], binned_cat_bitsets[0], categories
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
|
| 155 |
+
|
| 156 |
+
# Check binned data gives correct predictions
|
| 157 |
+
prediction_binned = predictor.predict_binned(
|
| 158 |
+
X_binned, missing_values_bin_idx=6, n_threads=n_threads
|
| 159 |
+
)
|
| 160 |
+
assert_allclose(prediction_binned, expected_predictions)
|
| 161 |
+
|
| 162 |
+
# manually construct bitset
|
| 163 |
+
known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)
|
| 164 |
+
known_cat_bitsets[0, 0] = np.sum(2**categories, dtype=np.uint32)
|
| 165 |
+
f_idx_map = np.array([0], dtype=np.uint32)
|
| 166 |
+
|
| 167 |
+
# Check with un-binned data
|
| 168 |
+
predictions = predictor.predict(
|
| 169 |
+
categories.reshape(-1, 1), known_cat_bitsets, f_idx_map, n_threads
|
| 170 |
+
)
|
| 171 |
+
assert_allclose(predictions, expected_predictions)
|
| 172 |
+
|
| 173 |
+
# Check missing goes left because missing_values_bin_idx=6
|
| 174 |
+
X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T
|
| 175 |
+
predictions = predictor.predict_binned(
|
| 176 |
+
X_binned_missing, missing_values_bin_idx=6, n_threads=n_threads
|
| 177 |
+
)
|
| 178 |
+
assert_allclose(predictions, [1])
|
| 179 |
+
|
| 180 |
+
# missing and unknown go left
|
| 181 |
+
predictions = predictor.predict(
|
| 182 |
+
np.array([[np.nan, 17]], dtype=X_DTYPE).T,
|
| 183 |
+
known_cat_bitsets,
|
| 184 |
+
f_idx_map,
|
| 185 |
+
n_threads,
|
| 186 |
+
)
|
| 187 |
+
assert_allclose(predictions, [1, 1])
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_iforest.py
ADDED
|
@@ -0,0 +1,673 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
import numbers
|
| 5 |
+
import threading
|
| 6 |
+
from numbers import Integral, Real
|
| 7 |
+
from warnings import warn
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
from scipy.sparse import issparse
|
| 11 |
+
|
| 12 |
+
from ..base import OutlierMixin, _fit_context
|
| 13 |
+
from ..tree import ExtraTreeRegressor
|
| 14 |
+
from ..tree._tree import DTYPE as tree_dtype
|
| 15 |
+
from ..utils import (
|
| 16 |
+
check_array,
|
| 17 |
+
check_random_state,
|
| 18 |
+
gen_batches,
|
| 19 |
+
)
|
| 20 |
+
from ..utils._chunking import get_chunk_n_rows
|
| 21 |
+
from ..utils._param_validation import Interval, RealNotInt, StrOptions
|
| 22 |
+
from ..utils.parallel import Parallel, delayed
|
| 23 |
+
from ..utils.validation import _num_samples, check_is_fitted, validate_data
|
| 24 |
+
from ._bagging import BaseBagging
|
| 25 |
+
|
| 26 |
+
__all__ = ["IsolationForest"]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _parallel_compute_tree_depths(
|
| 30 |
+
tree,
|
| 31 |
+
X,
|
| 32 |
+
features,
|
| 33 |
+
tree_decision_path_lengths,
|
| 34 |
+
tree_avg_path_lengths,
|
| 35 |
+
depths,
|
| 36 |
+
lock,
|
| 37 |
+
):
|
| 38 |
+
"""Parallel computation of isolation tree depth."""
|
| 39 |
+
if features is None:
|
| 40 |
+
X_subset = X
|
| 41 |
+
else:
|
| 42 |
+
X_subset = X[:, features]
|
| 43 |
+
|
| 44 |
+
leaves_index = tree.apply(X_subset, check_input=False)
|
| 45 |
+
|
| 46 |
+
with lock:
|
| 47 |
+
depths += (
|
| 48 |
+
tree_decision_path_lengths[leaves_index]
|
| 49 |
+
+ tree_avg_path_lengths[leaves_index]
|
| 50 |
+
- 1.0
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class IsolationForest(OutlierMixin, BaseBagging):
|
| 55 |
+
"""
|
| 56 |
+
Isolation Forest Algorithm.
|
| 57 |
+
|
| 58 |
+
Return the anomaly score of each sample using the IsolationForest algorithm
|
| 59 |
+
|
| 60 |
+
The IsolationForest 'isolates' observations by randomly selecting a feature
|
| 61 |
+
and then randomly selecting a split value between the maximum and minimum
|
| 62 |
+
values of the selected feature.
|
| 63 |
+
|
| 64 |
+
Since recursive partitioning can be represented by a tree structure, the
|
| 65 |
+
number of splittings required to isolate a sample is equivalent to the path
|
| 66 |
+
length from the root node to the terminating node.
|
| 67 |
+
|
| 68 |
+
This path length, averaged over a forest of such random trees, is a
|
| 69 |
+
measure of normality and our decision function.
|
| 70 |
+
|
| 71 |
+
Random partitioning produces noticeably shorter paths for anomalies.
|
| 72 |
+
Hence, when a forest of random trees collectively produce shorter path
|
| 73 |
+
lengths for particular samples, they are highly likely to be anomalies.
|
| 74 |
+
|
| 75 |
+
Read more in the :ref:`User Guide <isolation_forest>`.
|
| 76 |
+
|
| 77 |
+
.. versionadded:: 0.18
|
| 78 |
+
|
| 79 |
+
Parameters
|
| 80 |
+
----------
|
| 81 |
+
n_estimators : int, default=100
|
| 82 |
+
The number of base estimators in the ensemble.
|
| 83 |
+
|
| 84 |
+
max_samples : "auto", int or float, default="auto"
|
| 85 |
+
The number of samples to draw from X to train each base estimator.
|
| 86 |
+
|
| 87 |
+
- If int, then draw `max_samples` samples.
|
| 88 |
+
- If float, then draw `max_samples * X.shape[0]` samples.
|
| 89 |
+
- If "auto", then `max_samples=min(256, n_samples)`.
|
| 90 |
+
|
| 91 |
+
If max_samples is larger than the number of samples provided,
|
| 92 |
+
all samples will be used for all trees (no sampling).
|
| 93 |
+
|
| 94 |
+
contamination : 'auto' or float, default='auto'
|
| 95 |
+
The amount of contamination of the data set, i.e. the proportion
|
| 96 |
+
of outliers in the data set. Used when fitting to define the threshold
|
| 97 |
+
on the scores of the samples.
|
| 98 |
+
|
| 99 |
+
- If 'auto', the threshold is determined as in the
|
| 100 |
+
original paper.
|
| 101 |
+
- If float, the contamination should be in the range (0, 0.5].
|
| 102 |
+
|
| 103 |
+
.. versionchanged:: 0.22
|
| 104 |
+
The default value of ``contamination`` changed from 0.1
|
| 105 |
+
to ``'auto'``.
|
| 106 |
+
|
| 107 |
+
max_features : int or float, default=1.0
|
| 108 |
+
The number of features to draw from X to train each base estimator.
|
| 109 |
+
|
| 110 |
+
- If int, then draw `max_features` features.
|
| 111 |
+
- If float, then draw `max(1, int(max_features * n_features_in_))` features.
|
| 112 |
+
|
| 113 |
+
Note: using a float number less than 1.0 or integer less than number of
|
| 114 |
+
features will enable feature subsampling and leads to a longer runtime.
|
| 115 |
+
|
| 116 |
+
bootstrap : bool, default=False
|
| 117 |
+
If True, individual trees are fit on random subsets of the training
|
| 118 |
+
data sampled with replacement. If False, sampling without replacement
|
| 119 |
+
is performed.
|
| 120 |
+
|
| 121 |
+
n_jobs : int, default=None
|
| 122 |
+
The number of jobs to run in parallel for :meth:`fit`. ``None`` means 1
|
| 123 |
+
unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using
|
| 124 |
+
all processors. See :term:`Glossary <n_jobs>` for more details.
|
| 125 |
+
|
| 126 |
+
random_state : int, RandomState instance or None, default=None
|
| 127 |
+
Controls the pseudo-randomness of the selection of the feature
|
| 128 |
+
and split values for each branching step and each tree in the forest.
|
| 129 |
+
|
| 130 |
+
Pass an int for reproducible results across multiple function calls.
|
| 131 |
+
See :term:`Glossary <random_state>`.
|
| 132 |
+
|
| 133 |
+
verbose : int, default=0
|
| 134 |
+
Controls the verbosity of the tree building process.
|
| 135 |
+
|
| 136 |
+
warm_start : bool, default=False
|
| 137 |
+
When set to ``True``, reuse the solution of the previous call to fit
|
| 138 |
+
and add more estimators to the ensemble, otherwise, just fit a whole
|
| 139 |
+
new forest. See :term:`the Glossary <warm_start>`.
|
| 140 |
+
|
| 141 |
+
.. versionadded:: 0.21
|
| 142 |
+
|
| 143 |
+
Attributes
|
| 144 |
+
----------
|
| 145 |
+
estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance
|
| 146 |
+
The child estimator template used to create the collection of
|
| 147 |
+
fitted sub-estimators.
|
| 148 |
+
|
| 149 |
+
.. versionadded:: 1.2
|
| 150 |
+
`base_estimator_` was renamed to `estimator_`.
|
| 151 |
+
|
| 152 |
+
estimators_ : list of ExtraTreeRegressor instances
|
| 153 |
+
The collection of fitted sub-estimators.
|
| 154 |
+
|
| 155 |
+
estimators_features_ : list of ndarray
|
| 156 |
+
The subset of drawn features for each base estimator.
|
| 157 |
+
|
| 158 |
+
estimators_samples_ : list of ndarray
|
| 159 |
+
The subset of drawn samples (i.e., the in-bag samples) for each base
|
| 160 |
+
estimator.
|
| 161 |
+
|
| 162 |
+
max_samples_ : int
|
| 163 |
+
The actual number of samples.
|
| 164 |
+
|
| 165 |
+
offset_ : float
|
| 166 |
+
Offset used to define the decision function from the raw scores. We
|
| 167 |
+
have the relation: ``decision_function = score_samples - offset_``.
|
| 168 |
+
``offset_`` is defined as follows. When the contamination parameter is
|
| 169 |
+
set to "auto", the offset is equal to -0.5 as the scores of inliers are
|
| 170 |
+
close to 0 and the scores of outliers are close to -1. When a
|
| 171 |
+
contamination parameter different than "auto" is provided, the offset
|
| 172 |
+
is defined in such a way we obtain the expected number of outliers
|
| 173 |
+
(samples with decision function < 0) in training.
|
| 174 |
+
|
| 175 |
+
.. versionadded:: 0.20
|
| 176 |
+
|
| 177 |
+
n_features_in_ : int
|
| 178 |
+
Number of features seen during :term:`fit`.
|
| 179 |
+
|
| 180 |
+
.. versionadded:: 0.24
|
| 181 |
+
|
| 182 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 183 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 184 |
+
has feature names that are all strings.
|
| 185 |
+
|
| 186 |
+
.. versionadded:: 1.0
|
| 187 |
+
|
| 188 |
+
See Also
|
| 189 |
+
--------
|
| 190 |
+
sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a
|
| 191 |
+
Gaussian distributed dataset.
|
| 192 |
+
sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
|
| 193 |
+
Estimate the support of a high-dimensional distribution.
|
| 194 |
+
The implementation is based on libsvm.
|
| 195 |
+
sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection
|
| 196 |
+
using Local Outlier Factor (LOF).
|
| 197 |
+
|
| 198 |
+
Notes
|
| 199 |
+
-----
|
| 200 |
+
The implementation is based on an ensemble of ExtraTreeRegressor. The
|
| 201 |
+
maximum depth of each tree is set to ``ceil(log_2(n))`` where
|
| 202 |
+
:math:`n` is the number of samples used to build the tree
|
| 203 |
+
(see (Liu et al., 2008) for more details).
|
| 204 |
+
|
| 205 |
+
References
|
| 206 |
+
----------
|
| 207 |
+
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
|
| 208 |
+
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
|
| 209 |
+
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
|
| 210 |
+
anomaly detection." ACM Transactions on Knowledge Discovery from
|
| 211 |
+
Data (TKDD) 6.1 (2012): 3.
|
| 212 |
+
|
| 213 |
+
Examples
|
| 214 |
+
--------
|
| 215 |
+
>>> from sklearn.ensemble import IsolationForest
|
| 216 |
+
>>> X = [[-1.1], [0.3], [0.5], [100]]
|
| 217 |
+
>>> clf = IsolationForest(random_state=0).fit(X)
|
| 218 |
+
>>> clf.predict([[0.1], [0], [90]])
|
| 219 |
+
array([ 1, 1, -1])
|
| 220 |
+
|
| 221 |
+
For an example of using isolation forest for anomaly detection see
|
| 222 |
+
:ref:`sphx_glr_auto_examples_ensemble_plot_isolation_forest.py`.
|
| 223 |
+
"""
|
| 224 |
+
|
| 225 |
+
_parameter_constraints: dict = {
|
| 226 |
+
"n_estimators": [Interval(Integral, 1, None, closed="left")],
|
| 227 |
+
"max_samples": [
|
| 228 |
+
StrOptions({"auto"}),
|
| 229 |
+
Interval(Integral, 1, None, closed="left"),
|
| 230 |
+
Interval(RealNotInt, 0, 1, closed="right"),
|
| 231 |
+
],
|
| 232 |
+
"contamination": [
|
| 233 |
+
StrOptions({"auto"}),
|
| 234 |
+
Interval(Real, 0, 0.5, closed="right"),
|
| 235 |
+
],
|
| 236 |
+
"max_features": [
|
| 237 |
+
Integral,
|
| 238 |
+
Interval(Real, 0, 1, closed="right"),
|
| 239 |
+
],
|
| 240 |
+
"bootstrap": ["boolean"],
|
| 241 |
+
"n_jobs": [Integral, None],
|
| 242 |
+
"random_state": ["random_state"],
|
| 243 |
+
"verbose": ["verbose"],
|
| 244 |
+
"warm_start": ["boolean"],
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
def __init__(
|
| 248 |
+
self,
|
| 249 |
+
*,
|
| 250 |
+
n_estimators=100,
|
| 251 |
+
max_samples="auto",
|
| 252 |
+
contamination="auto",
|
| 253 |
+
max_features=1.0,
|
| 254 |
+
bootstrap=False,
|
| 255 |
+
n_jobs=None,
|
| 256 |
+
random_state=None,
|
| 257 |
+
verbose=0,
|
| 258 |
+
warm_start=False,
|
| 259 |
+
):
|
| 260 |
+
super().__init__(
|
| 261 |
+
estimator=None,
|
| 262 |
+
# here above max_features has no links with self.max_features
|
| 263 |
+
bootstrap=bootstrap,
|
| 264 |
+
bootstrap_features=False,
|
| 265 |
+
n_estimators=n_estimators,
|
| 266 |
+
max_samples=max_samples,
|
| 267 |
+
max_features=max_features,
|
| 268 |
+
warm_start=warm_start,
|
| 269 |
+
n_jobs=n_jobs,
|
| 270 |
+
random_state=random_state,
|
| 271 |
+
verbose=verbose,
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
self.contamination = contamination
|
| 275 |
+
|
| 276 |
+
def _get_estimator(self):
|
| 277 |
+
return ExtraTreeRegressor(
|
| 278 |
+
# here max_features has no links with self.max_features
|
| 279 |
+
max_features=1,
|
| 280 |
+
splitter="random",
|
| 281 |
+
random_state=self.random_state,
|
| 282 |
+
)
|
| 283 |
+
|
| 284 |
+
def _set_oob_score(self, X, y):
|
| 285 |
+
raise NotImplementedError("OOB score not supported by iforest")
|
| 286 |
+
|
| 287 |
+
def _parallel_args(self):
|
| 288 |
+
# ExtraTreeRegressor releases the GIL, so it's more efficient to use
|
| 289 |
+
# a thread-based backend rather than a process-based backend so as
|
| 290 |
+
# to avoid suffering from communication overhead and extra memory
|
| 291 |
+
# copies. This is only used in the fit method.
|
| 292 |
+
return {"prefer": "threads"}
|
| 293 |
+
|
| 294 |
+
@_fit_context(prefer_skip_nested_validation=True)
|
| 295 |
+
def fit(self, X, y=None, sample_weight=None):
|
| 296 |
+
"""
|
| 297 |
+
Fit estimator.
|
| 298 |
+
|
| 299 |
+
Parameters
|
| 300 |
+
----------
|
| 301 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 302 |
+
The input samples. Use ``dtype=np.float32`` for maximum
|
| 303 |
+
efficiency. Sparse matrices are also supported, use sparse
|
| 304 |
+
``csc_matrix`` for maximum efficiency.
|
| 305 |
+
|
| 306 |
+
y : Ignored
|
| 307 |
+
Not used, present for API consistency by convention.
|
| 308 |
+
|
| 309 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
| 310 |
+
Sample weights. If None, then samples are equally weighted.
|
| 311 |
+
|
| 312 |
+
Returns
|
| 313 |
+
-------
|
| 314 |
+
self : object
|
| 315 |
+
Fitted estimator.
|
| 316 |
+
"""
|
| 317 |
+
X = validate_data(
|
| 318 |
+
self, X, accept_sparse=["csc"], dtype=tree_dtype, ensure_all_finite=False
|
| 319 |
+
)
|
| 320 |
+
if issparse(X):
|
| 321 |
+
# Pre-sort indices to avoid that each individual tree of the
|
| 322 |
+
# ensemble sorts the indices.
|
| 323 |
+
X.sort_indices()
|
| 324 |
+
|
| 325 |
+
rnd = check_random_state(self.random_state)
|
| 326 |
+
y = rnd.uniform(size=X.shape[0])
|
| 327 |
+
|
| 328 |
+
# ensure that max_sample is in [1, n_samples]:
|
| 329 |
+
n_samples = X.shape[0]
|
| 330 |
+
|
| 331 |
+
if isinstance(self.max_samples, str) and self.max_samples == "auto":
|
| 332 |
+
max_samples = min(256, n_samples)
|
| 333 |
+
|
| 334 |
+
elif isinstance(self.max_samples, numbers.Integral):
|
| 335 |
+
if self.max_samples > n_samples:
|
| 336 |
+
warn(
|
| 337 |
+
"max_samples (%s) is greater than the "
|
| 338 |
+
"total number of samples (%s). max_samples "
|
| 339 |
+
"will be set to n_samples for estimation."
|
| 340 |
+
% (self.max_samples, n_samples)
|
| 341 |
+
)
|
| 342 |
+
max_samples = n_samples
|
| 343 |
+
else:
|
| 344 |
+
max_samples = self.max_samples
|
| 345 |
+
else: # max_samples is float
|
| 346 |
+
max_samples = int(self.max_samples * X.shape[0])
|
| 347 |
+
|
| 348 |
+
self.max_samples_ = max_samples
|
| 349 |
+
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
|
| 350 |
+
super()._fit(
|
| 351 |
+
X,
|
| 352 |
+
y,
|
| 353 |
+
max_samples,
|
| 354 |
+
max_depth=max_depth,
|
| 355 |
+
sample_weight=sample_weight,
|
| 356 |
+
check_input=False,
|
| 357 |
+
)
|
| 358 |
+
|
| 359 |
+
self._average_path_length_per_tree, self._decision_path_lengths = zip(
|
| 360 |
+
*[
|
| 361 |
+
(
|
| 362 |
+
_average_path_length(tree.tree_.n_node_samples),
|
| 363 |
+
tree.tree_.compute_node_depths(),
|
| 364 |
+
)
|
| 365 |
+
for tree in self.estimators_
|
| 366 |
+
]
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
if self.contamination == "auto":
|
| 370 |
+
# 0.5 plays a special role as described in the original paper.
|
| 371 |
+
# we take the opposite as we consider the opposite of their score.
|
| 372 |
+
self.offset_ = -0.5
|
| 373 |
+
return self
|
| 374 |
+
|
| 375 |
+
# Else, define offset_ wrt contamination parameter
|
| 376 |
+
# To avoid performing input validation a second time we call
|
| 377 |
+
# _score_samples rather than score_samples.
|
| 378 |
+
# _score_samples expects a CSR matrix, so we convert if necessary.
|
| 379 |
+
if issparse(X):
|
| 380 |
+
X = X.tocsr()
|
| 381 |
+
self.offset_ = np.percentile(self._score_samples(X), 100.0 * self.contamination)
|
| 382 |
+
|
| 383 |
+
return self
|
| 384 |
+
|
| 385 |
+
def predict(self, X):
|
| 386 |
+
"""
|
| 387 |
+
Predict if a particular sample is an outlier or not.
|
| 388 |
+
|
| 389 |
+
Parameters
|
| 390 |
+
----------
|
| 391 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 392 |
+
The input samples. Internally, it will be converted to
|
| 393 |
+
``dtype=np.float32`` and if a sparse matrix is provided
|
| 394 |
+
to a sparse ``csr_matrix``.
|
| 395 |
+
|
| 396 |
+
Returns
|
| 397 |
+
-------
|
| 398 |
+
is_inlier : ndarray of shape (n_samples,)
|
| 399 |
+
For each observation, tells whether or not (+1 or -1) it should
|
| 400 |
+
be considered as an inlier according to the fitted model.
|
| 401 |
+
|
| 402 |
+
Notes
|
| 403 |
+
-----
|
| 404 |
+
The predict method can be parallelized by setting a joblib context. This
|
| 405 |
+
inherently does NOT use the ``n_jobs`` parameter initialized in the class,
|
| 406 |
+
which is used during ``fit``. This is because, predict may actually be faster
|
| 407 |
+
without parallelization for a small number of samples,
|
| 408 |
+
such as for 1000 samples or less. The user can set the
|
| 409 |
+
number of jobs in the joblib context to control the number of parallel jobs.
|
| 410 |
+
|
| 411 |
+
.. code-block:: python
|
| 412 |
+
|
| 413 |
+
from joblib import parallel_backend
|
| 414 |
+
|
| 415 |
+
# Note, we use threading here as the predict method is not CPU bound.
|
| 416 |
+
with parallel_backend("threading", n_jobs=4):
|
| 417 |
+
model.predict(X)
|
| 418 |
+
"""
|
| 419 |
+
check_is_fitted(self)
|
| 420 |
+
decision_func = self.decision_function(X)
|
| 421 |
+
is_inlier = np.ones_like(decision_func, dtype=int)
|
| 422 |
+
is_inlier[decision_func < 0] = -1
|
| 423 |
+
return is_inlier
|
| 424 |
+
|
| 425 |
+
def decision_function(self, X):
|
| 426 |
+
"""
|
| 427 |
+
Average anomaly score of X of the base classifiers.
|
| 428 |
+
|
| 429 |
+
The anomaly score of an input sample is computed as
|
| 430 |
+
the mean anomaly score of the trees in the forest.
|
| 431 |
+
|
| 432 |
+
The measure of normality of an observation given a tree is the depth
|
| 433 |
+
of the leaf containing this observation, which is equivalent to
|
| 434 |
+
the number of splittings required to isolate this point. In case of
|
| 435 |
+
several observations n_left in the leaf, the average path length of
|
| 436 |
+
a n_left samples isolation tree is added.
|
| 437 |
+
|
| 438 |
+
Parameters
|
| 439 |
+
----------
|
| 440 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 441 |
+
The input samples. Internally, it will be converted to
|
| 442 |
+
``dtype=np.float32`` and if a sparse matrix is provided
|
| 443 |
+
to a sparse ``csr_matrix``.
|
| 444 |
+
|
| 445 |
+
Returns
|
| 446 |
+
-------
|
| 447 |
+
scores : ndarray of shape (n_samples,)
|
| 448 |
+
The anomaly score of the input samples.
|
| 449 |
+
The lower, the more abnormal. Negative scores represent outliers,
|
| 450 |
+
positive scores represent inliers.
|
| 451 |
+
|
| 452 |
+
Notes
|
| 453 |
+
-----
|
| 454 |
+
The decision_function method can be parallelized by setting a joblib context.
|
| 455 |
+
This inherently does NOT use the ``n_jobs`` parameter initialized in the class,
|
| 456 |
+
which is used during ``fit``. This is because, calculating the score may
|
| 457 |
+
actually be faster without parallelization for a small number of samples,
|
| 458 |
+
such as for 1000 samples or less.
|
| 459 |
+
The user can set the number of jobs in the joblib context to control the
|
| 460 |
+
number of parallel jobs.
|
| 461 |
+
|
| 462 |
+
.. code-block:: python
|
| 463 |
+
|
| 464 |
+
from joblib import parallel_backend
|
| 465 |
+
|
| 466 |
+
# Note, we use threading here as the decision_function method is
|
| 467 |
+
# not CPU bound.
|
| 468 |
+
with parallel_backend("threading", n_jobs=4):
|
| 469 |
+
model.decision_function(X)
|
| 470 |
+
"""
|
| 471 |
+
# We subtract self.offset_ to make 0 be the threshold value for being
|
| 472 |
+
# an outlier:
|
| 473 |
+
|
| 474 |
+
return self.score_samples(X) - self.offset_
|
| 475 |
+
|
| 476 |
+
def score_samples(self, X):
|
| 477 |
+
"""
|
| 478 |
+
Opposite of the anomaly score defined in the original paper.
|
| 479 |
+
|
| 480 |
+
The anomaly score of an input sample is computed as
|
| 481 |
+
the mean anomaly score of the trees in the forest.
|
| 482 |
+
|
| 483 |
+
The measure of normality of an observation given a tree is the depth
|
| 484 |
+
of the leaf containing this observation, which is equivalent to
|
| 485 |
+
the number of splittings required to isolate this point. In case of
|
| 486 |
+
several observations n_left in the leaf, the average path length of
|
| 487 |
+
a n_left samples isolation tree is added.
|
| 488 |
+
|
| 489 |
+
Parameters
|
| 490 |
+
----------
|
| 491 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 492 |
+
The input samples.
|
| 493 |
+
|
| 494 |
+
Returns
|
| 495 |
+
-------
|
| 496 |
+
scores : ndarray of shape (n_samples,)
|
| 497 |
+
The anomaly score of the input samples.
|
| 498 |
+
The lower, the more abnormal.
|
| 499 |
+
|
| 500 |
+
Notes
|
| 501 |
+
-----
|
| 502 |
+
The score function method can be parallelized by setting a joblib context. This
|
| 503 |
+
inherently does NOT use the ``n_jobs`` parameter initialized in the class,
|
| 504 |
+
which is used during ``fit``. This is because, calculating the score may
|
| 505 |
+
actually be faster without parallelization for a small number of samples,
|
| 506 |
+
such as for 1000 samples or less.
|
| 507 |
+
The user can set the number of jobs in the joblib context to control the
|
| 508 |
+
number of parallel jobs.
|
| 509 |
+
|
| 510 |
+
.. code-block:: python
|
| 511 |
+
|
| 512 |
+
from joblib import parallel_backend
|
| 513 |
+
|
| 514 |
+
# Note, we use threading here as the score_samples method is not CPU bound.
|
| 515 |
+
with parallel_backend("threading", n_jobs=4):
|
| 516 |
+
model.score(X)
|
| 517 |
+
"""
|
| 518 |
+
# Check data
|
| 519 |
+
X = validate_data(
|
| 520 |
+
self,
|
| 521 |
+
X,
|
| 522 |
+
accept_sparse="csr",
|
| 523 |
+
dtype=tree_dtype,
|
| 524 |
+
reset=False,
|
| 525 |
+
ensure_all_finite=False,
|
| 526 |
+
)
|
| 527 |
+
|
| 528 |
+
return self._score_samples(X)
|
| 529 |
+
|
| 530 |
+
def _score_samples(self, X):
|
| 531 |
+
"""Private version of score_samples without input validation.
|
| 532 |
+
|
| 533 |
+
Input validation would remove feature names, so we disable it.
|
| 534 |
+
"""
|
| 535 |
+
# Code structure from ForestClassifier/predict_proba
|
| 536 |
+
|
| 537 |
+
check_is_fitted(self)
|
| 538 |
+
|
| 539 |
+
# Take the opposite of the scores as bigger is better (here less abnormal)
|
| 540 |
+
return -self._compute_chunked_score_samples(X)
|
| 541 |
+
|
| 542 |
+
def _compute_chunked_score_samples(self, X):
|
| 543 |
+
n_samples = _num_samples(X)
|
| 544 |
+
|
| 545 |
+
if self._max_features == X.shape[1]:
|
| 546 |
+
subsample_features = False
|
| 547 |
+
else:
|
| 548 |
+
subsample_features = True
|
| 549 |
+
|
| 550 |
+
# We get as many rows as possible within our working_memory budget
|
| 551 |
+
# (defined by sklearn.get_config()['working_memory']) to store
|
| 552 |
+
# self._max_features in each row during computation.
|
| 553 |
+
#
|
| 554 |
+
# Note:
|
| 555 |
+
# - this will get at least 1 row, even if 1 row of score will
|
| 556 |
+
# exceed working_memory.
|
| 557 |
+
# - this does only account for temporary memory usage while loading
|
| 558 |
+
# the data needed to compute the scores -- the returned scores
|
| 559 |
+
# themselves are 1D.
|
| 560 |
+
|
| 561 |
+
chunk_n_rows = get_chunk_n_rows(
|
| 562 |
+
row_bytes=16 * self._max_features, max_n_rows=n_samples
|
| 563 |
+
)
|
| 564 |
+
slices = gen_batches(n_samples, chunk_n_rows)
|
| 565 |
+
|
| 566 |
+
scores = np.zeros(n_samples, order="f")
|
| 567 |
+
|
| 568 |
+
for sl in slices:
|
| 569 |
+
# compute score on the slices of test samples:
|
| 570 |
+
scores[sl] = self._compute_score_samples(X[sl], subsample_features)
|
| 571 |
+
|
| 572 |
+
return scores
|
| 573 |
+
|
| 574 |
+
def _compute_score_samples(self, X, subsample_features):
|
| 575 |
+
"""
|
| 576 |
+
Compute the score of each samples in X going through the extra trees.
|
| 577 |
+
|
| 578 |
+
Parameters
|
| 579 |
+
----------
|
| 580 |
+
X : array-like or sparse matrix
|
| 581 |
+
Data matrix.
|
| 582 |
+
|
| 583 |
+
subsample_features : bool
|
| 584 |
+
Whether features should be subsampled.
|
| 585 |
+
|
| 586 |
+
Returns
|
| 587 |
+
-------
|
| 588 |
+
scores : ndarray of shape (n_samples,)
|
| 589 |
+
The score of each sample in X.
|
| 590 |
+
"""
|
| 591 |
+
n_samples = X.shape[0]
|
| 592 |
+
|
| 593 |
+
depths = np.zeros(n_samples, order="f")
|
| 594 |
+
|
| 595 |
+
average_path_length_max_samples = _average_path_length([self._max_samples])
|
| 596 |
+
|
| 597 |
+
# Note: we use default n_jobs value, i.e. sequential computation, which
|
| 598 |
+
# we expect to be more performant that parallelizing for small number
|
| 599 |
+
# of samples, e.g. < 1k samples. Default n_jobs value can be overriden
|
| 600 |
+
# by using joblib.parallel_backend context manager around
|
| 601 |
+
# ._compute_score_samples. Using a higher n_jobs may speed up the
|
| 602 |
+
# computation of the scores, e.g. for > 1k samples. See
|
| 603 |
+
# https://github.com/scikit-learn/scikit-learn/pull/28622 for more
|
| 604 |
+
# details.
|
| 605 |
+
lock = threading.Lock()
|
| 606 |
+
Parallel(
|
| 607 |
+
verbose=self.verbose,
|
| 608 |
+
require="sharedmem",
|
| 609 |
+
)(
|
| 610 |
+
delayed(_parallel_compute_tree_depths)(
|
| 611 |
+
tree,
|
| 612 |
+
X,
|
| 613 |
+
features if subsample_features else None,
|
| 614 |
+
self._decision_path_lengths[tree_idx],
|
| 615 |
+
self._average_path_length_per_tree[tree_idx],
|
| 616 |
+
depths,
|
| 617 |
+
lock,
|
| 618 |
+
)
|
| 619 |
+
for tree_idx, (tree, features) in enumerate(
|
| 620 |
+
zip(self.estimators_, self.estimators_features_)
|
| 621 |
+
)
|
| 622 |
+
)
|
| 623 |
+
|
| 624 |
+
denominator = len(self.estimators_) * average_path_length_max_samples
|
| 625 |
+
scores = 2 ** (
|
| 626 |
+
# For a single training sample, denominator and depth are 0.
|
| 627 |
+
# Therefore, we set the score manually to 1.
|
| 628 |
+
-np.divide(
|
| 629 |
+
depths, denominator, out=np.ones_like(depths), where=denominator != 0
|
| 630 |
+
)
|
| 631 |
+
)
|
| 632 |
+
return scores
|
| 633 |
+
|
| 634 |
+
def __sklearn_tags__(self):
|
| 635 |
+
tags = super().__sklearn_tags__()
|
| 636 |
+
tags.input_tags.allow_nan = True
|
| 637 |
+
return tags
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def _average_path_length(n_samples_leaf):
    """
    The average path length in a n_samples iTree, which is equal to
    the average path length of an unsuccessful BST search since the
    latter has the same structure as an isolation tree.

    Parameters
    ----------
    n_samples_leaf : array-like of shape (n_samples,)
        The number of training samples in each test sample leaf, for
        each estimators.

    Returns
    -------
    average_path_length : ndarray of shape (n_samples,)
    """
    n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)

    original_shape = n_samples_leaf.shape
    counts = n_samples_leaf.reshape((1, -1))
    path_lengths = np.zeros(counts.shape)

    # Closed-form cases: a leaf with <= 1 sample has depth contribution 0,
    # exactly 2 samples gives 1; larger leaves use the harmonic approximation
    # 2*(H(n-1)) - 2*(n-1)/n with H(k) ~= ln(k) + Euler's constant.
    is_trivial = counts <= 1
    is_pair = counts == 2
    is_general = ~(is_trivial | is_pair)

    path_lengths[is_trivial] = 0.0
    path_lengths[is_pair] = 1.0
    general_counts = counts[is_general]
    path_lengths[is_general] = (
        2.0 * (np.log(general_counts - 1.0) + np.euler_gamma)
        - 2.0 * (general_counts - 1.0) / general_counts
    )

    return path_lengths.reshape(original_shape)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py
ADDED
|
@@ -0,0 +1,1173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Weight Boosting.
|
| 2 |
+
|
| 3 |
+
This module contains weight boosting estimators for both classification and
|
| 4 |
+
regression.
|
| 5 |
+
|
| 6 |
+
The module structure is the following:
|
| 7 |
+
|
| 8 |
+
- The `BaseWeightBoosting` base class implements a common ``fit`` method
|
| 9 |
+
for all the estimators in the module. Regression and classification
|
| 10 |
+
only differ from each other in the loss function that is optimized.
|
| 11 |
+
|
| 12 |
+
- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting
|
| 13 |
+
(AdaBoost-SAMME) for classification problems.
|
| 14 |
+
|
| 15 |
+
- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting
|
| 16 |
+
(AdaBoost.R2) for regression problems.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
# Authors: The scikit-learn developers
|
| 20 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 21 |
+
|
| 22 |
+
import warnings
|
| 23 |
+
from abc import ABCMeta, abstractmethod
|
| 24 |
+
from numbers import Integral, Real
|
| 25 |
+
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
from ..base import (
|
| 29 |
+
ClassifierMixin,
|
| 30 |
+
RegressorMixin,
|
| 31 |
+
_fit_context,
|
| 32 |
+
is_classifier,
|
| 33 |
+
is_regressor,
|
| 34 |
+
)
|
| 35 |
+
from ..metrics import accuracy_score, r2_score
|
| 36 |
+
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
|
| 37 |
+
from ..utils import _safe_indexing, check_random_state
|
| 38 |
+
from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions
|
| 39 |
+
from ..utils.extmath import softmax, stable_cumsum
|
| 40 |
+
from ..utils.metadata_routing import (
|
| 41 |
+
_raise_for_unsupported_routing,
|
| 42 |
+
_RoutingNotSupportedMixin,
|
| 43 |
+
)
|
| 44 |
+
from ..utils.validation import (
|
| 45 |
+
_check_sample_weight,
|
| 46 |
+
_num_samples,
|
| 47 |
+
check_is_fitted,
|
| 48 |
+
has_fit_parameter,
|
| 49 |
+
validate_data,
|
| 50 |
+
)
|
| 51 |
+
from ._base import BaseEnsemble
|
| 52 |
+
|
| 53 |
+
__all__ = [
|
| 54 |
+
"AdaBoostClassifier",
|
| 55 |
+
"AdaBoostRegressor",
|
| 56 |
+
]
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
    """Base class for AdaBoost estimators.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    _parameter_constraints: dict = {
        "estimator": [HasMethods(["fit", "predict"]), None],
        "n_estimators": [Interval(Integral, 1, None, closed="left")],
        "learning_rate": [Interval(Real, 0, None, closed="neither")],
        "random_state": ["random_state"],
    }

    @abstractmethod
    def __init__(
        self,
        estimator=None,
        *,
        n_estimators=50,
        estimator_params=tuple(),
        learning_rate=1.0,
        random_state=None,
    ):
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
        )
        self.learning_rate = learning_rate
        self.random_state = random_state

    def _check_X(self, X):
        # Only called to validate X in non-fit methods, therefore reset=False.
        return validate_data(
            self,
            X,
            accept_sparse=["csr", "csc"],
            ensure_2d=True,
            allow_nd=True,
            dtype=None,
            reset=False,
        )

    @_fit_context(
        # AdaBoost*.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csr", "csc"],
            ensure_2d=True,
            allow_nd=True,
            dtype=None,
            y_numeric=is_regressor(self),
        )

        sample_weight = _check_sample_weight(
            sample_weight, X, np.float64, copy=True, ensure_non_negative=True
        )
        # AdaBoost works with a weight distribution summing to one.
        sample_weight /= sample_weight.sum()

        # Check parameters.
        self._validate_estimator()

        # Discard any results from a previous fit.
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)

        # RNG used to draw a fresh seed for each boosting round.
        random_state = check_random_state(self.random_state)
        epsilon = np.finfo(sample_weight.dtype).eps

        zero_weight_mask = sample_weight == 0.0
        for iboost in range(self.n_estimators):
            # Avoid extremely small sample weights; for details see issue
            # #20320 -- but never resurrect weights that started at zero.
            sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None)
            sample_weight[zero_weight_mask] = 0.0

            # One boosting round.
            sample_weight, boost_weight, boost_error = self._boost(
                iboost, X, y, sample_weight, random_state
            )

            # A None weight vector signals early termination.
            if sample_weight is None:
                break
            self.estimator_weights_[iboost] = boost_weight
            self.estimator_errors_[iboost] = boost_error

            # Perfect fit: no point boosting further.
            if boost_error == 0:
                break

            weight_total = np.sum(sample_weight)

            if not np.isfinite(weight_total):
                warnings.warn(
                    (
                        "Sample weights have reached infinite values,"
                        f" at iteration {iboost}, causing overflow. "
                        "Iterations stopped. Try lowering the learning rate."
                    ),
                    stacklevel=2,
                )
                break

            # Stop if the sum of sample weights has become non-positive.
            if weight_total <= 0:
                break

            if iboost < self.n_estimators - 1:
                # Renormalize the weight distribution for the next round.
                sample_weight /= weight_total

        return self

    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.

        Warning: This method needs to be overridden by subclasses.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values (class labels).

        sample_weight : array-like of shape (n_samples,)
            The current sample weights.

        random_state : RandomState
            The current random number generator.

        Returns
        -------
        sample_weight : array-like of shape (n_samples,) or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass

    def staged_score(self, X, y, sample_weight=None):
        """Return staged scores for X, y.

        This generator method yields the ensemble score after each iteration of
        boosting and therefore allows monitoring, such as to determine the
        score on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            Labels for X.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Yields
        ------
        z : float
        """
        X = self._check_X(X)

        # Classifiers are scored with accuracy, regressors with R^2.
        for staged_prediction in self.staged_predict(X):
            if is_classifier(self):
                yield accuracy_score(y, staged_prediction, sample_weight=sample_weight)
            else:
                yield r2_score(y, staged_prediction, sample_weight=sample_weight)

    @property
    def feature_importances_(self):
        """The impurity-based feature importances.

        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

        Returns
        -------
        feature_importances_ : ndarray of shape (n_features,)
            The feature importances.
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError(
                "Estimator not fitted, call `fit` before `feature_importances_`."
            )

        try:
            # Weighted average of the per-estimator importances.
            norm = self.estimator_weights_.sum()
            weighted = sum(
                weight * clf.feature_importances_
                for weight, clf in zip(self.estimator_weights_, self.estimators_)
            )
            return weighted / norm

        except AttributeError as e:
            raise AttributeError(
                "Unable to compute feature importances "
                "since estimator does not have a "
                "feature_importances_ attribute"
            ) from e

    def __sklearn_tags__(self):
        # Sparse input is supported throughout the boosting machinery.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def _samme_proba(estimator, n_classes, X):
|
| 322 |
+
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
|
| 323 |
+
|
| 324 |
+
References
|
| 325 |
+
----------
|
| 326 |
+
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
|
| 327 |
+
|
| 328 |
+
"""
|
| 329 |
+
proba = estimator.predict_proba(X)
|
| 330 |
+
|
| 331 |
+
# Displace zero probabilities so the log is defined.
|
| 332 |
+
# Also fix negative elements which may occur with
|
| 333 |
+
# negative sample weights.
|
| 334 |
+
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
|
| 335 |
+
log_proba = np.log(proba)
|
| 336 |
+
|
| 337 |
+
return (n_classes - 1) * (
|
| 338 |
+
log_proba - (1.0 / n_classes) * log_proba.sum(axis=1)[:, np.newaxis]
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class AdaBoostClassifier(
|
| 343 |
+
_RoutingNotSupportedMixin, ClassifierMixin, BaseWeightBoosting
|
| 344 |
+
):
|
| 345 |
+
"""An AdaBoost classifier.
|
| 346 |
+
|
| 347 |
+
An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a
|
| 348 |
+
classifier on the original dataset and then fits additional copies of the
|
| 349 |
+
classifier on the same dataset but where the weights of incorrectly
|
| 350 |
+
classified instances are adjusted such that subsequent classifiers focus
|
| 351 |
+
more on difficult cases.
|
| 352 |
+
|
| 353 |
+
This class implements the algorithm based on [2]_.
|
| 354 |
+
|
| 355 |
+
Read more in the :ref:`User Guide <adaboost>`.
|
| 356 |
+
|
| 357 |
+
.. versionadded:: 0.14
|
| 358 |
+
|
| 359 |
+
Parameters
|
| 360 |
+
----------
|
| 361 |
+
estimator : object, default=None
|
| 362 |
+
The base estimator from which the boosted ensemble is built.
|
| 363 |
+
Support for sample weighting is required, as well as proper
|
| 364 |
+
``classes_`` and ``n_classes_`` attributes. If ``None``, then
|
| 365 |
+
the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`
|
| 366 |
+
initialized with `max_depth=1`.
|
| 367 |
+
|
| 368 |
+
.. versionadded:: 1.2
|
| 369 |
+
`base_estimator` was renamed to `estimator`.
|
| 370 |
+
|
| 371 |
+
n_estimators : int, default=50
|
| 372 |
+
The maximum number of estimators at which boosting is terminated.
|
| 373 |
+
In case of perfect fit, the learning procedure is stopped early.
|
| 374 |
+
Values must be in the range `[1, inf)`.
|
| 375 |
+
|
| 376 |
+
learning_rate : float, default=1.0
|
| 377 |
+
Weight applied to each classifier at each boosting iteration. A higher
|
| 378 |
+
learning rate increases the contribution of each classifier. There is
|
| 379 |
+
a trade-off between the `learning_rate` and `n_estimators` parameters.
|
| 380 |
+
Values must be in the range `(0.0, inf)`.
|
| 381 |
+
|
| 382 |
+
algorithm : {'SAMME'}, default='SAMME'
|
| 383 |
+
Use the SAMME discrete boosting algorithm.
|
| 384 |
+
|
| 385 |
+
.. deprecated:: 1.6
|
| 386 |
+
`algorithm` is deprecated and will be removed in version 1.8. This
|
| 387 |
+
estimator only implements the 'SAMME' algorithm.
|
| 388 |
+
|
| 389 |
+
random_state : int, RandomState instance or None, default=None
|
| 390 |
+
Controls the random seed given at each `estimator` at each
|
| 391 |
+
boosting iteration.
|
| 392 |
+
Thus, it is only used when `estimator` exposes a `random_state`.
|
| 393 |
+
Pass an int for reproducible output across multiple function calls.
|
| 394 |
+
See :term:`Glossary <random_state>`.
|
| 395 |
+
|
| 396 |
+
Attributes
|
| 397 |
+
----------
|
| 398 |
+
estimator_ : estimator
|
| 399 |
+
The base estimator from which the ensemble is grown.
|
| 400 |
+
|
| 401 |
+
.. versionadded:: 1.2
|
| 402 |
+
`base_estimator_` was renamed to `estimator_`.
|
| 403 |
+
|
| 404 |
+
estimators_ : list of classifiers
|
| 405 |
+
The collection of fitted sub-estimators.
|
| 406 |
+
|
| 407 |
+
classes_ : ndarray of shape (n_classes,)
|
| 408 |
+
The classes labels.
|
| 409 |
+
|
| 410 |
+
n_classes_ : int
|
| 411 |
+
The number of classes.
|
| 412 |
+
|
| 413 |
+
estimator_weights_ : ndarray of floats
|
| 414 |
+
Weights for each estimator in the boosted ensemble.
|
| 415 |
+
|
| 416 |
+
estimator_errors_ : ndarray of floats
|
| 417 |
+
Classification error for each estimator in the boosted
|
| 418 |
+
ensemble.
|
| 419 |
+
|
| 420 |
+
feature_importances_ : ndarray of shape (n_features,)
|
| 421 |
+
The impurity-based feature importances if supported by the
|
| 422 |
+
``estimator`` (when based on decision trees).
|
| 423 |
+
|
| 424 |
+
Warning: impurity-based feature importances can be misleading for
|
| 425 |
+
high cardinality features (many unique values). See
|
| 426 |
+
:func:`sklearn.inspection.permutation_importance` as an alternative.
|
| 427 |
+
|
| 428 |
+
n_features_in_ : int
|
| 429 |
+
Number of features seen during :term:`fit`.
|
| 430 |
+
|
| 431 |
+
.. versionadded:: 0.24
|
| 432 |
+
|
| 433 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 434 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 435 |
+
has feature names that are all strings.
|
| 436 |
+
|
| 437 |
+
.. versionadded:: 1.0
|
| 438 |
+
|
| 439 |
+
See Also
|
| 440 |
+
--------
|
| 441 |
+
AdaBoostRegressor : An AdaBoost regressor that begins by fitting a
|
| 442 |
+
regressor on the original dataset and then fits additional copies of
|
| 443 |
+
the regressor on the same dataset but where the weights of instances
|
| 444 |
+
are adjusted according to the error of the current prediction.
|
| 445 |
+
|
| 446 |
+
GradientBoostingClassifier : GB builds an additive model in a forward
|
| 447 |
+
stage-wise fashion. Regression trees are fit on the negative gradient
|
| 448 |
+
of the binomial or multinomial deviance loss function. Binary
|
| 449 |
+
classification is a special case where only a single regression tree is
|
| 450 |
+
induced.
|
| 451 |
+
|
| 452 |
+
sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning
|
| 453 |
+
method used for classification.
|
| 454 |
+
Creates a model that predicts the value of a target variable by
|
| 455 |
+
learning simple decision rules inferred from the data features.
|
| 456 |
+
|
| 457 |
+
References
|
| 458 |
+
----------
|
| 459 |
+
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
|
| 460 |
+
on-Line Learning and an Application to Boosting", 1995.
|
| 461 |
+
|
| 462 |
+
.. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost."
|
| 463 |
+
Statistics and its Interface 2.3 (2009): 349-360.
|
| 464 |
+
<10.4310/SII.2009.v2.n3.a8>`
|
| 465 |
+
|
| 466 |
+
Examples
|
| 467 |
+
--------
|
| 468 |
+
>>> from sklearn.ensemble import AdaBoostClassifier
|
| 469 |
+
>>> from sklearn.datasets import make_classification
|
| 470 |
+
>>> X, y = make_classification(n_samples=1000, n_features=4,
|
| 471 |
+
... n_informative=2, n_redundant=0,
|
| 472 |
+
... random_state=0, shuffle=False)
|
| 473 |
+
>>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)
|
| 474 |
+
>>> clf.fit(X, y)
|
| 475 |
+
AdaBoostClassifier(n_estimators=100, random_state=0)
|
| 476 |
+
>>> clf.predict([[0, 0, 0, 0]])
|
| 477 |
+
array([1])
|
| 478 |
+
>>> clf.score(X, y)
|
| 479 |
+
0.96...
|
| 480 |
+
|
| 481 |
+
For a detailed example of using AdaBoost to fit a sequence of DecisionTrees
|
| 482 |
+
as weaklearners, please refer to
|
| 483 |
+
:ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_multiclass.py`.
|
| 484 |
+
|
| 485 |
+
For a detailed example of using AdaBoost to fit a non-linearly seperable
|
| 486 |
+
classification dataset composed of two Gaussian quantiles clusters, please
|
| 487 |
+
refer to :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_twoclass.py`.
|
| 488 |
+
"""
|
| 489 |
+
|
| 490 |
+
# TODO(1.8): remove "algorithm" entry
|
| 491 |
+
_parameter_constraints: dict = {
|
| 492 |
+
**BaseWeightBoosting._parameter_constraints,
|
| 493 |
+
"algorithm": [StrOptions({"SAMME"}), Hidden(StrOptions({"deprecated"}))],
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
def __init__(
|
| 497 |
+
self,
|
| 498 |
+
estimator=None,
|
| 499 |
+
*,
|
| 500 |
+
n_estimators=50,
|
| 501 |
+
learning_rate=1.0,
|
| 502 |
+
algorithm="deprecated",
|
| 503 |
+
random_state=None,
|
| 504 |
+
):
|
| 505 |
+
super().__init__(
|
| 506 |
+
estimator=estimator,
|
| 507 |
+
n_estimators=n_estimators,
|
| 508 |
+
learning_rate=learning_rate,
|
| 509 |
+
random_state=random_state,
|
| 510 |
+
)
|
| 511 |
+
|
| 512 |
+
self.algorithm = algorithm
|
| 513 |
+
|
| 514 |
+
def _validate_estimator(self):
|
| 515 |
+
"""Check the estimator and set the estimator_ attribute."""
|
| 516 |
+
super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1))
|
| 517 |
+
|
| 518 |
+
if self.algorithm != "deprecated":
|
| 519 |
+
warnings.warn(
|
| 520 |
+
"The parameter 'algorithm' is deprecated in 1.6 and has no effect. "
|
| 521 |
+
"It will be removed in version 1.8.",
|
| 522 |
+
FutureWarning,
|
| 523 |
+
)
|
| 524 |
+
|
| 525 |
+
if not has_fit_parameter(self.estimator_, "sample_weight"):
|
| 526 |
+
raise ValueError(
|
| 527 |
+
f"{self.estimator.__class__.__name__} doesn't support sample_weight."
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
def _boost(self, iboost, X, y, sample_weight, random_state):
|
| 531 |
+
"""Implement a single boost.
|
| 532 |
+
|
| 533 |
+
Perform a single boost according to the discrete SAMME algorithm and return the
|
| 534 |
+
updated sample weights.
|
| 535 |
+
|
| 536 |
+
Parameters
|
| 537 |
+
----------
|
| 538 |
+
iboost : int
|
| 539 |
+
The index of the current boost iteration.
|
| 540 |
+
|
| 541 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 542 |
+
The training input samples.
|
| 543 |
+
|
| 544 |
+
y : array-like of shape (n_samples,)
|
| 545 |
+
The target values (class labels).
|
| 546 |
+
|
| 547 |
+
sample_weight : array-like of shape (n_samples,)
|
| 548 |
+
The current sample weights.
|
| 549 |
+
|
| 550 |
+
random_state : RandomState instance
|
| 551 |
+
The RandomState instance used if the base estimator accepts a
|
| 552 |
+
`random_state` attribute.
|
| 553 |
+
|
| 554 |
+
Returns
|
| 555 |
+
-------
|
| 556 |
+
sample_weight : array-like of shape (n_samples,) or None
|
| 557 |
+
The reweighted sample weights.
|
| 558 |
+
If None then boosting has terminated early.
|
| 559 |
+
|
| 560 |
+
estimator_weight : float
|
| 561 |
+
The weight for the current boost.
|
| 562 |
+
If None then boosting has terminated early.
|
| 563 |
+
|
| 564 |
+
estimator_error : float
|
| 565 |
+
The classification error for the current boost.
|
| 566 |
+
If None then boosting has terminated early.
|
| 567 |
+
"""
|
| 568 |
+
estimator = self._make_estimator(random_state=random_state)
|
| 569 |
+
|
| 570 |
+
estimator.fit(X, y, sample_weight=sample_weight)
|
| 571 |
+
|
| 572 |
+
y_predict = estimator.predict(X)
|
| 573 |
+
|
| 574 |
+
if iboost == 0:
|
| 575 |
+
self.classes_ = getattr(estimator, "classes_", None)
|
| 576 |
+
self.n_classes_ = len(self.classes_)
|
| 577 |
+
|
| 578 |
+
# Instances incorrectly classified
|
| 579 |
+
incorrect = y_predict != y
|
| 580 |
+
|
| 581 |
+
# Error fraction
|
| 582 |
+
estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
|
| 583 |
+
|
| 584 |
+
# Stop if classification is perfect
|
| 585 |
+
if estimator_error <= 0:
|
| 586 |
+
return sample_weight, 1.0, 0.0
|
| 587 |
+
|
| 588 |
+
n_classes = self.n_classes_
|
| 589 |
+
|
| 590 |
+
# Stop if the error is at least as bad as random guessing
|
| 591 |
+
if estimator_error >= 1.0 - (1.0 / n_classes):
|
| 592 |
+
self.estimators_.pop(-1)
|
| 593 |
+
if len(self.estimators_) == 0:
|
| 594 |
+
raise ValueError(
|
| 595 |
+
"BaseClassifier in AdaBoostClassifier "
|
| 596 |
+
"ensemble is worse than random, ensemble "
|
| 597 |
+
"can not be fit."
|
| 598 |
+
)
|
| 599 |
+
return None, None, None
|
| 600 |
+
|
| 601 |
+
# Boost weight using multi-class AdaBoost SAMME alg
|
| 602 |
+
estimator_weight = self.learning_rate * (
|
| 603 |
+
np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
|
| 604 |
+
)
|
| 605 |
+
|
| 606 |
+
# Only boost the weights if it will fit again
|
| 607 |
+
if not iboost == self.n_estimators - 1:
|
| 608 |
+
# Only boost positive weights
|
| 609 |
+
sample_weight = np.exp(
|
| 610 |
+
np.log(sample_weight)
|
| 611 |
+
+ estimator_weight * incorrect * (sample_weight > 0)
|
| 612 |
+
)
|
| 613 |
+
|
| 614 |
+
return sample_weight, estimator_weight, estimator_error
|
| 615 |
+
|
| 616 |
+
def predict(self, X):
|
| 617 |
+
"""Predict classes for X.
|
| 618 |
+
|
| 619 |
+
The predicted class of an input sample is computed as the weighted mean
|
| 620 |
+
prediction of the classifiers in the ensemble.
|
| 621 |
+
|
| 622 |
+
Parameters
|
| 623 |
+
----------
|
| 624 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 625 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 626 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 627 |
+
|
| 628 |
+
Returns
|
| 629 |
+
-------
|
| 630 |
+
y : ndarray of shape (n_samples,)
|
| 631 |
+
The predicted classes.
|
| 632 |
+
"""
|
| 633 |
+
pred = self.decision_function(X)
|
| 634 |
+
|
| 635 |
+
if self.n_classes_ == 2:
|
| 636 |
+
return self.classes_.take(pred > 0, axis=0)
|
| 637 |
+
|
| 638 |
+
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
|
| 639 |
+
|
| 640 |
+
def staged_predict(self, X):
|
| 641 |
+
"""Return staged predictions for X.
|
| 642 |
+
|
| 643 |
+
The predicted class of an input sample is computed as the weighted mean
|
| 644 |
+
prediction of the classifiers in the ensemble.
|
| 645 |
+
|
| 646 |
+
This generator method yields the ensemble prediction after each
|
| 647 |
+
iteration of boosting and therefore allows monitoring, such as to
|
| 648 |
+
determine the prediction on a test set after each boost.
|
| 649 |
+
|
| 650 |
+
Parameters
|
| 651 |
+
----------
|
| 652 |
+
X : array-like of shape (n_samples, n_features)
|
| 653 |
+
The input samples. Sparse matrix can be CSC, CSR, COO,
|
| 654 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 655 |
+
|
| 656 |
+
Yields
|
| 657 |
+
------
|
| 658 |
+
y : generator of ndarray of shape (n_samples,)
|
| 659 |
+
The predicted classes.
|
| 660 |
+
"""
|
| 661 |
+
X = self._check_X(X)
|
| 662 |
+
|
| 663 |
+
n_classes = self.n_classes_
|
| 664 |
+
classes = self.classes_
|
| 665 |
+
|
| 666 |
+
if n_classes == 2:
|
| 667 |
+
for pred in self.staged_decision_function(X):
|
| 668 |
+
yield np.array(classes.take(pred > 0, axis=0))
|
| 669 |
+
|
| 670 |
+
else:
|
| 671 |
+
for pred in self.staged_decision_function(X):
|
| 672 |
+
yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))
|
| 673 |
+
|
| 674 |
+
def decision_function(self, X):
|
| 675 |
+
"""Compute the decision function of ``X``.
|
| 676 |
+
|
| 677 |
+
Parameters
|
| 678 |
+
----------
|
| 679 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 680 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 681 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 682 |
+
|
| 683 |
+
Returns
|
| 684 |
+
-------
|
| 685 |
+
score : ndarray of shape of (n_samples, k)
|
| 686 |
+
The decision function of the input samples. The order of
|
| 687 |
+
outputs is the same as that of the :term:`classes_` attribute.
|
| 688 |
+
Binary classification is a special cases with ``k == 1``,
|
| 689 |
+
otherwise ``k==n_classes``. For binary classification,
|
| 690 |
+
values closer to -1 or 1 mean more like the first or second
|
| 691 |
+
class in ``classes_``, respectively.
|
| 692 |
+
"""
|
| 693 |
+
check_is_fitted(self)
|
| 694 |
+
X = self._check_X(X)
|
| 695 |
+
|
| 696 |
+
n_classes = self.n_classes_
|
| 697 |
+
classes = self.classes_[:, np.newaxis]
|
| 698 |
+
|
| 699 |
+
if n_classes == 1:
|
| 700 |
+
return np.zeros_like(X, shape=(X.shape[0], 1))
|
| 701 |
+
|
| 702 |
+
pred = sum(
|
| 703 |
+
np.where(
|
| 704 |
+
(estimator.predict(X) == classes).T,
|
| 705 |
+
w,
|
| 706 |
+
-1 / (n_classes - 1) * w,
|
| 707 |
+
)
|
| 708 |
+
for estimator, w in zip(self.estimators_, self.estimator_weights_)
|
| 709 |
+
)
|
| 710 |
+
|
| 711 |
+
pred /= self.estimator_weights_.sum()
|
| 712 |
+
if n_classes == 2:
|
| 713 |
+
pred[:, 0] *= -1
|
| 714 |
+
return pred.sum(axis=1)
|
| 715 |
+
return pred
|
| 716 |
+
|
| 717 |
+
def staged_decision_function(self, X):
|
| 718 |
+
"""Compute decision function of ``X`` for each boosting iteration.
|
| 719 |
+
|
| 720 |
+
This method allows monitoring (i.e. determine error on testing set)
|
| 721 |
+
after each boosting iteration.
|
| 722 |
+
|
| 723 |
+
Parameters
|
| 724 |
+
----------
|
| 725 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 726 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 727 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 728 |
+
|
| 729 |
+
Yields
|
| 730 |
+
------
|
| 731 |
+
score : generator of ndarray of shape (n_samples, k)
|
| 732 |
+
The decision function of the input samples. The order of
|
| 733 |
+
outputs is the same of that of the :term:`classes_` attribute.
|
| 734 |
+
Binary classification is a special cases with ``k == 1``,
|
| 735 |
+
otherwise ``k==n_classes``. For binary classification,
|
| 736 |
+
values closer to -1 or 1 mean more like the first or second
|
| 737 |
+
class in ``classes_``, respectively.
|
| 738 |
+
"""
|
| 739 |
+
check_is_fitted(self)
|
| 740 |
+
X = self._check_X(X)
|
| 741 |
+
|
| 742 |
+
n_classes = self.n_classes_
|
| 743 |
+
classes = self.classes_[:, np.newaxis]
|
| 744 |
+
pred = None
|
| 745 |
+
norm = 0.0
|
| 746 |
+
|
| 747 |
+
for weight, estimator in zip(self.estimator_weights_, self.estimators_):
|
| 748 |
+
norm += weight
|
| 749 |
+
|
| 750 |
+
current_pred = np.where(
|
| 751 |
+
(estimator.predict(X) == classes).T,
|
| 752 |
+
weight,
|
| 753 |
+
-1 / (n_classes - 1) * weight,
|
| 754 |
+
)
|
| 755 |
+
|
| 756 |
+
if pred is None:
|
| 757 |
+
pred = current_pred
|
| 758 |
+
else:
|
| 759 |
+
pred += current_pred
|
| 760 |
+
|
| 761 |
+
if n_classes == 2:
|
| 762 |
+
tmp_pred = np.copy(pred)
|
| 763 |
+
tmp_pred[:, 0] *= -1
|
| 764 |
+
yield (tmp_pred / norm).sum(axis=1)
|
| 765 |
+
else:
|
| 766 |
+
yield pred / norm
|
| 767 |
+
|
| 768 |
+
@staticmethod
|
| 769 |
+
def _compute_proba_from_decision(decision, n_classes):
|
| 770 |
+
"""Compute probabilities from the decision function.
|
| 771 |
+
|
| 772 |
+
This is based eq. (15) of [1] where:
|
| 773 |
+
p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X)))
|
| 774 |
+
= softmax((1 / K-1) * f(X))
|
| 775 |
+
|
| 776 |
+
References
|
| 777 |
+
----------
|
| 778 |
+
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
|
| 779 |
+
2009.
|
| 780 |
+
"""
|
| 781 |
+
if n_classes == 2:
|
| 782 |
+
decision = np.vstack([-decision, decision]).T / 2
|
| 783 |
+
else:
|
| 784 |
+
decision /= n_classes - 1
|
| 785 |
+
return softmax(decision, copy=False)
|
| 786 |
+
|
| 787 |
+
def predict_proba(self, X):
|
| 788 |
+
"""Predict class probabilities for X.
|
| 789 |
+
|
| 790 |
+
The predicted class probabilities of an input sample is computed as
|
| 791 |
+
the weighted mean predicted class probabilities of the classifiers
|
| 792 |
+
in the ensemble.
|
| 793 |
+
|
| 794 |
+
Parameters
|
| 795 |
+
----------
|
| 796 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 797 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 798 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 799 |
+
|
| 800 |
+
Returns
|
| 801 |
+
-------
|
| 802 |
+
p : ndarray of shape (n_samples, n_classes)
|
| 803 |
+
The class probabilities of the input samples. The order of
|
| 804 |
+
outputs is the same of that of the :term:`classes_` attribute.
|
| 805 |
+
"""
|
| 806 |
+
check_is_fitted(self)
|
| 807 |
+
n_classes = self.n_classes_
|
| 808 |
+
|
| 809 |
+
if n_classes == 1:
|
| 810 |
+
return np.ones((_num_samples(X), 1))
|
| 811 |
+
|
| 812 |
+
decision = self.decision_function(X)
|
| 813 |
+
return self._compute_proba_from_decision(decision, n_classes)
|
| 814 |
+
|
| 815 |
+
def staged_predict_proba(self, X):
|
| 816 |
+
"""Predict class probabilities for X.
|
| 817 |
+
|
| 818 |
+
The predicted class probabilities of an input sample is computed as
|
| 819 |
+
the weighted mean predicted class probabilities of the classifiers
|
| 820 |
+
in the ensemble.
|
| 821 |
+
|
| 822 |
+
This generator method yields the ensemble predicted class probabilities
|
| 823 |
+
after each iteration of boosting and therefore allows monitoring, such
|
| 824 |
+
as to determine the predicted class probabilities on a test set after
|
| 825 |
+
each boost.
|
| 826 |
+
|
| 827 |
+
Parameters
|
| 828 |
+
----------
|
| 829 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 830 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 831 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 832 |
+
|
| 833 |
+
Yields
|
| 834 |
+
------
|
| 835 |
+
p : generator of ndarray of shape (n_samples,)
|
| 836 |
+
The class probabilities of the input samples. The order of
|
| 837 |
+
outputs is the same of that of the :term:`classes_` attribute.
|
| 838 |
+
"""
|
| 839 |
+
|
| 840 |
+
n_classes = self.n_classes_
|
| 841 |
+
|
| 842 |
+
for decision in self.staged_decision_function(X):
|
| 843 |
+
yield self._compute_proba_from_decision(decision, n_classes)
|
| 844 |
+
|
| 845 |
+
def predict_log_proba(self, X):
|
| 846 |
+
"""Predict class log-probabilities for X.
|
| 847 |
+
|
| 848 |
+
The predicted class log-probabilities of an input sample is computed as
|
| 849 |
+
the weighted mean predicted class log-probabilities of the classifiers
|
| 850 |
+
in the ensemble.
|
| 851 |
+
|
| 852 |
+
Parameters
|
| 853 |
+
----------
|
| 854 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 855 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 856 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 857 |
+
|
| 858 |
+
Returns
|
| 859 |
+
-------
|
| 860 |
+
p : ndarray of shape (n_samples, n_classes)
|
| 861 |
+
The class probabilities of the input samples. The order of
|
| 862 |
+
outputs is the same of that of the :term:`classes_` attribute.
|
| 863 |
+
"""
|
| 864 |
+
return np.log(self.predict_proba(X))
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
class AdaBoostRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseWeightBoosting):
|
| 868 |
+
"""An AdaBoost regressor.
|
| 869 |
+
|
| 870 |
+
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
|
| 871 |
+
regressor on the original dataset and then fits additional copies of the
|
| 872 |
+
regressor on the same dataset but where the weights of instances are
|
| 873 |
+
adjusted according to the error of the current prediction. As such,
|
| 874 |
+
subsequent regressors focus more on difficult cases.
|
| 875 |
+
|
| 876 |
+
This class implements the algorithm known as AdaBoost.R2 [2].
|
| 877 |
+
|
| 878 |
+
Read more in the :ref:`User Guide <adaboost>`.
|
| 879 |
+
|
| 880 |
+
.. versionadded:: 0.14
|
| 881 |
+
|
| 882 |
+
Parameters
|
| 883 |
+
----------
|
| 884 |
+
estimator : object, default=None
|
| 885 |
+
The base estimator from which the boosted ensemble is built.
|
| 886 |
+
If ``None``, then the base estimator is
|
| 887 |
+
:class:`~sklearn.tree.DecisionTreeRegressor` initialized with
|
| 888 |
+
`max_depth=3`.
|
| 889 |
+
|
| 890 |
+
.. versionadded:: 1.2
|
| 891 |
+
`base_estimator` was renamed to `estimator`.
|
| 892 |
+
|
| 893 |
+
n_estimators : int, default=50
|
| 894 |
+
The maximum number of estimators at which boosting is terminated.
|
| 895 |
+
In case of perfect fit, the learning procedure is stopped early.
|
| 896 |
+
Values must be in the range `[1, inf)`.
|
| 897 |
+
|
| 898 |
+
learning_rate : float, default=1.0
|
| 899 |
+
Weight applied to each regressor at each boosting iteration. A higher
|
| 900 |
+
learning rate increases the contribution of each regressor. There is
|
| 901 |
+
a trade-off between the `learning_rate` and `n_estimators` parameters.
|
| 902 |
+
Values must be in the range `(0.0, inf)`.
|
| 903 |
+
|
| 904 |
+
loss : {'linear', 'square', 'exponential'}, default='linear'
|
| 905 |
+
The loss function to use when updating the weights after each
|
| 906 |
+
boosting iteration.
|
| 907 |
+
|
| 908 |
+
random_state : int, RandomState instance or None, default=None
|
| 909 |
+
Controls the random seed given at each `estimator` at each
|
| 910 |
+
boosting iteration.
|
| 911 |
+
Thus, it is only used when `estimator` exposes a `random_state`.
|
| 912 |
+
In addition, it controls the bootstrap of the weights used to train the
|
| 913 |
+
`estimator` at each boosting iteration.
|
| 914 |
+
Pass an int for reproducible output across multiple function calls.
|
| 915 |
+
See :term:`Glossary <random_state>`.
|
| 916 |
+
|
| 917 |
+
Attributes
|
| 918 |
+
----------
|
| 919 |
+
estimator_ : estimator
|
| 920 |
+
The base estimator from which the ensemble is grown.
|
| 921 |
+
|
| 922 |
+
.. versionadded:: 1.2
|
| 923 |
+
`base_estimator_` was renamed to `estimator_`.
|
| 924 |
+
|
| 925 |
+
estimators_ : list of regressors
|
| 926 |
+
The collection of fitted sub-estimators.
|
| 927 |
+
|
| 928 |
+
estimator_weights_ : ndarray of floats
|
| 929 |
+
Weights for each estimator in the boosted ensemble.
|
| 930 |
+
|
| 931 |
+
estimator_errors_ : ndarray of floats
|
| 932 |
+
Regression error for each estimator in the boosted ensemble.
|
| 933 |
+
|
| 934 |
+
feature_importances_ : ndarray of shape (n_features,)
|
| 935 |
+
The impurity-based feature importances if supported by the
|
| 936 |
+
``estimator`` (when based on decision trees).
|
| 937 |
+
|
| 938 |
+
Warning: impurity-based feature importances can be misleading for
|
| 939 |
+
high cardinality features (many unique values). See
|
| 940 |
+
:func:`sklearn.inspection.permutation_importance` as an alternative.
|
| 941 |
+
|
| 942 |
+
n_features_in_ : int
|
| 943 |
+
Number of features seen during :term:`fit`.
|
| 944 |
+
|
| 945 |
+
.. versionadded:: 0.24
|
| 946 |
+
|
| 947 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 948 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 949 |
+
has feature names that are all strings.
|
| 950 |
+
|
| 951 |
+
.. versionadded:: 1.0
|
| 952 |
+
|
| 953 |
+
See Also
|
| 954 |
+
--------
|
| 955 |
+
AdaBoostClassifier : An AdaBoost classifier.
|
| 956 |
+
GradientBoostingRegressor : Gradient Boosting Classification Tree.
|
| 957 |
+
sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
|
| 958 |
+
|
| 959 |
+
References
|
| 960 |
+
----------
|
| 961 |
+
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
|
| 962 |
+
on-Line Learning and an Application to Boosting", 1995.
|
| 963 |
+
|
| 964 |
+
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
|
| 965 |
+
|
| 966 |
+
Examples
|
| 967 |
+
--------
|
| 968 |
+
>>> from sklearn.ensemble import AdaBoostRegressor
|
| 969 |
+
>>> from sklearn.datasets import make_regression
|
| 970 |
+
>>> X, y = make_regression(n_features=4, n_informative=2,
|
| 971 |
+
... random_state=0, shuffle=False)
|
| 972 |
+
>>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
|
| 973 |
+
>>> regr.fit(X, y)
|
| 974 |
+
AdaBoostRegressor(n_estimators=100, random_state=0)
|
| 975 |
+
>>> regr.predict([[0, 0, 0, 0]])
|
| 976 |
+
array([4.7972...])
|
| 977 |
+
>>> regr.score(X, y)
|
| 978 |
+
0.9771...
|
| 979 |
+
|
| 980 |
+
For a detailed example of utilizing :class:`~sklearn.ensemble.AdaBoostRegressor`
|
| 981 |
+
to fit a sequence of decision trees as weak learners, please refer to
|
| 982 |
+
:ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_regression.py`.
|
| 983 |
+
"""
|
| 984 |
+
|
| 985 |
+
_parameter_constraints: dict = {
|
| 986 |
+
**BaseWeightBoosting._parameter_constraints,
|
| 987 |
+
"loss": [StrOptions({"linear", "square", "exponential"})],
|
| 988 |
+
}
|
| 989 |
+
|
| 990 |
+
def __init__(
|
| 991 |
+
self,
|
| 992 |
+
estimator=None,
|
| 993 |
+
*,
|
| 994 |
+
n_estimators=50,
|
| 995 |
+
learning_rate=1.0,
|
| 996 |
+
loss="linear",
|
| 997 |
+
random_state=None,
|
| 998 |
+
):
|
| 999 |
+
super().__init__(
|
| 1000 |
+
estimator=estimator,
|
| 1001 |
+
n_estimators=n_estimators,
|
| 1002 |
+
learning_rate=learning_rate,
|
| 1003 |
+
random_state=random_state,
|
| 1004 |
+
)
|
| 1005 |
+
|
| 1006 |
+
self.loss = loss
|
| 1007 |
+
self.random_state = random_state
|
| 1008 |
+
|
| 1009 |
+
def _validate_estimator(self):
|
| 1010 |
+
"""Check the estimator and set the estimator_ attribute."""
|
| 1011 |
+
super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3))
|
| 1012 |
+
|
| 1013 |
+
def _boost(self, iboost, X, y, sample_weight, random_state):
|
| 1014 |
+
"""Implement a single boost for regression
|
| 1015 |
+
|
| 1016 |
+
Perform a single boost according to the AdaBoost.R2 algorithm and
|
| 1017 |
+
return the updated sample weights.
|
| 1018 |
+
|
| 1019 |
+
Parameters
|
| 1020 |
+
----------
|
| 1021 |
+
iboost : int
|
| 1022 |
+
The index of the current boost iteration.
|
| 1023 |
+
|
| 1024 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 1025 |
+
The training input samples.
|
| 1026 |
+
|
| 1027 |
+
y : array-like of shape (n_samples,)
|
| 1028 |
+
The target values (class labels in classification, real numbers in
|
| 1029 |
+
regression).
|
| 1030 |
+
|
| 1031 |
+
sample_weight : array-like of shape (n_samples,)
|
| 1032 |
+
The current sample weights.
|
| 1033 |
+
|
| 1034 |
+
random_state : RandomState
|
| 1035 |
+
The RandomState instance used if the base estimator accepts a
|
| 1036 |
+
`random_state` attribute.
|
| 1037 |
+
Controls also the bootstrap of the weights used to train the weak
|
| 1038 |
+
learner.
|
| 1039 |
+
|
| 1040 |
+
Returns
|
| 1041 |
+
-------
|
| 1042 |
+
sample_weight : array-like of shape (n_samples,) or None
|
| 1043 |
+
The reweighted sample weights.
|
| 1044 |
+
If None then boosting has terminated early.
|
| 1045 |
+
|
| 1046 |
+
estimator_weight : float
|
| 1047 |
+
The weight for the current boost.
|
| 1048 |
+
If None then boosting has terminated early.
|
| 1049 |
+
|
| 1050 |
+
estimator_error : float
|
| 1051 |
+
The regression error for the current boost.
|
| 1052 |
+
If None then boosting has terminated early.
|
| 1053 |
+
"""
|
| 1054 |
+
estimator = self._make_estimator(random_state=random_state)
|
| 1055 |
+
|
| 1056 |
+
# Weighted sampling of the training set with replacement
|
| 1057 |
+
bootstrap_idx = random_state.choice(
|
| 1058 |
+
np.arange(_num_samples(X)),
|
| 1059 |
+
size=_num_samples(X),
|
| 1060 |
+
replace=True,
|
| 1061 |
+
p=sample_weight,
|
| 1062 |
+
)
|
| 1063 |
+
|
| 1064 |
+
# Fit on the bootstrapped sample and obtain a prediction
|
| 1065 |
+
# for all samples in the training set
|
| 1066 |
+
X_ = _safe_indexing(X, bootstrap_idx)
|
| 1067 |
+
y_ = _safe_indexing(y, bootstrap_idx)
|
| 1068 |
+
estimator.fit(X_, y_)
|
| 1069 |
+
y_predict = estimator.predict(X)
|
| 1070 |
+
|
| 1071 |
+
error_vect = np.abs(y_predict - y)
|
| 1072 |
+
sample_mask = sample_weight > 0
|
| 1073 |
+
masked_sample_weight = sample_weight[sample_mask]
|
| 1074 |
+
masked_error_vector = error_vect[sample_mask]
|
| 1075 |
+
|
| 1076 |
+
error_max = masked_error_vector.max()
|
| 1077 |
+
if error_max != 0:
|
| 1078 |
+
masked_error_vector /= error_max
|
| 1079 |
+
|
| 1080 |
+
if self.loss == "square":
|
| 1081 |
+
masked_error_vector **= 2
|
| 1082 |
+
elif self.loss == "exponential":
|
| 1083 |
+
masked_error_vector = 1.0 - np.exp(-masked_error_vector)
|
| 1084 |
+
|
| 1085 |
+
# Calculate the average loss
|
| 1086 |
+
estimator_error = (masked_sample_weight * masked_error_vector).sum()
|
| 1087 |
+
|
| 1088 |
+
if estimator_error <= 0:
|
| 1089 |
+
# Stop if fit is perfect
|
| 1090 |
+
return sample_weight, 1.0, 0.0
|
| 1091 |
+
|
| 1092 |
+
elif estimator_error >= 0.5:
|
| 1093 |
+
# Discard current estimator only if it isn't the only one
|
| 1094 |
+
if len(self.estimators_) > 1:
|
| 1095 |
+
self.estimators_.pop(-1)
|
| 1096 |
+
return None, None, None
|
| 1097 |
+
|
| 1098 |
+
beta = estimator_error / (1.0 - estimator_error)
|
| 1099 |
+
|
| 1100 |
+
# Boost weight using AdaBoost.R2 alg
|
| 1101 |
+
estimator_weight = self.learning_rate * np.log(1.0 / beta)
|
| 1102 |
+
|
| 1103 |
+
if not iboost == self.n_estimators - 1:
|
| 1104 |
+
sample_weight[sample_mask] *= np.power(
|
| 1105 |
+
beta, (1.0 - masked_error_vector) * self.learning_rate
|
| 1106 |
+
)
|
| 1107 |
+
|
| 1108 |
+
return sample_weight, estimator_weight, estimator_error
|
| 1109 |
+
|
| 1110 |
+
def _get_median_predict(self, X, limit):
|
| 1111 |
+
# Evaluate predictions of all estimators
|
| 1112 |
+
predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T
|
| 1113 |
+
|
| 1114 |
+
# Sort the predictions
|
| 1115 |
+
sorted_idx = np.argsort(predictions, axis=1)
|
| 1116 |
+
|
| 1117 |
+
# Find index of median prediction for each sample
|
| 1118 |
+
weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
|
| 1119 |
+
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
|
| 1120 |
+
median_idx = median_or_above.argmax(axis=1)
|
| 1121 |
+
|
| 1122 |
+
median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]
|
| 1123 |
+
|
| 1124 |
+
# Return median predictions
|
| 1125 |
+
return predictions[np.arange(_num_samples(X)), median_estimators]
|
| 1126 |
+
|
| 1127 |
+
def predict(self, X):
|
| 1128 |
+
"""Predict regression value for X.
|
| 1129 |
+
|
| 1130 |
+
The predicted regression value of an input sample is computed
|
| 1131 |
+
as the weighted median prediction of the regressors in the ensemble.
|
| 1132 |
+
|
| 1133 |
+
Parameters
|
| 1134 |
+
----------
|
| 1135 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 1136 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 1137 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 1138 |
+
|
| 1139 |
+
Returns
|
| 1140 |
+
-------
|
| 1141 |
+
y : ndarray of shape (n_samples,)
|
| 1142 |
+
The predicted regression values.
|
| 1143 |
+
"""
|
| 1144 |
+
check_is_fitted(self)
|
| 1145 |
+
X = self._check_X(X)
|
| 1146 |
+
|
| 1147 |
+
return self._get_median_predict(X, len(self.estimators_))
|
| 1148 |
+
|
| 1149 |
+
def staged_predict(self, X):
|
| 1150 |
+
"""Return staged predictions for X.
|
| 1151 |
+
|
| 1152 |
+
The predicted regression value of an input sample is computed
|
| 1153 |
+
as the weighted median prediction of the regressors in the ensemble.
|
| 1154 |
+
|
| 1155 |
+
This generator method yields the ensemble prediction after each
|
| 1156 |
+
iteration of boosting and therefore allows monitoring, such as to
|
| 1157 |
+
determine the prediction on a test set after each boost.
|
| 1158 |
+
|
| 1159 |
+
Parameters
|
| 1160 |
+
----------
|
| 1161 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 1162 |
+
The training input samples.
|
| 1163 |
+
|
| 1164 |
+
Yields
|
| 1165 |
+
------
|
| 1166 |
+
y : generator of ndarray of shape (n_samples,)
|
| 1167 |
+
The predicted regression values.
|
| 1168 |
+
"""
|
| 1169 |
+
check_is_fitted(self)
|
| 1170 |
+
X = self._check_X(X)
|
| 1171 |
+
|
| 1172 |
+
for i, _ in enumerate(self.estimators_, 1):
|
| 1173 |
+
yield self._get_median_predict(X, limit=i)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/meson.build
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
py.extension_module(
|
| 2 |
+
'_gradient_boosting',
|
| 3 |
+
['_gradient_boosting.pyx'] + utils_cython_tree,
|
| 4 |
+
dependencies: [np_dep],
|
| 5 |
+
cython_args: cython_args,
|
| 6 |
+
subdir: 'sklearn/ensemble',
|
| 7 |
+
install: true
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
subdir('_hist_gradient_boosting')
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__init__.py
ADDED
|
File without changes
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc
ADDED
|
Binary file (21.8 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc
ADDED
|
Binary file (3.05 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc
ADDED
|
Binary file (5.1 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc
ADDED
|
Binary file (44.9 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc
ADDED
|
Binary file (38.5 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc
ADDED
|
Binary file (21.5 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc
ADDED
|
Binary file (21.1 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc
ADDED
|
Binary file (17.5 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py
ADDED
|
@@ -0,0 +1,977 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Authors: The scikit-learn developers
|
| 6 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 7 |
+
|
| 8 |
+
from itertools import cycle, product
|
| 9 |
+
|
| 10 |
+
import joblib
|
| 11 |
+
import numpy as np
|
| 12 |
+
import pytest
|
| 13 |
+
|
| 14 |
+
import sklearn
|
| 15 |
+
from sklearn.base import BaseEstimator
|
| 16 |
+
from sklearn.datasets import load_diabetes, load_iris, make_hastie_10_2
|
| 17 |
+
from sklearn.dummy import DummyClassifier, DummyRegressor
|
| 18 |
+
from sklearn.ensemble import (
|
| 19 |
+
AdaBoostClassifier,
|
| 20 |
+
AdaBoostRegressor,
|
| 21 |
+
BaggingClassifier,
|
| 22 |
+
BaggingRegressor,
|
| 23 |
+
HistGradientBoostingClassifier,
|
| 24 |
+
HistGradientBoostingRegressor,
|
| 25 |
+
RandomForestClassifier,
|
| 26 |
+
RandomForestRegressor,
|
| 27 |
+
)
|
| 28 |
+
from sklearn.feature_selection import SelectKBest
|
| 29 |
+
from sklearn.linear_model import LogisticRegression, Perceptron
|
| 30 |
+
from sklearn.model_selection import GridSearchCV, ParameterGrid, train_test_split
|
| 31 |
+
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
|
| 32 |
+
from sklearn.pipeline import make_pipeline
|
| 33 |
+
from sklearn.preprocessing import FunctionTransformer, scale
|
| 34 |
+
from sklearn.random_projection import SparseRandomProjection
|
| 35 |
+
from sklearn.svm import SVC, SVR
|
| 36 |
+
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
|
| 37 |
+
from sklearn.utils import check_random_state
|
| 38 |
+
from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal
|
| 39 |
+
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
|
| 40 |
+
|
| 41 |
+
rng = check_random_state(0)
|
| 42 |
+
|
| 43 |
+
# also load the iris dataset
|
| 44 |
+
# and randomly permute it
|
| 45 |
+
iris = load_iris()
|
| 46 |
+
perm = rng.permutation(iris.target.size)
|
| 47 |
+
iris.data = iris.data[perm]
|
| 48 |
+
iris.target = iris.target[perm]
|
| 49 |
+
|
| 50 |
+
# also load the diabetes dataset
|
| 51 |
+
# and randomly permute it
|
| 52 |
+
diabetes = load_diabetes()
|
| 53 |
+
perm = rng.permutation(diabetes.target.size)
|
| 54 |
+
diabetes.data = diabetes.data[perm]
|
| 55 |
+
diabetes.target = diabetes.target[perm]
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def test_classification():
|
| 59 |
+
# Check classification for various parameter settings.
|
| 60 |
+
rng = check_random_state(0)
|
| 61 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 62 |
+
iris.data, iris.target, random_state=rng
|
| 63 |
+
)
|
| 64 |
+
grid = ParameterGrid(
|
| 65 |
+
{
|
| 66 |
+
"max_samples": [0.5, 1.0],
|
| 67 |
+
"max_features": [1, 4],
|
| 68 |
+
"bootstrap": [True, False],
|
| 69 |
+
"bootstrap_features": [True, False],
|
| 70 |
+
}
|
| 71 |
+
)
|
| 72 |
+
estimators = [
|
| 73 |
+
None,
|
| 74 |
+
DummyClassifier(),
|
| 75 |
+
Perceptron(max_iter=20),
|
| 76 |
+
DecisionTreeClassifier(max_depth=2),
|
| 77 |
+
KNeighborsClassifier(),
|
| 78 |
+
SVC(),
|
| 79 |
+
]
|
| 80 |
+
# Try different parameter settings with different base classifiers without
|
| 81 |
+
# doing the full cartesian product to keep the test durations low.
|
| 82 |
+
for params, estimator in zip(grid, cycle(estimators)):
|
| 83 |
+
BaggingClassifier(
|
| 84 |
+
estimator=estimator,
|
| 85 |
+
random_state=rng,
|
| 86 |
+
n_estimators=2,
|
| 87 |
+
**params,
|
| 88 |
+
).fit(X_train, y_train).predict(X_test)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@pytest.mark.parametrize(
|
| 92 |
+
"sparse_container, params, method",
|
| 93 |
+
product(
|
| 94 |
+
CSR_CONTAINERS + CSC_CONTAINERS,
|
| 95 |
+
[
|
| 96 |
+
{
|
| 97 |
+
"max_samples": 0.5,
|
| 98 |
+
"max_features": 2,
|
| 99 |
+
"bootstrap": True,
|
| 100 |
+
"bootstrap_features": True,
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"max_samples": 1.0,
|
| 104 |
+
"max_features": 4,
|
| 105 |
+
"bootstrap": True,
|
| 106 |
+
"bootstrap_features": True,
|
| 107 |
+
},
|
| 108 |
+
{"max_features": 2, "bootstrap": False, "bootstrap_features": True},
|
| 109 |
+
{"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False},
|
| 110 |
+
],
|
| 111 |
+
["predict", "predict_proba", "predict_log_proba", "decision_function"],
|
| 112 |
+
),
|
| 113 |
+
)
|
| 114 |
+
def test_sparse_classification(sparse_container, params, method):
|
| 115 |
+
# Check classification for various parameter settings on sparse input.
|
| 116 |
+
|
| 117 |
+
class CustomSVC(SVC):
|
| 118 |
+
"""SVC variant that records the nature of the training set"""
|
| 119 |
+
|
| 120 |
+
def fit(self, X, y):
|
| 121 |
+
super().fit(X, y)
|
| 122 |
+
self.data_type_ = type(X)
|
| 123 |
+
return self
|
| 124 |
+
|
| 125 |
+
rng = check_random_state(0)
|
| 126 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 127 |
+
scale(iris.data), iris.target, random_state=rng
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
X_train_sparse = sparse_container(X_train)
|
| 131 |
+
X_test_sparse = sparse_container(X_test)
|
| 132 |
+
# Trained on sparse format
|
| 133 |
+
sparse_classifier = BaggingClassifier(
|
| 134 |
+
estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"),
|
| 135 |
+
random_state=1,
|
| 136 |
+
**params,
|
| 137 |
+
).fit(X_train_sparse, y_train)
|
| 138 |
+
sparse_results = getattr(sparse_classifier, method)(X_test_sparse)
|
| 139 |
+
|
| 140 |
+
# Trained on dense format
|
| 141 |
+
dense_classifier = BaggingClassifier(
|
| 142 |
+
estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"),
|
| 143 |
+
random_state=1,
|
| 144 |
+
**params,
|
| 145 |
+
).fit(X_train, y_train)
|
| 146 |
+
dense_results = getattr(dense_classifier, method)(X_test)
|
| 147 |
+
assert_array_almost_equal(sparse_results, dense_results)
|
| 148 |
+
|
| 149 |
+
sparse_type = type(X_train_sparse)
|
| 150 |
+
types = [i.data_type_ for i in sparse_classifier.estimators_]
|
| 151 |
+
|
| 152 |
+
assert all([t == sparse_type for t in types])
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def test_regression():
|
| 156 |
+
# Check regression for various parameter settings.
|
| 157 |
+
rng = check_random_state(0)
|
| 158 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 159 |
+
diabetes.data[:50], diabetes.target[:50], random_state=rng
|
| 160 |
+
)
|
| 161 |
+
grid = ParameterGrid(
|
| 162 |
+
{
|
| 163 |
+
"max_samples": [0.5, 1.0],
|
| 164 |
+
"max_features": [0.5, 1.0],
|
| 165 |
+
"bootstrap": [True, False],
|
| 166 |
+
"bootstrap_features": [True, False],
|
| 167 |
+
}
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
for estimator in [
|
| 171 |
+
None,
|
| 172 |
+
DummyRegressor(),
|
| 173 |
+
DecisionTreeRegressor(),
|
| 174 |
+
KNeighborsRegressor(),
|
| 175 |
+
SVR(),
|
| 176 |
+
]:
|
| 177 |
+
for params in grid:
|
| 178 |
+
BaggingRegressor(estimator=estimator, random_state=rng, **params).fit(
|
| 179 |
+
X_train, y_train
|
| 180 |
+
).predict(X_test)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
|
| 184 |
+
def test_sparse_regression(sparse_container):
|
| 185 |
+
# Check regression for various parameter settings on sparse input.
|
| 186 |
+
rng = check_random_state(0)
|
| 187 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 188 |
+
diabetes.data[:50], diabetes.target[:50], random_state=rng
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
class CustomSVR(SVR):
|
| 192 |
+
"""SVC variant that records the nature of the training set"""
|
| 193 |
+
|
| 194 |
+
def fit(self, X, y):
|
| 195 |
+
super().fit(X, y)
|
| 196 |
+
self.data_type_ = type(X)
|
| 197 |
+
return self
|
| 198 |
+
|
| 199 |
+
parameter_sets = [
|
| 200 |
+
{
|
| 201 |
+
"max_samples": 0.5,
|
| 202 |
+
"max_features": 2,
|
| 203 |
+
"bootstrap": True,
|
| 204 |
+
"bootstrap_features": True,
|
| 205 |
+
},
|
| 206 |
+
{
|
| 207 |
+
"max_samples": 1.0,
|
| 208 |
+
"max_features": 4,
|
| 209 |
+
"bootstrap": True,
|
| 210 |
+
"bootstrap_features": True,
|
| 211 |
+
},
|
| 212 |
+
{"max_features": 2, "bootstrap": False, "bootstrap_features": True},
|
| 213 |
+
{"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False},
|
| 214 |
+
]
|
| 215 |
+
|
| 216 |
+
X_train_sparse = sparse_container(X_train)
|
| 217 |
+
X_test_sparse = sparse_container(X_test)
|
| 218 |
+
for params in parameter_sets:
|
| 219 |
+
# Trained on sparse format
|
| 220 |
+
sparse_classifier = BaggingRegressor(
|
| 221 |
+
estimator=CustomSVR(), random_state=1, **params
|
| 222 |
+
).fit(X_train_sparse, y_train)
|
| 223 |
+
sparse_results = sparse_classifier.predict(X_test_sparse)
|
| 224 |
+
|
| 225 |
+
# Trained on dense format
|
| 226 |
+
dense_results = (
|
| 227 |
+
BaggingRegressor(estimator=CustomSVR(), random_state=1, **params)
|
| 228 |
+
.fit(X_train, y_train)
|
| 229 |
+
.predict(X_test)
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
sparse_type = type(X_train_sparse)
|
| 233 |
+
types = [i.data_type_ for i in sparse_classifier.estimators_]
|
| 234 |
+
|
| 235 |
+
assert_array_almost_equal(sparse_results, dense_results)
|
| 236 |
+
assert all([t == sparse_type for t in types])
|
| 237 |
+
assert_array_almost_equal(sparse_results, dense_results)
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
class DummySizeEstimator(BaseEstimator):
|
| 241 |
+
def fit(self, X, y):
|
| 242 |
+
self.training_size_ = X.shape[0]
|
| 243 |
+
self.training_hash_ = joblib.hash(X)
|
| 244 |
+
|
| 245 |
+
def predict(self, X):
|
| 246 |
+
return np.ones(X.shape[0])
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def test_bootstrap_samples():
|
| 250 |
+
# Test that bootstrapping samples generate non-perfect base estimators.
|
| 251 |
+
rng = check_random_state(0)
|
| 252 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 253 |
+
diabetes.data, diabetes.target, random_state=rng
|
| 254 |
+
)
|
| 255 |
+
|
| 256 |
+
estimator = DecisionTreeRegressor().fit(X_train, y_train)
|
| 257 |
+
|
| 258 |
+
# without bootstrap, all trees are perfect on the training set
|
| 259 |
+
ensemble = BaggingRegressor(
|
| 260 |
+
estimator=DecisionTreeRegressor(),
|
| 261 |
+
max_samples=1.0,
|
| 262 |
+
bootstrap=False,
|
| 263 |
+
random_state=rng,
|
| 264 |
+
).fit(X_train, y_train)
|
| 265 |
+
|
| 266 |
+
assert estimator.score(X_train, y_train) == ensemble.score(X_train, y_train)
|
| 267 |
+
|
| 268 |
+
# with bootstrap, trees are no longer perfect on the training set
|
| 269 |
+
ensemble = BaggingRegressor(
|
| 270 |
+
estimator=DecisionTreeRegressor(),
|
| 271 |
+
max_samples=1.0,
|
| 272 |
+
bootstrap=True,
|
| 273 |
+
random_state=rng,
|
| 274 |
+
).fit(X_train, y_train)
|
| 275 |
+
|
| 276 |
+
assert estimator.score(X_train, y_train) > ensemble.score(X_train, y_train)
|
| 277 |
+
|
| 278 |
+
# check that each sampling correspond to a complete bootstrap resample.
|
| 279 |
+
# the size of each bootstrap should be the same as the input data but
|
| 280 |
+
# the data should be different (checked using the hash of the data).
|
| 281 |
+
ensemble = BaggingRegressor(estimator=DummySizeEstimator(), bootstrap=True).fit(
|
| 282 |
+
X_train, y_train
|
| 283 |
+
)
|
| 284 |
+
training_hash = []
|
| 285 |
+
for estimator in ensemble.estimators_:
|
| 286 |
+
assert estimator.training_size_ == X_train.shape[0]
|
| 287 |
+
training_hash.append(estimator.training_hash_)
|
| 288 |
+
assert len(set(training_hash)) == len(training_hash)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def test_bootstrap_features():
|
| 292 |
+
# Test that bootstrapping features may generate duplicate features.
|
| 293 |
+
rng = check_random_state(0)
|
| 294 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 295 |
+
diabetes.data, diabetes.target, random_state=rng
|
| 296 |
+
)
|
| 297 |
+
|
| 298 |
+
ensemble = BaggingRegressor(
|
| 299 |
+
estimator=DecisionTreeRegressor(),
|
| 300 |
+
max_features=1.0,
|
| 301 |
+
bootstrap_features=False,
|
| 302 |
+
random_state=rng,
|
| 303 |
+
).fit(X_train, y_train)
|
| 304 |
+
|
| 305 |
+
for features in ensemble.estimators_features_:
|
| 306 |
+
assert diabetes.data.shape[1] == np.unique(features).shape[0]
|
| 307 |
+
|
| 308 |
+
ensemble = BaggingRegressor(
|
| 309 |
+
estimator=DecisionTreeRegressor(),
|
| 310 |
+
max_features=1.0,
|
| 311 |
+
bootstrap_features=True,
|
| 312 |
+
random_state=rng,
|
| 313 |
+
).fit(X_train, y_train)
|
| 314 |
+
|
| 315 |
+
for features in ensemble.estimators_features_:
|
| 316 |
+
assert diabetes.data.shape[1] > np.unique(features).shape[0]
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def test_probability():
|
| 320 |
+
# Predict probabilities.
|
| 321 |
+
rng = check_random_state(0)
|
| 322 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 323 |
+
iris.data, iris.target, random_state=rng
|
| 324 |
+
)
|
| 325 |
+
|
| 326 |
+
with np.errstate(divide="ignore", invalid="ignore"):
|
| 327 |
+
# Normal case
|
| 328 |
+
ensemble = BaggingClassifier(
|
| 329 |
+
estimator=DecisionTreeClassifier(), random_state=rng
|
| 330 |
+
).fit(X_train, y_train)
|
| 331 |
+
|
| 332 |
+
assert_array_almost_equal(
|
| 333 |
+
np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
assert_array_almost_equal(
|
| 337 |
+
ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
# Degenerate case, where some classes are missing
|
| 341 |
+
ensemble = BaggingClassifier(
|
| 342 |
+
estimator=LogisticRegression(), random_state=rng, max_samples=5
|
| 343 |
+
).fit(X_train, y_train)
|
| 344 |
+
|
| 345 |
+
assert_array_almost_equal(
|
| 346 |
+
np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))
|
| 347 |
+
)
|
| 348 |
+
|
| 349 |
+
assert_array_almost_equal(
|
| 350 |
+
ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def test_oob_score_classification():
|
| 355 |
+
# Check that oob prediction is a good estimation of the generalization
|
| 356 |
+
# error.
|
| 357 |
+
rng = check_random_state(0)
|
| 358 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 359 |
+
iris.data, iris.target, random_state=rng
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
for estimator in [DecisionTreeClassifier(), SVC()]:
|
| 363 |
+
clf = BaggingClassifier(
|
| 364 |
+
estimator=estimator,
|
| 365 |
+
n_estimators=100,
|
| 366 |
+
bootstrap=True,
|
| 367 |
+
oob_score=True,
|
| 368 |
+
random_state=rng,
|
| 369 |
+
).fit(X_train, y_train)
|
| 370 |
+
|
| 371 |
+
test_score = clf.score(X_test, y_test)
|
| 372 |
+
|
| 373 |
+
assert abs(test_score - clf.oob_score_) < 0.1
|
| 374 |
+
|
| 375 |
+
# Test with few estimators
|
| 376 |
+
warn_msg = (
|
| 377 |
+
"Some inputs do not have OOB scores. This probably means too few "
|
| 378 |
+
"estimators were used to compute any reliable oob estimates."
|
| 379 |
+
)
|
| 380 |
+
with pytest.warns(UserWarning, match=warn_msg):
|
| 381 |
+
clf = BaggingClassifier(
|
| 382 |
+
estimator=estimator,
|
| 383 |
+
n_estimators=1,
|
| 384 |
+
bootstrap=True,
|
| 385 |
+
oob_score=True,
|
| 386 |
+
random_state=rng,
|
| 387 |
+
)
|
| 388 |
+
clf.fit(X_train, y_train)
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def test_oob_score_regression():
|
| 392 |
+
# Check that oob prediction is a good estimation of the generalization
|
| 393 |
+
# error.
|
| 394 |
+
rng = check_random_state(0)
|
| 395 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 396 |
+
diabetes.data, diabetes.target, random_state=rng
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
clf = BaggingRegressor(
|
| 400 |
+
estimator=DecisionTreeRegressor(),
|
| 401 |
+
n_estimators=50,
|
| 402 |
+
bootstrap=True,
|
| 403 |
+
oob_score=True,
|
| 404 |
+
random_state=rng,
|
| 405 |
+
).fit(X_train, y_train)
|
| 406 |
+
|
| 407 |
+
test_score = clf.score(X_test, y_test)
|
| 408 |
+
|
| 409 |
+
assert abs(test_score - clf.oob_score_) < 0.1
|
| 410 |
+
|
| 411 |
+
# Test with few estimators
|
| 412 |
+
warn_msg = (
|
| 413 |
+
"Some inputs do not have OOB scores. This probably means too few "
|
| 414 |
+
"estimators were used to compute any reliable oob estimates."
|
| 415 |
+
)
|
| 416 |
+
with pytest.warns(UserWarning, match=warn_msg):
|
| 417 |
+
regr = BaggingRegressor(
|
| 418 |
+
estimator=DecisionTreeRegressor(),
|
| 419 |
+
n_estimators=1,
|
| 420 |
+
bootstrap=True,
|
| 421 |
+
oob_score=True,
|
| 422 |
+
random_state=rng,
|
| 423 |
+
)
|
| 424 |
+
regr.fit(X_train, y_train)
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def test_single_estimator():
|
| 428 |
+
# Check singleton ensembles.
|
| 429 |
+
rng = check_random_state(0)
|
| 430 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 431 |
+
diabetes.data, diabetes.target, random_state=rng
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
clf1 = BaggingRegressor(
|
| 435 |
+
estimator=KNeighborsRegressor(),
|
| 436 |
+
n_estimators=1,
|
| 437 |
+
bootstrap=False,
|
| 438 |
+
bootstrap_features=False,
|
| 439 |
+
random_state=rng,
|
| 440 |
+
).fit(X_train, y_train)
|
| 441 |
+
|
| 442 |
+
clf2 = KNeighborsRegressor().fit(X_train, y_train)
|
| 443 |
+
|
| 444 |
+
assert_array_almost_equal(clf1.predict(X_test), clf2.predict(X_test))
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
def test_error():
|
| 448 |
+
# Test support of decision_function
|
| 449 |
+
X, y = iris.data, iris.target
|
| 450 |
+
base = DecisionTreeClassifier()
|
| 451 |
+
assert not hasattr(BaggingClassifier(base).fit(X, y), "decision_function")
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def test_parallel_classification():
|
| 455 |
+
# Check parallel classification.
|
| 456 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 457 |
+
iris.data, iris.target, random_state=0
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
ensemble = BaggingClassifier(
|
| 461 |
+
DecisionTreeClassifier(), n_jobs=3, random_state=0
|
| 462 |
+
).fit(X_train, y_train)
|
| 463 |
+
|
| 464 |
+
# predict_proba
|
| 465 |
+
y1 = ensemble.predict_proba(X_test)
|
| 466 |
+
ensemble.set_params(n_jobs=1)
|
| 467 |
+
y2 = ensemble.predict_proba(X_test)
|
| 468 |
+
assert_array_almost_equal(y1, y2)
|
| 469 |
+
|
| 470 |
+
ensemble = BaggingClassifier(
|
| 471 |
+
DecisionTreeClassifier(), n_jobs=1, random_state=0
|
| 472 |
+
).fit(X_train, y_train)
|
| 473 |
+
|
| 474 |
+
y3 = ensemble.predict_proba(X_test)
|
| 475 |
+
assert_array_almost_equal(y1, y3)
|
| 476 |
+
|
| 477 |
+
# decision_function
|
| 478 |
+
ensemble = BaggingClassifier(
|
| 479 |
+
SVC(decision_function_shape="ovr"), n_jobs=3, random_state=0
|
| 480 |
+
).fit(X_train, y_train)
|
| 481 |
+
|
| 482 |
+
decisions1 = ensemble.decision_function(X_test)
|
| 483 |
+
ensemble.set_params(n_jobs=1)
|
| 484 |
+
decisions2 = ensemble.decision_function(X_test)
|
| 485 |
+
assert_array_almost_equal(decisions1, decisions2)
|
| 486 |
+
|
| 487 |
+
ensemble = BaggingClassifier(
|
| 488 |
+
SVC(decision_function_shape="ovr"), n_jobs=1, random_state=0
|
| 489 |
+
).fit(X_train, y_train)
|
| 490 |
+
|
| 491 |
+
decisions3 = ensemble.decision_function(X_test)
|
| 492 |
+
assert_array_almost_equal(decisions1, decisions3)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def test_parallel_regression():
|
| 496 |
+
# Check parallel regression.
|
| 497 |
+
rng = check_random_state(0)
|
| 498 |
+
|
| 499 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 500 |
+
diabetes.data, diabetes.target, random_state=rng
|
| 501 |
+
)
|
| 502 |
+
|
| 503 |
+
ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(
|
| 504 |
+
X_train, y_train
|
| 505 |
+
)
|
| 506 |
+
|
| 507 |
+
ensemble.set_params(n_jobs=1)
|
| 508 |
+
y1 = ensemble.predict(X_test)
|
| 509 |
+
ensemble.set_params(n_jobs=2)
|
| 510 |
+
y2 = ensemble.predict(X_test)
|
| 511 |
+
assert_array_almost_equal(y1, y2)
|
| 512 |
+
|
| 513 |
+
ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit(
|
| 514 |
+
X_train, y_train
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
y3 = ensemble.predict(X_test)
|
| 518 |
+
assert_array_almost_equal(y1, y3)
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def test_gridsearch():
|
| 522 |
+
# Check that bagging ensembles can be grid-searched.
|
| 523 |
+
# Transform iris into a binary classification task
|
| 524 |
+
X, y = iris.data, iris.target
|
| 525 |
+
y[y == 2] = 1
|
| 526 |
+
|
| 527 |
+
# Grid search with scoring based on decision_function
|
| 528 |
+
parameters = {"n_estimators": (1, 2), "estimator__C": (1, 2)}
|
| 529 |
+
|
| 530 |
+
GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
def test_estimator():
|
| 534 |
+
# Check estimator and its default values.
|
| 535 |
+
rng = check_random_state(0)
|
| 536 |
+
|
| 537 |
+
# Classification
|
| 538 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 539 |
+
iris.data, iris.target, random_state=rng
|
| 540 |
+
)
|
| 541 |
+
|
| 542 |
+
ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train)
|
| 543 |
+
|
| 544 |
+
assert isinstance(ensemble.estimator_, DecisionTreeClassifier)
|
| 545 |
+
|
| 546 |
+
ensemble = BaggingClassifier(
|
| 547 |
+
DecisionTreeClassifier(), n_jobs=3, random_state=0
|
| 548 |
+
).fit(X_train, y_train)
|
| 549 |
+
|
| 550 |
+
assert isinstance(ensemble.estimator_, DecisionTreeClassifier)
|
| 551 |
+
|
| 552 |
+
ensemble = BaggingClassifier(Perceptron(), n_jobs=3, random_state=0).fit(
|
| 553 |
+
X_train, y_train
|
| 554 |
+
)
|
| 555 |
+
|
| 556 |
+
assert isinstance(ensemble.estimator_, Perceptron)
|
| 557 |
+
|
| 558 |
+
# Regression
|
| 559 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 560 |
+
diabetes.data, diabetes.target, random_state=rng
|
| 561 |
+
)
|
| 562 |
+
|
| 563 |
+
ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train)
|
| 564 |
+
|
| 565 |
+
assert isinstance(ensemble.estimator_, DecisionTreeRegressor)
|
| 566 |
+
|
| 567 |
+
ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(
|
| 568 |
+
X_train, y_train
|
| 569 |
+
)
|
| 570 |
+
|
| 571 |
+
assert isinstance(ensemble.estimator_, DecisionTreeRegressor)
|
| 572 |
+
|
| 573 |
+
ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train)
|
| 574 |
+
assert isinstance(ensemble.estimator_, SVR)
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
def test_bagging_with_pipeline():
|
| 578 |
+
estimator = BaggingClassifier(
|
| 579 |
+
make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2
|
| 580 |
+
)
|
| 581 |
+
estimator.fit(iris.data, iris.target)
|
| 582 |
+
assert isinstance(estimator[0].steps[-1][1].random_state, int)
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
class DummyZeroEstimator(BaseEstimator):
|
| 586 |
+
def fit(self, X, y):
|
| 587 |
+
self.classes_ = np.unique(y)
|
| 588 |
+
return self
|
| 589 |
+
|
| 590 |
+
def predict(self, X):
|
| 591 |
+
return self.classes_[np.zeros(X.shape[0], dtype=int)]
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
def test_bagging_sample_weight_unsupported_but_passed():
|
| 595 |
+
estimator = BaggingClassifier(DummyZeroEstimator())
|
| 596 |
+
rng = check_random_state(0)
|
| 597 |
+
|
| 598 |
+
estimator.fit(iris.data, iris.target).predict(iris.data)
|
| 599 |
+
with pytest.raises(ValueError):
|
| 600 |
+
estimator.fit(
|
| 601 |
+
iris.data,
|
| 602 |
+
iris.target,
|
| 603 |
+
sample_weight=rng.randint(10, size=(iris.data.shape[0])),
|
| 604 |
+
)
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
def test_warm_start(random_state=42):
|
| 608 |
+
# Test if fitting incrementally with warm start gives a forest of the
|
| 609 |
+
# right size and the same results as a normal fit.
|
| 610 |
+
X, y = make_hastie_10_2(n_samples=20, random_state=1)
|
| 611 |
+
|
| 612 |
+
clf_ws = None
|
| 613 |
+
for n_estimators in [5, 10]:
|
| 614 |
+
if clf_ws is None:
|
| 615 |
+
clf_ws = BaggingClassifier(
|
| 616 |
+
n_estimators=n_estimators, random_state=random_state, warm_start=True
|
| 617 |
+
)
|
| 618 |
+
else:
|
| 619 |
+
clf_ws.set_params(n_estimators=n_estimators)
|
| 620 |
+
clf_ws.fit(X, y)
|
| 621 |
+
assert len(clf_ws) == n_estimators
|
| 622 |
+
|
| 623 |
+
clf_no_ws = BaggingClassifier(
|
| 624 |
+
n_estimators=10, random_state=random_state, warm_start=False
|
| 625 |
+
)
|
| 626 |
+
clf_no_ws.fit(X, y)
|
| 627 |
+
|
| 628 |
+
assert set([tree.random_state for tree in clf_ws]) == set(
|
| 629 |
+
[tree.random_state for tree in clf_no_ws]
|
| 630 |
+
)
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
def test_warm_start_smaller_n_estimators():
|
| 634 |
+
# Test if warm start'ed second fit with smaller n_estimators raises error.
|
| 635 |
+
X, y = make_hastie_10_2(n_samples=20, random_state=1)
|
| 636 |
+
clf = BaggingClassifier(n_estimators=5, warm_start=True)
|
| 637 |
+
clf.fit(X, y)
|
| 638 |
+
clf.set_params(n_estimators=4)
|
| 639 |
+
with pytest.raises(ValueError):
|
| 640 |
+
clf.fit(X, y)
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
def test_warm_start_equal_n_estimators():
|
| 644 |
+
# Test that nothing happens when fitting without increasing n_estimators
|
| 645 |
+
X, y = make_hastie_10_2(n_samples=20, random_state=1)
|
| 646 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
|
| 647 |
+
|
| 648 |
+
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
|
| 649 |
+
clf.fit(X_train, y_train)
|
| 650 |
+
|
| 651 |
+
y_pred = clf.predict(X_test)
|
| 652 |
+
# modify X to nonsense values, this should not change anything
|
| 653 |
+
X_train += 1.0
|
| 654 |
+
|
| 655 |
+
warn_msg = "Warm-start fitting without increasing n_estimators does not"
|
| 656 |
+
with pytest.warns(UserWarning, match=warn_msg):
|
| 657 |
+
clf.fit(X_train, y_train)
|
| 658 |
+
assert_array_equal(y_pred, clf.predict(X_test))
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def test_warm_start_equivalence():
|
| 662 |
+
# warm started classifier with 5+5 estimators should be equivalent to
|
| 663 |
+
# one classifier with 10 estimators
|
| 664 |
+
X, y = make_hastie_10_2(n_samples=20, random_state=1)
|
| 665 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
|
| 666 |
+
|
| 667 |
+
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141)
|
| 668 |
+
clf_ws.fit(X_train, y_train)
|
| 669 |
+
clf_ws.set_params(n_estimators=10)
|
| 670 |
+
clf_ws.fit(X_train, y_train)
|
| 671 |
+
y1 = clf_ws.predict(X_test)
|
| 672 |
+
|
| 673 |
+
clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141)
|
| 674 |
+
clf.fit(X_train, y_train)
|
| 675 |
+
y2 = clf.predict(X_test)
|
| 676 |
+
|
| 677 |
+
assert_array_almost_equal(y1, y2)
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
def test_warm_start_with_oob_score_fails():
|
| 681 |
+
# Check using oob_score and warm_start simultaneously fails
|
| 682 |
+
X, y = make_hastie_10_2(n_samples=20, random_state=1)
|
| 683 |
+
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
|
| 684 |
+
with pytest.raises(ValueError):
|
| 685 |
+
clf.fit(X, y)
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
def test_oob_score_removed_on_warm_start():
|
| 689 |
+
X, y = make_hastie_10_2(n_samples=100, random_state=1)
|
| 690 |
+
|
| 691 |
+
clf = BaggingClassifier(n_estimators=5, oob_score=True)
|
| 692 |
+
clf.fit(X, y)
|
| 693 |
+
|
| 694 |
+
clf.set_params(warm_start=True, oob_score=False, n_estimators=10)
|
| 695 |
+
clf.fit(X, y)
|
| 696 |
+
|
| 697 |
+
with pytest.raises(AttributeError):
|
| 698 |
+
getattr(clf, "oob_score_")
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
def test_oob_score_consistency():
|
| 702 |
+
# Make sure OOB scores are identical when random_state, estimator, and
|
| 703 |
+
# training data are fixed and fitting is done twice
|
| 704 |
+
X, y = make_hastie_10_2(n_samples=200, random_state=1)
|
| 705 |
+
bagging = BaggingClassifier(
|
| 706 |
+
KNeighborsClassifier(),
|
| 707 |
+
max_samples=0.5,
|
| 708 |
+
max_features=0.5,
|
| 709 |
+
oob_score=True,
|
| 710 |
+
random_state=1,
|
| 711 |
+
)
|
| 712 |
+
assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
def test_estimators_samples():
    # Check that format of estimators_samples_ is correct and that results
    # generated at fit time can be identically reproduced at a later time
    # using data saved in object attributes.
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    # bootstrap=False so each estimator sees max_samples * n_samples
    # distinct indices (no repeats), which the length check below relies on.
    bagging = BaggingClassifier(
        LogisticRegression(),
        max_samples=0.5,
        max_features=0.5,
        random_state=1,
        bootstrap=False,
    )
    bagging.fit(X, y)

    # Get relevant attributes
    estimators_samples = bagging.estimators_samples_
    estimators_features = bagging.estimators_features_
    estimators = bagging.estimators_

    # Test for correct formatting
    assert len(estimators_samples) == len(estimators)
    # max_samples=0.5 of 200 rows -> 100 sampled indices per estimator.
    assert len(estimators_samples[0]) == len(X) // 2
    # Sample indices must be stored as integers ("i" dtype kind).
    assert estimators_samples[0].dtype.kind == "i"

    # Re-fit single estimator to test for consistent sampling
    estimator_index = 0
    estimator_samples = estimators_samples[estimator_index]
    estimator_features = estimators_features[estimator_index]
    estimator = estimators[estimator_index]

    # Rebuild the exact training subset (rows, then columns) that this
    # sub-estimator saw during the ensemble fit.
    X_train = (X[estimator_samples])[:, estimator_features]
    y_train = y[estimator_samples]

    # Refitting on the reconstructed subset must reproduce the same
    # coefficients the sub-estimator learned inside the ensemble.
    orig_coefs = estimator.coef_
    estimator.fit(X_train, y_train)
    new_coefs = estimator.coef_

    assert_array_almost_equal(orig_coefs, new_coefs)
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
def test_estimators_samples_deterministic():
    # This test is a regression test to check that with a random step
    # (e.g. SparseRandomProjection) and a given random state, the results
    # generated at fit time can be identically reproduced at a later time using
    # data saved in object attributes. Check issue #9524 for full discussion.

    iris = load_iris()
    X, y = iris.data, iris.target

    # The pipeline's first step is itself randomized, which is exactly the
    # scenario issue #9524 reported as non-reproducible.
    base_pipeline = make_pipeline(
        SparseRandomProjection(n_components=2), LogisticRegression()
    )
    clf = BaggingClassifier(estimator=base_pipeline, max_samples=0.5, random_state=0)
    clf.fit(X, y)
    # Snapshot the final-step coefficients learned during the ensemble fit;
    # .copy() so the later refit cannot mutate our reference values.
    pipeline_estimator_coef = clf.estimators_[0].steps[-1][1].coef_.copy()

    estimator = clf.estimators_[0]
    estimator_sample = clf.estimators_samples_[0]
    estimator_feature = clf.estimators_features_[0]

    # Reconstruct the sub-estimator's training subset (rows, then columns)
    # from the attributes saved on the fitted ensemble.
    X_train = (X[estimator_sample])[:, estimator_feature]
    y_train = y[estimator_sample]

    # Refitting on the reconstructed subset must reproduce the coefficients
    # exactly (assert_array_equal, not "almost" equal).
    estimator.fit(X_train, y_train)
    assert_array_equal(estimator.steps[-1][1].coef_, pipeline_estimator_coef)
|
| 780 |
+
|
| 781 |
+
|
| 782 |
+
def test_max_samples_consistency():
    # When a valid integer ``max_samples`` is supplied, the validated value
    # stored on the fitted estimator must be identical to the user's value.
    requested_samples = 100
    X, y = make_hastie_10_2(n_samples=2 * requested_samples, random_state=1)
    ensemble = BaggingClassifier(
        KNeighborsClassifier(),
        max_samples=requested_samples,
        max_features=0.5,
        random_state=1,
    )
    ensemble.fit(X, y)
    assert ensemble._max_samples == requested_samples
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
def test_set_oob_score_label_encoding():
    # The oob_score must be invariant to how the class labels are encoded
    # (strings vs. arbitrary ints vs. 0-based ints).
    # See: https://github.com/scikit-learn/scikit-learn/issues/8933
    random_state = 5
    X = [[-1], [0], [1]] * 5
    Y1 = ["A", "B", "C"] * 5
    Y2 = [-1, 0, 1] * 5
    Y3 = [0, 1, 2] * 5
    x1, x2, x3 = (
        BaggingClassifier(oob_score=True, random_state=random_state)
        .fit(X, labels)
        .oob_score_
        for labels in (Y1, Y2, Y3)
    )
    assert [x1, x2] == [x3, x3]
|
| 821 |
+
|
| 822 |
+
|
| 823 |
+
def replace(X):
    """Return a float copy of ``X`` with every NaN/inf entry set to 0.

    The input array is left untouched; only the copy is modified.
    """
    cleaned = X.astype("float", copy=True)
    cleaned[~np.isfinite(cleaned)] = 0
    return cleaned
|
| 827 |
+
|
| 828 |
+
|
| 829 |
+
def test_bagging_regressor_with_missing_inputs():
    # Check that BaggingRegressor can accept X with missing/infinite data
    # as long as the wrapped pipeline sanitizes it first (via ``replace``).
    X = np.array(
        [
            [1, 3, 5],
            [2, None, 6],
            [2, np.nan, 6],
            [2, np.inf, 6],
            [2, -np.inf, 6],
        ]
    )
    # Exercise both a single-output and a multi-output target.
    y_values = [
        np.array([2, 3, 3, 3, 3]),
        np.array(
            [
                [2, 1, 9],
                [3, 6, 8],
                [3, 6, 8],
                [3, 6, 8],
                [3, 6, 8],
            ]
        ),
    ]
    for y in y_values:
        regressor = DecisionTreeRegressor()
        # ``replace`` zeroes out the non-finite entries before the tree
        # ever sees them, so fitting should succeed.
        pipeline = make_pipeline(FunctionTransformer(replace), regressor)
        pipeline.fit(X, y).predict(X)
        bagging_regressor = BaggingRegressor(pipeline)
        y_hat = bagging_regressor.fit(X, y).predict(X)
        assert y.shape == y_hat.shape

    # Verify that exceptions can be raised by wrapper regressor
    regressor = DecisionTreeRegressor()
    pipeline = make_pipeline(regressor)
    # Without the sanitizing step, the non-finite X must be rejected ...
    with pytest.raises(ValueError):
        pipeline.fit(X, y)
    bagging_regressor = BaggingRegressor(pipeline)
    # ... both by the bare pipeline and when wrapped in a BaggingRegressor.
    with pytest.raises(ValueError):
        bagging_regressor.fit(X, y)
|
| 868 |
+
|
| 869 |
+
|
| 870 |
+
def test_bagging_classifier_with_missing_inputs():
    # Check that BaggingClassifier can accept X with missing/infinite data
    # as long as the wrapped pipeline sanitizes it first (via ``replace``).
    X = np.array(
        [
            [1, 3, 5],
            [2, None, 6],
            [2, np.nan, 6],
            [2, np.inf, 6],
            [2, -np.inf, 6],
        ]
    )
    y = np.array([3, 6, 6, 6, 6])
    classifier = DecisionTreeClassifier()
    # ``replace`` zeroes out the non-finite entries before the tree sees them.
    pipeline = make_pipeline(FunctionTransformer(replace), classifier)
    pipeline.fit(X, y).predict(X)
    bagging_classifier = BaggingClassifier(pipeline)
    bagging_classifier.fit(X, y)
    y_hat = bagging_classifier.predict(X)
    assert y.shape == y_hat.shape
    # The probability interfaces must work on non-finite input as well;
    # only smoke-tested here (no value assertions).
    bagging_classifier.predict_log_proba(X)
    bagging_classifier.predict_proba(X)

    # Verify that exceptions can be raised by wrapper classifier
    classifier = DecisionTreeClassifier()
    pipeline = make_pipeline(classifier)
    # Without the sanitizing step, the non-finite X must be rejected ...
    with pytest.raises(ValueError):
        pipeline.fit(X, y)
    bagging_classifier = BaggingClassifier(pipeline)
    # ... both by the bare pipeline and when wrapped in a BaggingClassifier.
    with pytest.raises(ValueError):
        bagging_classifier.fit(X, y)
|
| 900 |
+
|
| 901 |
+
|
| 902 |
+
def test_bagging_small_max_features():
    # A low fractional max_features (here 0.3 of 2 features) must still be
    # accepted by the Bagging estimator.

    X = np.array([[1, 2], [3, 4]])
    y = np.array([1, 0])

    ensemble = BaggingClassifier(
        LogisticRegression(), max_features=0.3, random_state=1
    )
    ensemble.fit(X, y)
|
| 910 |
+
|
| 911 |
+
|
| 912 |
+
def test_bagging_get_estimators_indices():
    # Check that Bagging estimator can generate sample indices properly
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/16436

    rng = np.random.RandomState(0)
    X = rng.randn(13, 4)
    # y doubles as row indices (0..12) so the spy estimator below can record
    # exactly which rows it was given.
    y = np.arange(13)

    class MyEstimator(DecisionTreeRegressor):
        """An estimator which stores y indices information at fit."""

        def fit(self, X, y):
            # Deliberately skip the real fit: just capture the sampled y
            # (i.e. the sampled row indices) for later comparison.
            self._sample_indices = y

    clf = BaggingRegressor(estimator=MyEstimator(), n_estimators=1, random_state=0)
    clf.fit(X, y)

    # The indices the sub-estimator actually received must match the
    # indices reported by estimators_samples_.
    assert_array_equal(clf.estimators_[0]._sample_indices, clf.estimators_samples_[0])
|
| 931 |
+
|
| 932 |
+
|
| 933 |
+
# Hist gradient boosting natively handles NaN, so bagging it should
# advertise allow_nan=True; LogisticRegression/SVR do not, so False.
@pytest.mark.parametrize(
    "bagging, expected_allow_nan",
    [
        (BaggingClassifier(HistGradientBoostingClassifier(max_iter=1)), True),
        (BaggingRegressor(HistGradientBoostingRegressor(max_iter=1)), True),
        (BaggingClassifier(LogisticRegression()), False),
        (BaggingRegressor(SVR()), False),
    ],
)
def test_bagging_allow_nan_tag(bagging, expected_allow_nan):
    """Check that bagging inherits allow_nan tag."""
    assert bagging.__sklearn_tags__().input_tags.allow_nan == expected_allow_nan
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
@pytest.mark.parametrize(
    "model",
    [
        BaggingClassifier(
            estimator=RandomForestClassifier(n_estimators=1), n_estimators=1
        ),
        BaggingRegressor(
            estimator=RandomForestRegressor(n_estimators=1), n_estimators=1
        ),
    ],
)
def test_bagging_with_metadata_routing(model):
    """Make sure that metadata routing works with non-default estimator.

    Smoke test: fitting under ``enable_metadata_routing=True`` must not raise.
    """
    with sklearn.config_context(enable_metadata_routing=True):
        # Uses the module-level ``iris`` dataset fixture.
        model.fit(iris.data, iris.target)
|
| 962 |
+
|
| 963 |
+
|
| 964 |
+
@pytest.mark.parametrize(
    "model",
    [
        BaggingClassifier(
            estimator=AdaBoostClassifier(n_estimators=1),
            n_estimators=1,
        ),
        BaggingRegressor(estimator=AdaBoostRegressor(n_estimators=1), n_estimators=1),
    ],
)
def test_bagging_without_support_metadata_routing(model):
    """Make sure that we still can use an estimator that does not implement the
    metadata routing."""
    # Smoke test: fitting with such a sub-estimator must not raise.
    model.fit(iris.data, iris.target)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from sklearn.base import ClassifierMixin, clone, is_classifier
|
| 5 |
+
from sklearn.datasets import (
|
| 6 |
+
load_diabetes,
|
| 7 |
+
load_iris,
|
| 8 |
+
make_classification,
|
| 9 |
+
make_regression,
|
| 10 |
+
)
|
| 11 |
+
from sklearn.ensemble import (
|
| 12 |
+
RandomForestClassifier,
|
| 13 |
+
RandomForestRegressor,
|
| 14 |
+
StackingClassifier,
|
| 15 |
+
StackingRegressor,
|
| 16 |
+
VotingClassifier,
|
| 17 |
+
VotingRegressor,
|
| 18 |
+
)
|
| 19 |
+
from sklearn.impute import SimpleImputer
|
| 20 |
+
from sklearn.linear_model import LinearRegression, LogisticRegression
|
| 21 |
+
from sklearn.pipeline import make_pipeline
|
| 22 |
+
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR
|
| 23 |
+
|
| 24 |
+
# Module-level fixtures shared by the tests below:
# classification data (iris) ...
X, y = load_iris(return_X_y=True)

# ... and regression data (diabetes), suffixed ``_r``.
X_r, y_r = load_diabetes(return_X_y=True)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@pytest.mark.parametrize(
    "X, y, estimator",
    [
        (
            *make_classification(n_samples=10),
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC()),
                    ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)),
                ],
                cv=2,
            ),
        ),
        (
            *make_classification(n_samples=10),
            VotingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC()),
                    ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)),
                ]
            ),
        ),
        (
            *make_regression(n_samples=10),
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR()),
                    ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)),
                ],
                cv=2,
            ),
        ),
        (
            *make_regression(n_samples=10),
            VotingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR()),
                    ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)),
                ]
            ),
        ),
    ],
    ids=[
        "stacking-classifier",
        "voting-classifier",
        "stacking-regressor",
        "voting-regressor",
    ],
)
def test_ensemble_heterogeneous_estimators_behavior(X, y, estimator):
    # check that the behavior of `estimators`, `estimators_`,
    # `named_estimators`, `named_estimators_` is consistent across all
    # ensemble classes and when using `set_params()`.

    # before fit: named_estimators is a view over the `estimators` list,
    # accessible both as an attribute and by key
    assert "svm" in estimator.named_estimators
    assert estimator.named_estimators.svm is estimator.estimators[1][1]
    assert estimator.named_estimators.svm is estimator.named_estimators["svm"]

    # check fitted attributes
    estimator.fit(X, y)
    assert len(estimator.named_estimators) == 3
    assert len(estimator.named_estimators_) == 3
    assert sorted(list(estimator.named_estimators_.keys())) == sorted(
        ["lr", "svm", "rf"]
    )

    # check that set_params() does not add a new attribute
    estimator_new_params = clone(estimator)
    # swap in a kernel SVM of the matching kind (classifier vs regressor)
    svm_estimator = SVC() if is_classifier(estimator) else SVR()
    estimator_new_params.set_params(svm=svm_estimator).fit(X, y)
    assert not hasattr(estimator_new_params, "svm")
    # the untouched sub-estimators must keep their original parameters
    assert (
        estimator_new_params.named_estimators.lr.get_params()
        == estimator.named_estimators.lr.get_params()
    )
    assert (
        estimator_new_params.named_estimators.rf.get_params()
        == estimator.named_estimators.rf.get_params()
    )

    # check the behavior when setting and dropping an estimator
    estimator_dropped = clone(estimator)
    estimator_dropped.set_params(svm="drop")
    estimator_dropped.fit(X, y)
    # the dropped entry stays listed (as the string "drop"), so lengths
    # are unchanged
    assert len(estimator_dropped.named_estimators) == 3
    assert estimator_dropped.named_estimators.svm == "drop"
    assert len(estimator_dropped.named_estimators_) == 3
    assert sorted(list(estimator_dropped.named_estimators_.keys())) == sorted(
        ["lr", "svm", "rf"]
    )
    for sub_est in estimator_dropped.named_estimators_:
        # check that the correspondence is correct: no fitted sub-estimator
        # may be of the dropped SVM's type
        assert not isinstance(sub_est, type(estimator.named_estimators.svm))

    # check that we can set the parameters of the underlying classifier
    # via the `<name>__<param>` syntax
    estimator.set_params(svm__C=10.0)
    estimator.set_params(rf__max_depth=5)
    assert (
        estimator.get_params()["svm__C"]
        == estimator.get_params()["svm"].get_params()["C"]
    )
    assert (
        estimator.get_params()["rf__max_depth"]
        == estimator.get_params()["rf"].get_params()["max_depth"]
    )
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@pytest.mark.parametrize(
    "Ensemble",
    [VotingClassifier, StackingRegressor, VotingRegressor],
)
def test_ensemble_heterogeneous_estimators_type(Ensemble):
    """Validation must fail when sub-estimators are of the wrong kind.

    A classifier ensemble fed a regressor (and vice versa) must raise.
    StackingClassifier is excluded: it may legitimately wrap a regressor.
    """
    if issubclass(Ensemble, ClassifierMixin):
        X, y = make_classification(n_samples=10)
        wrong_estimators = [("lr", LinearRegression())]
        expected_kind = "classifier"
    else:
        X, y = make_regression(n_samples=10)
        wrong_estimators = [("lr", LogisticRegression())]
        expected_kind = "regressor"
    ensemble = Ensemble(estimators=wrong_estimators)

    expected_error = "should be a {}".format(expected_kind)
    with pytest.raises(ValueError, match=expected_error):
        ensemble.fit(X, y)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
@pytest.mark.parametrize(
    "X, y, Ensemble",
    [
        (*make_classification(n_samples=10), StackingClassifier),
        (*make_classification(n_samples=10), VotingClassifier),
        (*make_regression(n_samples=10), StackingRegressor),
        (*make_regression(n_samples=10), VotingRegressor),
    ],
)
def test_ensemble_heterogeneous_estimators_name_validation(X, y, Ensemble):
    """Check the three name-validation errors shared by all heterogeneous
    ensembles: dunder in a name, duplicate names, and a name shadowing a
    constructor argument."""
    # raise an error when the name contains dunder ("__" is reserved for
    # the nested set_params syntax)
    if issubclass(Ensemble, ClassifierMixin):
        estimators = [("lr__", LogisticRegression())]
    else:
        estimators = [("lr__", LinearRegression())]
    ensemble = Ensemble(estimators=estimators)

    err_msg = r"Estimator names must not contain __: got \['lr__'\]"
    with pytest.raises(ValueError, match=err_msg):
        ensemble.fit(X, y)

    # raise an error when the name is not unique
    if issubclass(Ensemble, ClassifierMixin):
        estimators = [("lr", LogisticRegression()), ("lr", LogisticRegression())]
    else:
        estimators = [("lr", LinearRegression()), ("lr", LinearRegression())]
    ensemble = Ensemble(estimators=estimators)

    err_msg = r"Names provided are not unique: \['lr', 'lr'\]"
    with pytest.raises(ValueError, match=err_msg):
        ensemble.fit(X, y)

    # raise an error when the name conflicts with the parameters
    # ("estimators" is itself a constructor argument)
    if issubclass(Ensemble, ClassifierMixin):
        estimators = [("estimators", LogisticRegression())]
    else:
        estimators = [("estimators", LinearRegression())]
    ensemble = Ensemble(estimators=estimators)

    err_msg = "Estimator names conflict with constructor arguments"
    with pytest.raises(ValueError, match=err_msg):
        ensemble.fit(X, y)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
@pytest.mark.parametrize(
    "X, y, estimator",
    [
        (
            *make_classification(n_samples=10),
            StackingClassifier(estimators=[("lr", LogisticRegression())]),
        ),
        (
            *make_classification(n_samples=10),
            VotingClassifier(estimators=[("lr", LogisticRegression())]),
        ),
        (
            *make_regression(n_samples=10),
            StackingRegressor(estimators=[("lr", LinearRegression())]),
        ),
        (
            *make_regression(n_samples=10),
            VotingRegressor(estimators=[("lr", LinearRegression())]),
        ),
    ],
    ids=[
        "stacking-classifier",
        "voting-classifier",
        "stacking-regressor",
        "voting-regressor",
    ],
)
def test_ensemble_heterogeneous_estimators_all_dropped(X, y, estimator):
    # check that we raise a consistent error when all estimators are
    # dropped (each ensemble has a single "lr" entry, so dropping it
    # leaves nothing to fit)
    estimator.set_params(lr="drop")
    with pytest.raises(ValueError, match="All estimators are dropped."):
        estimator.fit(X, y)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
@pytest.mark.parametrize(
    "Ensemble, Estimator, X, y",
    [
        (StackingClassifier, LogisticRegression, X, y),
        (StackingRegressor, LinearRegression, X_r, y_r),
        (VotingClassifier, LogisticRegression, X, y),
        (VotingRegressor, LinearRegression, X_r, y_r),
    ],
)
# FIXME: we should move this test in `estimator_checks` once we are able
# to construct meta-estimator instances
def test_heterogeneous_ensemble_support_missing_values(Ensemble, Estimator, X, y):
    """Voting/Stacking must delegate missing-value validation to the
    underlying estimators (here pipelines that impute NaNs first)."""
    X = X.copy()
    # Knock out ~10% of the entries with NaN.
    nan_mask = np.random.choice([1, 0], X.shape, p=[0.1, 0.9]).astype(bool)
    X[nan_mask] = np.nan
    imputing_pipe = make_pipeline(SimpleImputer(), Estimator())
    ensemble = Ensemble(estimators=[("pipe1", imputing_pipe), ("pipe2", imputing_pipe)])
    # Smoke test: fit and score must not raise despite the NaNs.
    ensemble.fit(X, y).score(X, y)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py
ADDED
|
@@ -0,0 +1,1864 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Testing for the forest module (sklearn.ensemble.forest).
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Authors: The scikit-learn developers
|
| 6 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 7 |
+
|
| 8 |
+
import itertools
|
| 9 |
+
import math
|
| 10 |
+
import pickle
|
| 11 |
+
from collections import defaultdict
|
| 12 |
+
from functools import partial
|
| 13 |
+
from itertools import combinations, product
|
| 14 |
+
from typing import Any, Dict
|
| 15 |
+
from unittest.mock import patch
|
| 16 |
+
|
| 17 |
+
import joblib
|
| 18 |
+
import numpy as np
|
| 19 |
+
import pytest
|
| 20 |
+
from scipy.special import comb
|
| 21 |
+
|
| 22 |
+
import sklearn
|
| 23 |
+
from sklearn import clone, datasets
|
| 24 |
+
from sklearn.datasets import make_classification, make_hastie_10_2
|
| 25 |
+
from sklearn.decomposition import TruncatedSVD
|
| 26 |
+
from sklearn.dummy import DummyRegressor
|
| 27 |
+
from sklearn.ensemble import (
|
| 28 |
+
ExtraTreesClassifier,
|
| 29 |
+
ExtraTreesRegressor,
|
| 30 |
+
RandomForestClassifier,
|
| 31 |
+
RandomForestRegressor,
|
| 32 |
+
RandomTreesEmbedding,
|
| 33 |
+
)
|
| 34 |
+
from sklearn.ensemble._forest import (
|
| 35 |
+
_generate_unsampled_indices,
|
| 36 |
+
_get_n_samples_bootstrap,
|
| 37 |
+
)
|
| 38 |
+
from sklearn.exceptions import NotFittedError
|
| 39 |
+
from sklearn.metrics import (
|
| 40 |
+
explained_variance_score,
|
| 41 |
+
f1_score,
|
| 42 |
+
mean_poisson_deviance,
|
| 43 |
+
mean_squared_error,
|
| 44 |
+
)
|
| 45 |
+
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
|
| 46 |
+
from sklearn.svm import LinearSVC
|
| 47 |
+
from sklearn.tree._classes import SPARSE_SPLITTERS
|
| 48 |
+
from sklearn.utils._testing import (
|
| 49 |
+
_convert_container,
|
| 50 |
+
assert_allclose,
|
| 51 |
+
assert_almost_equal,
|
| 52 |
+
assert_array_almost_equal,
|
| 53 |
+
assert_array_equal,
|
| 54 |
+
ignore_warnings,
|
| 55 |
+
skip_if_no_parallel,
|
| 56 |
+
)
|
| 57 |
+
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
|
| 58 |
+
from sklearn.utils.multiclass import type_of_target
|
| 59 |
+
from sklearn.utils.parallel import Parallel
|
| 60 |
+
from sklearn.utils.validation import check_random_state
|
| 61 |
+
|
| 62 |
+
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# Larger classification sample used for testing feature importances:
# exactly 3 informative features, no shuffling so they come first.
X_large, y_large = datasets.make_classification(
    n_samples=500,
    n_features=10,
    n_informative=3,
    n_redundant=0,
    n_repeated=0,
    shuffle=False,
    random_state=0,
)

# also load the iris dataset and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# Make regression dataset
X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10, random_state=1)

# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)

# Get the default backend in joblib to test parallelism and interaction with
# different backends
DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__

FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}

FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}

FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}

# Convenience unions of the registries above, used by the parametrized tests.
FOREST_ESTIMATORS: Dict[str, Any] = {
    **FOREST_CLASSIFIERS,
    **FOREST_REGRESSORS,
    **FOREST_TRANSFORMERS,
}

FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = {
    **FOREST_CLASSIFIERS,
    **FOREST_REGRESSORS,
}
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Default max_features.
    model = ForestClassifier(n_estimators=10, random_state=1)
    model.fit(X, y)
    assert_array_equal(model.predict(T), true_result)
    assert len(model) == 10

    # Restricting every split to a single feature must still solve the toy task.
    model = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
    model.fit(X, y)
    assert_array_equal(model.predict(T), true_result)
    assert len(model) == 10

    # apply() yields one leaf index per (sample, tree) pair.
    leaves = model.apply(X)
    assert leaves.shape == (len(X), model.n_estimators)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
@pytest.mark.parametrize("criterion", ("gini", "log_loss"))
def test_iris_criterion(name, criterion):
    """Check consistency on the iris dataset for both split criteria."""
    ForestClassifier = FOREST_CLASSIFIERS[name]

    model = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1)
    model.fit(iris.data, iris.target)
    accuracy = model.score(iris.data, iris.target)
    msg = "Failed with criterion %s and score = %f" % (criterion, accuracy)
    assert accuracy > 0.9, msg

    # With only 2 candidate features per split the fit is noisier, so the
    # accuracy bound is looser.
    model = ForestClassifier(
        n_estimators=10, criterion=criterion, max_features=2, random_state=1
    )
    model.fit(iris.data, iris.target)
    accuracy = model.score(iris.data, iris.target)
    msg = "Failed with criterion %s and score = %f" % (criterion, accuracy)
    assert accuracy > 0.5, msg
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
@pytest.mark.parametrize(
    "criterion", ("squared_error", "absolute_error", "friedman_mse")
)
def test_regression_criterion(name, criterion):
    """Check consistency of forest regressors on the regression dataset."""
    ForestRegressor = FOREST_REGRESSORS[name]

    # Default max_features (all features considered at each split).
    reg = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
    reg.fit(X_reg, y_reg)
    r2 = reg.score(X_reg, y_reg)
    msg = "Failed with max_features=None, criterion %s and score = %f" % (
        criterion,
        r2,
    )
    assert r2 > 0.93, msg

    # Restricting the number of candidate features only slightly degrades fit.
    reg = ForestRegressor(
        n_estimators=5, criterion=criterion, max_features=6, random_state=1
    )
    reg.fit(X_reg, y_reg)
    r2 = reg.score(X_reg, y_reg)
    msg = "Failed with max_features=6, criterion %s and score = %f" % (criterion, r2)
    assert r2 > 0.92, msg
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def test_poisson_vs_mse():
    """Test that random forest with poisson criterion performs better than
    mse for a poisson target.

    There is a similar test for DecisionTreeRegressor.
    """
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 500, 10
    X = datasets.make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # Build a log-linear Poisson model; downscale coef since it is exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    # Mitigate overfitting by requiring at least 10 samples per leaf.
    forest_poisson = RandomForestRegressor(
        criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng
    )
    forest_squared = RandomForestRegressor(
        criterion="squared_error",
        min_samples_leaf=10,
        max_features="sqrt",
        random_state=rng,
    )

    forest_poisson.fit(X_train, y_train)
    forest_squared.fit(X_train, y_train)
    dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)

    for X_eval, y_eval, split in [(X_train, y_train, "train"), (X_test, y_test, "test")]:
        deviance_poisson = mean_poisson_deviance(y_eval, forest_poisson.predict(X_eval))
        # The squared_error forest might produce non-positive predictions => clip.
        # If y = 0 for those, the poisson deviance gets too good.
        # If we drew more samples, we would eventually get y > 0 and the
        # poisson deviance would explode, i.e. be undefined. Therefore, we do
        # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a
        # small penalty on the non-positive predictions.
        deviance_squared = mean_poisson_deviance(
            y_eval, np.clip(forest_squared.predict(X_eval), 1e-6, None)
        )
        deviance_dummy = mean_poisson_deviance(y_eval, dummy.predict(X_eval))
        # As squared_error might correctly predict 0 in the train set, its train
        # score can be better than Poisson. This is no longer the case for the
        # test set. But keep the clipping comment above in mind.
        if split == "test":
            assert deviance_poisson < deviance_squared
        assert deviance_poisson < 0.8 * deviance_dummy
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
@pytest.mark.parametrize("criterion", ("poisson", "squared_error"))
def test_balance_property_random_forest(criterion):
    """Test that sum(y_pred) == sum(y_true) on the training set."""
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 500, 10
    X = datasets.make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )

    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))

    # With bootstrap=False every tree sees the full training set, so averaged
    # predictions should reproduce the target total exactly.
    reg = RandomForestRegressor(
        criterion=criterion, n_estimators=10, bootstrap=False, random_state=rng
    )
    reg.fit(X, y)

    assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y))
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
def test_regressor_attributes(name):
    """Regression models must not expose classification-only attributes,
    neither before nor after fitting."""
    reg = FOREST_REGRESSORS[name](random_state=0)
    for attr in ("classes_", "n_classes_"):
        assert not hasattr(reg, attr)

    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    for attr in ("classes_", "n_classes_"):
        assert not hasattr(reg, attr)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_probability(name):
    """predict_proba rows must sum to 1 and agree with exp(predict_log_proba)."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Shallow stumps can give 0-probabilities => log(0) warnings are expected.
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(
            n_estimators=10, random_state=1, max_features=1, max_depth=1
        )
        clf.fit(iris.data, iris.target)
        proba = clf.predict_proba(iris.data)
        assert_array_almost_equal(proba.sum(axis=1), np.ones(iris.data.shape[0]))
        assert_array_almost_equal(proba, np.exp(clf.predict_log_proba(iris.data)))
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
@pytest.mark.parametrize("dtype", (np.float64, np.float32))
@pytest.mark.parametrize(
    "name, criterion",
    itertools.chain(
        product(FOREST_CLASSIFIERS, ["gini", "log_loss"]),
        product(FOREST_REGRESSORS, ["squared_error", "friedman_mse", "absolute_error"]),
    ),
)
def test_importances(dtype, name, criterion):
    """Feature importances must single out the 3 informative features and be
    stable w.r.t. parallelism and rescaled sample weights."""
    # absolute_error is noisier, so relax the stability tolerance for it.
    is_absolute_error = name in FOREST_REGRESSORS and criterion == "absolute_error"
    tolerance = 0.05 if is_absolute_error else 0.01

    # cast as dtype
    X = X_large.astype(dtype, copy=False)
    y = y_large.astype(dtype, copy=False)

    ForestEstimator = FOREST_ESTIMATORS[name]

    forest = ForestEstimator(n_estimators=10, criterion=criterion, random_state=0)
    forest.fit(X, y)
    importances = forest.feature_importances_

    # The forest can detect that only the first 3 of 10 features are informative.
    assert importances.shape[0] == 10
    assert np.sum(importances > 0.1) == 3
    assert np.all(importances[:3] > 0.1)

    # Importances must not change when computed with several jobs.
    forest.set_params(n_jobs=2)
    importances_parallel = forest.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)

    # Importances stay non-negative under integer sample weights...
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    forest = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
    forest.fit(X, y, sample_weight=sample_weight)
    importances = forest.feature_importances_
    assert np.all(importances >= 0.0)

    # ...and are invariant (up to tolerance) under global weight rescaling.
    for scale in [0.5, 100]:
        forest = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
        forest.fit(X, y, sample_weight=scale * sample_weight)
        importances_rescaled = forest.feature_importances_
        assert np.abs(importances - importances_rescaled).mean() < tolerance
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees converge
    # towards their theoretical values (see Louppe et al, "Understanding
    # variable importances in forests of randomized trees", 2013).

    def binomial(k, n):
        # C(n, k), with out-of-range k mapped to 0.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)

    def entropy(samples):
        # Shannon entropy (base 2) of a vector of non-negative integer labels.
        n_samples = len(samples)
        result = 0.0
        for count in np.bincount(samples):
            p = 1.0 * count / n_samples
            if p > 0:
                result -= p * np.log2(p)
        return result

    def mdi_importance(X_m, X, y):
        # Analytical Mean Decrease of Impurity of feature X_m, averaging the
        # conditional mutual information over all conditioning sets B.
        n_samples, n_features = X.shape

        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]

        imp = 0.0

        for k in range(n_features):
            # Weight shared by every conditioning set B of size k.
            coef = 1.0 / (binomial(k, n_features) * (n_features - k))

            # For all B of size k...
            for B in combinations(features, k):
                # ...and all joint assignments B = b.
                for b in product(*[values[B[j]] for j in range(k)]):
                    mask_b = np.ones(n_samples, dtype=bool)
                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]

                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)

                    if n_samples_b > 0:
                        # Partition y_ by the value of feature X_m.
                        children = [y_[X_[:, X_m] == xi] for xi in values[X_m]]

                        cond_entropy = sum(
                            entropy(c) * len(c) / n_samples_b for c in children
                        )
                        # coef * P(B=b) * information gain of splitting on X_m.
                        imp += (
                            coef
                            * (1.0 * n_samples_b / n_samples)
                            * (entropy(y_) - cond_entropy)
                        )

        return imp

    data = np.array(
        [
            [0, 0, 1, 0, 0, 1, 0, 1],
            [1, 0, 1, 1, 1, 0, 1, 2],
            [1, 0, 1, 1, 0, 1, 1, 3],
            [0, 1, 1, 1, 0, 1, 0, 4],
            [1, 1, 0, 1, 0, 1, 1, 5],
            [1, 1, 0, 1, 1, 1, 1, 6],
            [1, 0, 1, 0, 0, 1, 0, 7],
            [1, 1, 1, 1, 1, 1, 1, 8],
            [1, 1, 1, 1, 0, 1, 1, 9],
            [1, 1, 1, 0, 1, 1, 1, 0],
        ]
    )

    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
    n_features = X.shape[1]

    # Compute the ground-truth importances from the analytical formula.
    true_importances = np.array(
        [mdi_importance(i, X, y) for i in range(n_features)]
    )

    # Estimate importances with totally randomized trees (max_features=1).
    clf = ExtraTreesClassifier(
        n_estimators=500, max_features=1, criterion="log_loss", random_state=0
    ).fit(X, y)

    importances = (
        sum(
            tree.tree_.compute_feature_importances(normalize=False)
            for tree in clf.estimators_
        )
        / clf.n_estimators
    )

    # Importances must sum to the total entropy and match the theory closely.
    assert_almost_equal(entropy(y), sum(importances))
    assert np.abs(true_importances - importances).mean() < 0.01
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_unfitted_feature_importances(name):
    """Accessing feature_importances_ before fit must raise NotFittedError."""
    err_msg = (
        f"This {name} instance is not fitted yet. Call 'fit' with "
        "appropriate arguments before using this estimator."
    )
    with pytest.raises(NotFittedError, match=err_msg):
        getattr(FOREST_ESTIMATORS[name](), "feature_importances_")
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_accuracy",
    [
        (
            *datasets.make_classification(n_samples=300, n_classes=2, random_state=0),
            0.9,
        ),
        (
            *datasets.make_classification(
                n_samples=1000, n_classes=3, n_informative=6, random_state=0
            ),
            0.65,
        ),
        (
            iris.data,
            iris.target * 2 + 1,
            0.65,
        ),
        (
            *datasets.make_multilabel_classification(n_samples=300, random_state=0),
            0.18,
        ),
    ],
)
@pytest.mark.parametrize("oob_score", [True, partial(f1_score, average="micro")])
def test_forest_classifier_oob(
    ForestClassifier, X, y, X_type, lower_bound_accuracy, oob_score
):
    """Check that OOB score is close to score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.5,
        random_state=0,
    )
    clf = ForestClassifier(
        n_estimators=40,
        bootstrap=True,
        oob_score=oob_score,
        random_state=0,
    )

    # OOB attributes must only exist after fitting.
    assert not hasattr(clf, "oob_score_")
    assert not hasattr(clf, "oob_decision_function_")

    clf.fit(X_train, y_train)
    # oob_score may be a callable metric or True (use the default scorer).
    test_score = (
        oob_score(y_test, clf.predict(X_test))
        if callable(oob_score)
        else clf.score(X_test, y_test)
    )
    assert clf.oob_score_ >= lower_bound_accuracy

    abs_diff = abs(test_score - clf.oob_score_)
    assert abs_diff <= 0.11, f"{abs_diff=} is greater than 0.11"

    assert hasattr(clf, "oob_score_")
    assert not hasattr(clf, "oob_prediction_")
    assert hasattr(clf, "oob_decision_function_")

    # Decision-function shape: 2D for single output, 3D for multilabel.
    if y.ndim == 1:
        expected_shape = (X_train.shape[0], len(set(y)))
    else:
        expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1])
    assert clf.oob_decision_function_.shape == expected_shape
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_r2",
    [
        (
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=1, random_state=0
            ),
            0.7,
        ),
        (
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=2, random_state=0
            ),
            0.55,
        ),
    ],
)
@pytest.mark.parametrize("oob_score", [True, explained_variance_score])
def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2, oob_score):
    """Check that forest-based regressor provide an OOB score close to the
    score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.5,
        random_state=0,
    )
    regressor = ForestRegressor(
        n_estimators=50,
        bootstrap=True,
        oob_score=oob_score,
        random_state=0,
    )

    # OOB attributes must only exist after fitting.
    assert not hasattr(regressor, "oob_score_")
    assert not hasattr(regressor, "oob_prediction_")

    regressor.fit(X_train, y_train)
    # oob_score may be a callable metric or True (use the default R^2 scorer).
    if callable(oob_score):
        test_score = oob_score(y_test, regressor.predict(X_test))
    else:
        test_score = regressor.score(X_test, y_test)
    assert regressor.oob_score_ >= lower_bound_r2

    # The OOB estimate should be close to the held-out score.
    assert abs(test_score - regressor.oob_score_) <= 0.1

    assert hasattr(regressor, "oob_score_")
    assert hasattr(regressor, "oob_prediction_")
    assert not hasattr(regressor, "oob_decision_function_")

    if y.ndim == 1:
        expected_shape = (X_train.shape[0],)
    else:
        # Bug fix: the number of outputs is y.shape[1], not y.ndim. The two
        # only coincided because this test happens to use n_targets=2 (equal
        # to the ndim of a 2D array); y.shape[1] keeps the assertion correct
        # for any number of targets.
        expected_shape = (X_train.shape[0], y.shape[1])
    assert regressor.oob_prediction_.shape == expected_shape
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_forest_oob_warning(ForestEstimator):
    """Check that a warning is raised when not enough estimator and the OOB
    estimates will be inaccurate."""
    # A single bootstrapped tree leaves some samples with no OOB prediction.
    forest = ForestEstimator(
        n_estimators=1,
        oob_score=True,
        bootstrap=True,
        random_state=0,
    )
    with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"):
        forest.fit(iris.data, iris.target)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_forest_oob_score_requires_bootstrap(ForestEstimator):
    """Check that we raise an error if OOB score is requested without
    activating bootstrapping.
    """
    forest = ForestEstimator(oob_score=True, bootstrap=False)
    with pytest.raises(
        ValueError, match="Out of bag estimation only available if bootstrap=True"
    ):
        forest.fit(iris.data, iris.target)
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
def test_classifier_error_oob_score_multiclass_multioutput(ForestClassifier):
    """Requesting an OOB score must fail for a multiclass-multioutput
    classification target."""
    rng = np.random.RandomState(42)
    X_iris = iris.data
    y_multi = rng.randint(low=0, high=5, size=(iris.data.shape[0], 2))
    # Sanity check: this target really is multiclass-multioutput.
    assert type_of_target(y_multi) == "multiclass-multioutput"

    clf = ForestClassifier(oob_score=True, bootstrap=True)
    err_msg = "The type of target cannot be used to compute OOB estimates"
    with pytest.raises(ValueError, match=err_msg):
        clf.fit(X_iris, y_multi)
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
def test_forest_multioutput_integral_regression_target(ForestRegressor):
    """Check that multioutput regression with integral values is not interpreted
    as a multiclass-multioutput target and OOB score can be computed.
    """
    rng = np.random.RandomState(42)
    X = iris.data
    y = rng.randint(low=0, high=10, size=(iris.data.shape[0], 2))
    forest = ForestRegressor(
        n_estimators=30, oob_score=True, bootstrap=True, random_state=0
    )
    forest.fit(X, y)

    # Recompute OOB predictions by hand for the first quarter of the samples
    # and compare with the fitted oob_prediction_ attribute.
    n_samples_bootstrap = _get_n_samples_bootstrap(len(X), forest.max_samples)
    n_samples_test = X.shape[0] // 4
    expected_oob = np.zeros([n_samples_test, 2])
    for idx, row in enumerate(X[:n_samples_test]):
        n_oob_trees = 0
        accumulated = np.zeros(2)
        for tree in forest.estimators_:
            unsampled = _generate_unsampled_indices(
                tree.random_state, len(X), n_samples_bootstrap
            )
            if idx in unsampled:
                n_oob_trees += 1
                accumulated += tree.predict(row.reshape(1, -1)).squeeze()
        expected_oob[idx] = accumulated / n_oob_trees
    assert_allclose(expected_oob, forest.oob_prediction_[:n_samples_test])
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
@pytest.mark.parametrize("oob_score", [True, False])
def test_random_trees_embedding_raise_error_oob(oob_score):
    """RandomTreesEmbedding accepts no oob_score argument and does not
    implement OOB scoring at all."""
    with pytest.raises(TypeError, match="got an unexpected keyword argument"):
        RandomTreesEmbedding(oob_score=oob_score)
    with pytest.raises(NotImplementedError, match="OOB score not supported"):
        RandomTreesEmbedding()._set_oob_score_and_attributes(X, y)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_gridsearch(name):
    """Forest classifiers must be usable inside GridSearchCV."""
    param_grid = {"n_estimators": (1, 2), "max_depth": (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_parallel(name):
    """Check parallel computations in classification"""
    if name in FOREST_CLASSIFIERS:
        X, y = iris.data, iris.target
    elif name in FOREST_REGRESSORS:
        X, y = X_reg, y_reg

    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(X, y)
    assert len(forest) == 10

    # Predictions must not depend on the number of jobs.
    forest.set_params(n_jobs=1)
    serial_pred = forest.predict(X)
    forest.set_params(n_jobs=2)
    parallel_pred = forest.predict(X)
    assert_array_almost_equal(serial_pred, parallel_pred, 3)
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_pickle(name):
    """A fitted forest must survive a pickle round-trip unchanged."""
    if name in FOREST_CLASSIFIERS:
        X, y = iris.data[::2], iris.target[::2]
    elif name in FOREST_REGRESSORS:
        X, y = X_reg[::2], y_reg[::2]

    estimator = FOREST_ESTIMATORS[name](random_state=0)
    estimator.fit(X, y)
    score_before = estimator.score(X, y)

    restored = pickle.loads(pickle.dumps(estimator))
    assert type(restored) == estimator.__class__
    # The restored estimator scores identically to the original.
    assert score_before == restored.score(X, y)
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_multioutput(name):
    """Check estimators on multi-output problems."""
    # One training triple per quadrant of the plane.
    X_train = [
        [-2, -1], [-1, -1], [-1, -2],
        [1, 1], [1, 2], [2, 1],
        [-2, 1], [-1, 1], [-1, 2],
        [2, -1], [1, -1], [1, -2],
    ]
    y_train = [
        [-1, 0], [-1, 0], [-1, 0],
        [1, 1], [1, 1], [1, 1],
        [-1, 2], [-1, 2], [-1, 2],
        [1, 3], [1, 3], [1, 3],
    ]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)

    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # First output has 2 classes, second has 4; both proba variants
            # must report one array per output with matching shapes.
            for probas in (est.predict_proba(X_test), est.predict_log_proba(X_test)):
                assert len(probas) == 2
                assert probas[0].shape == (4, 2)
                assert probas[1].shape == (4, 4)
|
| 773 |
+
|
| 774 |
+
|
| 775 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
|
| 776 |
+
def test_multioutput_string(name):
|
| 777 |
+
# Check estimators on multi-output problems with string outputs.
|
| 778 |
+
|
| 779 |
+
X_train = [
|
| 780 |
+
[-2, -1],
|
| 781 |
+
[-1, -1],
|
| 782 |
+
[-1, -2],
|
| 783 |
+
[1, 1],
|
| 784 |
+
[1, 2],
|
| 785 |
+
[2, 1],
|
| 786 |
+
[-2, 1],
|
| 787 |
+
[-1, 1],
|
| 788 |
+
[-1, 2],
|
| 789 |
+
[2, -1],
|
| 790 |
+
[1, -1],
|
| 791 |
+
[1, -2],
|
| 792 |
+
]
|
| 793 |
+
y_train = [
|
| 794 |
+
["red", "blue"],
|
| 795 |
+
["red", "blue"],
|
| 796 |
+
["red", "blue"],
|
| 797 |
+
["green", "green"],
|
| 798 |
+
["green", "green"],
|
| 799 |
+
["green", "green"],
|
| 800 |
+
["red", "purple"],
|
| 801 |
+
["red", "purple"],
|
| 802 |
+
["red", "purple"],
|
| 803 |
+
["green", "yellow"],
|
| 804 |
+
["green", "yellow"],
|
| 805 |
+
["green", "yellow"],
|
| 806 |
+
]
|
| 807 |
+
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
|
| 808 |
+
y_test = [
|
| 809 |
+
["red", "blue"],
|
| 810 |
+
["green", "green"],
|
| 811 |
+
["red", "purple"],
|
| 812 |
+
["green", "yellow"],
|
| 813 |
+
]
|
| 814 |
+
|
| 815 |
+
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
|
| 816 |
+
y_pred = est.fit(X_train, y_train).predict(X_test)
|
| 817 |
+
assert_array_equal(y_pred, y_test)
|
| 818 |
+
|
| 819 |
+
with np.errstate(divide="ignore"):
|
| 820 |
+
proba = est.predict_proba(X_test)
|
| 821 |
+
assert len(proba) == 2
|
| 822 |
+
assert proba[0].shape == (4, 2)
|
| 823 |
+
assert proba[1].shape == (4, 4)
|
| 824 |
+
|
| 825 |
+
log_proba = est.predict_log_proba(X_test)
|
| 826 |
+
assert len(log_proba) == 2
|
| 827 |
+
assert log_proba[0].shape == (4, 2)
|
| 828 |
+
assert log_proba[1].shape == (4, 4)
|
| 829 |
+
|
| 830 |
+
|
| 831 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
|
| 832 |
+
def test_classes_shape(name):
|
| 833 |
+
# Test that n_classes_ and classes_ have proper shape.
|
| 834 |
+
ForestClassifier = FOREST_CLASSIFIERS[name]
|
| 835 |
+
|
| 836 |
+
# Classification, single output
|
| 837 |
+
clf = ForestClassifier(random_state=0).fit(X, y)
|
| 838 |
+
|
| 839 |
+
assert clf.n_classes_ == 2
|
| 840 |
+
assert_array_equal(clf.classes_, [-1, 1])
|
| 841 |
+
|
| 842 |
+
# Classification, multi-output
|
| 843 |
+
_y = np.vstack((y, np.array(y) * 2)).T
|
| 844 |
+
clf = ForestClassifier(random_state=0).fit(X, _y)
|
| 845 |
+
|
| 846 |
+
assert_array_equal(clf.n_classes_, [2, 2])
|
| 847 |
+
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def test_random_trees_dense_type():
|
| 851 |
+
# Test that the `sparse_output` parameter of RandomTreesEmbedding
|
| 852 |
+
# works by returning a dense array.
|
| 853 |
+
|
| 854 |
+
# Create the RTE with sparse=False
|
| 855 |
+
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
|
| 856 |
+
X, y = datasets.make_circles(factor=0.5)
|
| 857 |
+
X_transformed = hasher.fit_transform(X)
|
| 858 |
+
|
| 859 |
+
# Assert that type is ndarray, not scipy.sparse.csr_matrix
|
| 860 |
+
assert isinstance(X_transformed, np.ndarray)
|
| 861 |
+
|
| 862 |
+
|
| 863 |
+
def test_random_trees_dense_equal():
|
| 864 |
+
# Test that the `sparse_output` parameter of RandomTreesEmbedding
|
| 865 |
+
# works by returning the same array for both argument values.
|
| 866 |
+
|
| 867 |
+
# Create the RTEs
|
| 868 |
+
hasher_dense = RandomTreesEmbedding(
|
| 869 |
+
n_estimators=10, sparse_output=False, random_state=0
|
| 870 |
+
)
|
| 871 |
+
hasher_sparse = RandomTreesEmbedding(
|
| 872 |
+
n_estimators=10, sparse_output=True, random_state=0
|
| 873 |
+
)
|
| 874 |
+
X, y = datasets.make_circles(factor=0.5)
|
| 875 |
+
X_transformed_dense = hasher_dense.fit_transform(X)
|
| 876 |
+
X_transformed_sparse = hasher_sparse.fit_transform(X)
|
| 877 |
+
|
| 878 |
+
# Assert that dense and sparse hashers have same array.
|
| 879 |
+
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
|
| 880 |
+
|
| 881 |
+
|
| 882 |
+
def test_random_hasher():
|
| 883 |
+
# test random forest hashing on circles dataset
|
| 884 |
+
# make sure that it is linearly separable.
|
| 885 |
+
# even after projected to two SVD dimensions
|
| 886 |
+
# Note: Not all random_states produce perfect results.
|
| 887 |
+
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
|
| 888 |
+
X, y = datasets.make_circles(factor=0.5)
|
| 889 |
+
X_transformed = hasher.fit_transform(X)
|
| 890 |
+
|
| 891 |
+
# test fit and transform:
|
| 892 |
+
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
|
| 893 |
+
assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray())
|
| 894 |
+
|
| 895 |
+
# one leaf active per data point per forest
|
| 896 |
+
assert X_transformed.shape[0] == X.shape[0]
|
| 897 |
+
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
|
| 898 |
+
svd = TruncatedSVD(n_components=2)
|
| 899 |
+
X_reduced = svd.fit_transform(X_transformed)
|
| 900 |
+
linear_clf = LinearSVC()
|
| 901 |
+
linear_clf.fit(X_reduced, y)
|
| 902 |
+
assert linear_clf.score(X_reduced, y) == 1.0
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
|
| 906 |
+
def test_random_hasher_sparse_data(csc_container):
|
| 907 |
+
X, y = datasets.make_multilabel_classification(random_state=0)
|
| 908 |
+
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
|
| 909 |
+
X_transformed = hasher.fit_transform(X)
|
| 910 |
+
X_transformed_sparse = hasher.fit_transform(csc_container(X))
|
| 911 |
+
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
|
| 912 |
+
|
| 913 |
+
|
| 914 |
+
def test_parallel_train():
|
| 915 |
+
rng = check_random_state(12321)
|
| 916 |
+
n_samples, n_features = 80, 30
|
| 917 |
+
X_train = rng.randn(n_samples, n_features)
|
| 918 |
+
y_train = rng.randint(0, 2, n_samples)
|
| 919 |
+
|
| 920 |
+
clfs = [
|
| 921 |
+
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(
|
| 922 |
+
X_train, y_train
|
| 923 |
+
)
|
| 924 |
+
for n_jobs in [1, 2, 3, 8, 16, 32]
|
| 925 |
+
]
|
| 926 |
+
|
| 927 |
+
X_test = rng.randn(n_samples, n_features)
|
| 928 |
+
probas = [clf.predict_proba(X_test) for clf in clfs]
|
| 929 |
+
for proba1, proba2 in zip(probas, probas[1:]):
|
| 930 |
+
assert_array_almost_equal(proba1, proba2)
|
| 931 |
+
|
| 932 |
+
|
| 933 |
+
def test_distribution():
|
| 934 |
+
rng = check_random_state(12321)
|
| 935 |
+
|
| 936 |
+
# Single variable with 4 values
|
| 937 |
+
X = rng.randint(0, 4, size=(1000, 1))
|
| 938 |
+
y = rng.rand(1000)
|
| 939 |
+
n_trees = 500
|
| 940 |
+
|
| 941 |
+
reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
|
| 942 |
+
|
| 943 |
+
uniques = defaultdict(int)
|
| 944 |
+
for tree in reg.estimators_:
|
| 945 |
+
tree = "".join(
|
| 946 |
+
("%d,%d/" % (f, int(t)) if f >= 0 else "-")
|
| 947 |
+
for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
|
| 948 |
+
)
|
| 949 |
+
|
| 950 |
+
uniques[tree] += 1
|
| 951 |
+
|
| 952 |
+
uniques = sorted([(1.0 * count / n_trees, tree) for tree, count in uniques.items()])
|
| 953 |
+
|
| 954 |
+
# On a single variable problem where X_0 has 4 equiprobable values, there
|
| 955 |
+
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
|
| 956 |
+
# them has probability 1/3 while the 4 others have probability 1/6.
|
| 957 |
+
|
| 958 |
+
assert len(uniques) == 5
|
| 959 |
+
assert 0.20 > uniques[0][0] # Rough approximation of 1/6.
|
| 960 |
+
assert 0.20 > uniques[1][0]
|
| 961 |
+
assert 0.20 > uniques[2][0]
|
| 962 |
+
assert 0.20 > uniques[3][0]
|
| 963 |
+
assert uniques[4][0] > 0.3
|
| 964 |
+
assert uniques[4][1] == "0,1/0,0/--0,2/--"
|
| 965 |
+
|
| 966 |
+
# Two variables, one with 2 values, one with 3 values
|
| 967 |
+
X = np.empty((1000, 2))
|
| 968 |
+
X[:, 0] = np.random.randint(0, 2, 1000)
|
| 969 |
+
X[:, 1] = np.random.randint(0, 3, 1000)
|
| 970 |
+
y = rng.rand(1000)
|
| 971 |
+
|
| 972 |
+
reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y)
|
| 973 |
+
|
| 974 |
+
uniques = defaultdict(int)
|
| 975 |
+
for tree in reg.estimators_:
|
| 976 |
+
tree = "".join(
|
| 977 |
+
("%d,%d/" % (f, int(t)) if f >= 0 else "-")
|
| 978 |
+
for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
|
| 979 |
+
)
|
| 980 |
+
|
| 981 |
+
uniques[tree] += 1
|
| 982 |
+
|
| 983 |
+
uniques = [(count, tree) for tree, count in uniques.items()]
|
| 984 |
+
assert len(uniques) == 8
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 988 |
+
def test_max_leaf_nodes_max_depth(name):
|
| 989 |
+
X, y = hastie_X, hastie_y
|
| 990 |
+
|
| 991 |
+
# Test precedence of max_leaf_nodes over max_depth.
|
| 992 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 993 |
+
est = ForestEstimator(
|
| 994 |
+
max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0
|
| 995 |
+
).fit(X, y)
|
| 996 |
+
assert est.estimators_[0].get_depth() == 1
|
| 997 |
+
|
| 998 |
+
est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y)
|
| 999 |
+
assert est.estimators_[0].get_depth() == 1
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1003 |
+
def test_min_samples_split(name):
|
| 1004 |
+
X, y = hastie_X, hastie_y
|
| 1005 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1006 |
+
|
| 1007 |
+
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
|
| 1008 |
+
est.fit(X, y)
|
| 1009 |
+
node_idx = est.estimators_[0].tree_.children_left != -1
|
| 1010 |
+
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
|
| 1011 |
+
|
| 1012 |
+
assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name)
|
| 1013 |
+
|
| 1014 |
+
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
|
| 1015 |
+
est.fit(X, y)
|
| 1016 |
+
node_idx = est.estimators_[0].tree_.children_left != -1
|
| 1017 |
+
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
|
| 1018 |
+
|
| 1019 |
+
assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name)
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1023 |
+
def test_min_samples_leaf(name):
|
| 1024 |
+
X, y = hastie_X, hastie_y
|
| 1025 |
+
|
| 1026 |
+
# Test if leaves contain more than leaf_count training examples
|
| 1027 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1028 |
+
|
| 1029 |
+
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
|
| 1030 |
+
est.fit(X, y)
|
| 1031 |
+
out = est.estimators_[0].tree_.apply(X)
|
| 1032 |
+
node_counts = np.bincount(out)
|
| 1033 |
+
# drop inner nodes
|
| 1034 |
+
leaf_count = node_counts[node_counts != 0]
|
| 1035 |
+
assert np.min(leaf_count) > 4, "Failed with {0}".format(name)
|
| 1036 |
+
|
| 1037 |
+
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1, random_state=0)
|
| 1038 |
+
est.fit(X, y)
|
| 1039 |
+
out = est.estimators_[0].tree_.apply(X)
|
| 1040 |
+
node_counts = np.bincount(out)
|
| 1041 |
+
# drop inner nodes
|
| 1042 |
+
leaf_count = node_counts[node_counts != 0]
|
| 1043 |
+
assert np.min(leaf_count) > len(X) * 0.25 - 1, "Failed with {0}".format(name)
|
| 1044 |
+
|
| 1045 |
+
|
| 1046 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1047 |
+
def test_min_weight_fraction_leaf(name):
|
| 1048 |
+
X, y = hastie_X, hastie_y
|
| 1049 |
+
|
| 1050 |
+
# Test if leaves contain at least min_weight_fraction_leaf of the
|
| 1051 |
+
# training set
|
| 1052 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1053 |
+
rng = np.random.RandomState(0)
|
| 1054 |
+
weights = rng.rand(X.shape[0])
|
| 1055 |
+
total_weight = np.sum(weights)
|
| 1056 |
+
|
| 1057 |
+
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
|
| 1058 |
+
# by setting max_leaf_nodes
|
| 1059 |
+
for frac in np.linspace(0, 0.5, 6):
|
| 1060 |
+
est = ForestEstimator(
|
| 1061 |
+
min_weight_fraction_leaf=frac, n_estimators=1, random_state=0
|
| 1062 |
+
)
|
| 1063 |
+
if "RandomForest" in name:
|
| 1064 |
+
est.bootstrap = False
|
| 1065 |
+
|
| 1066 |
+
est.fit(X, y, sample_weight=weights)
|
| 1067 |
+
out = est.estimators_[0].tree_.apply(X)
|
| 1068 |
+
node_weights = np.bincount(out, weights=weights)
|
| 1069 |
+
# drop inner nodes
|
| 1070 |
+
leaf_weights = node_weights[node_weights != 0]
|
| 1071 |
+
assert (
|
| 1072 |
+
np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf
|
| 1073 |
+
), "Failed with {0} min_weight_fraction_leaf={1}".format(
|
| 1074 |
+
name, est.min_weight_fraction_leaf
|
| 1075 |
+
)
|
| 1076 |
+
|
| 1077 |
+
|
| 1078 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1079 |
+
@pytest.mark.parametrize(
|
| 1080 |
+
"sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
|
| 1081 |
+
)
|
| 1082 |
+
def test_sparse_input(name, sparse_container):
|
| 1083 |
+
X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50)
|
| 1084 |
+
|
| 1085 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1086 |
+
|
| 1087 |
+
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
|
| 1088 |
+
sparse = ForestEstimator(random_state=0, max_depth=2).fit(sparse_container(X), y)
|
| 1089 |
+
|
| 1090 |
+
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
|
| 1091 |
+
|
| 1092 |
+
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
|
| 1093 |
+
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
|
| 1094 |
+
assert_array_almost_equal(
|
| 1095 |
+
sparse.feature_importances_, dense.feature_importances_
|
| 1096 |
+
)
|
| 1097 |
+
|
| 1098 |
+
if name in FOREST_CLASSIFIERS:
|
| 1099 |
+
assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X))
|
| 1100 |
+
assert_array_almost_equal(
|
| 1101 |
+
sparse.predict_log_proba(X), dense.predict_log_proba(X)
|
| 1102 |
+
)
|
| 1103 |
+
|
| 1104 |
+
if name in FOREST_TRANSFORMERS:
|
| 1105 |
+
assert_array_almost_equal(
|
| 1106 |
+
sparse.transform(X).toarray(), dense.transform(X).toarray()
|
| 1107 |
+
)
|
| 1108 |
+
assert_array_almost_equal(
|
| 1109 |
+
sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()
|
| 1110 |
+
)
|
| 1111 |
+
|
| 1112 |
+
|
| 1113 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
|
| 1114 |
+
@pytest.mark.parametrize("dtype", (np.float64, np.float32))
|
| 1115 |
+
def test_memory_layout(name, dtype):
|
| 1116 |
+
# Test that it works no matter the memory layout
|
| 1117 |
+
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
|
| 1118 |
+
|
| 1119 |
+
# Dense
|
| 1120 |
+
for container, kwargs in (
|
| 1121 |
+
(np.asarray, {}), # Nothing
|
| 1122 |
+
(np.asarray, {"order": "C"}), # C-order
|
| 1123 |
+
(np.asarray, {"order": "F"}), # F-order
|
| 1124 |
+
(np.ascontiguousarray, {}), # Contiguous
|
| 1125 |
+
):
|
| 1126 |
+
X = container(iris.data, dtype=dtype, **kwargs)
|
| 1127 |
+
y = iris.target
|
| 1128 |
+
assert_array_almost_equal(est.fit(X, y).predict(X), y)
|
| 1129 |
+
|
| 1130 |
+
# Sparse (if applicable)
|
| 1131 |
+
if est.estimator.splitter in SPARSE_SPLITTERS:
|
| 1132 |
+
for sparse_container in COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS:
|
| 1133 |
+
X = sparse_container(iris.data, dtype=dtype)
|
| 1134 |
+
y = iris.target
|
| 1135 |
+
assert_array_almost_equal(est.fit(X, y).predict(X), y)
|
| 1136 |
+
|
| 1137 |
+
# Strided
|
| 1138 |
+
X = np.asarray(iris.data[::3], dtype=dtype)
|
| 1139 |
+
y = iris.target[::3]
|
| 1140 |
+
assert_array_almost_equal(est.fit(X, y).predict(X), y)
|
| 1141 |
+
|
| 1142 |
+
|
| 1143 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1144 |
+
def test_1d_input(name):
|
| 1145 |
+
X = iris.data[:, 0]
|
| 1146 |
+
X_2d = iris.data[:, 0].reshape((-1, 1))
|
| 1147 |
+
y = iris.target
|
| 1148 |
+
|
| 1149 |
+
with ignore_warnings():
|
| 1150 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1151 |
+
with pytest.raises(ValueError):
|
| 1152 |
+
ForestEstimator(n_estimators=1, random_state=0).fit(X, y)
|
| 1153 |
+
|
| 1154 |
+
est = ForestEstimator(random_state=0)
|
| 1155 |
+
est.fit(X_2d, y)
|
| 1156 |
+
|
| 1157 |
+
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
|
| 1158 |
+
with pytest.raises(ValueError):
|
| 1159 |
+
est.predict(X)
|
| 1160 |
+
|
| 1161 |
+
|
| 1162 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
|
| 1163 |
+
def test_class_weights(name):
|
| 1164 |
+
# Check class_weights resemble sample_weights behavior.
|
| 1165 |
+
ForestClassifier = FOREST_CLASSIFIERS[name]
|
| 1166 |
+
|
| 1167 |
+
# Iris is balanced, so no effect expected for using 'balanced' weights
|
| 1168 |
+
clf1 = ForestClassifier(random_state=0)
|
| 1169 |
+
clf1.fit(iris.data, iris.target)
|
| 1170 |
+
clf2 = ForestClassifier(class_weight="balanced", random_state=0)
|
| 1171 |
+
clf2.fit(iris.data, iris.target)
|
| 1172 |
+
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
|
| 1173 |
+
|
| 1174 |
+
# Make a multi-output problem with three copies of Iris
|
| 1175 |
+
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
|
| 1176 |
+
# Create user-defined weights that should balance over the outputs
|
| 1177 |
+
clf3 = ForestClassifier(
|
| 1178 |
+
class_weight=[
|
| 1179 |
+
{0: 2.0, 1: 2.0, 2: 1.0},
|
| 1180 |
+
{0: 2.0, 1: 1.0, 2: 2.0},
|
| 1181 |
+
{0: 1.0, 1: 2.0, 2: 2.0},
|
| 1182 |
+
],
|
| 1183 |
+
random_state=0,
|
| 1184 |
+
)
|
| 1185 |
+
clf3.fit(iris.data, iris_multi)
|
| 1186 |
+
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
|
| 1187 |
+
# Check against multi-output "balanced" which should also have no effect
|
| 1188 |
+
clf4 = ForestClassifier(class_weight="balanced", random_state=0)
|
| 1189 |
+
clf4.fit(iris.data, iris_multi)
|
| 1190 |
+
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
|
| 1191 |
+
|
| 1192 |
+
# Inflate importance of class 1, check against user-defined weights
|
| 1193 |
+
sample_weight = np.ones(iris.target.shape)
|
| 1194 |
+
sample_weight[iris.target == 1] *= 100
|
| 1195 |
+
class_weight = {0: 1.0, 1: 100.0, 2: 1.0}
|
| 1196 |
+
clf1 = ForestClassifier(random_state=0)
|
| 1197 |
+
clf1.fit(iris.data, iris.target, sample_weight)
|
| 1198 |
+
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
|
| 1199 |
+
clf2.fit(iris.data, iris.target)
|
| 1200 |
+
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
|
| 1201 |
+
|
| 1202 |
+
# Check that sample_weight and class_weight are multiplicative
|
| 1203 |
+
clf1 = ForestClassifier(random_state=0)
|
| 1204 |
+
clf1.fit(iris.data, iris.target, sample_weight**2)
|
| 1205 |
+
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
|
| 1206 |
+
clf2.fit(iris.data, iris.target, sample_weight)
|
| 1207 |
+
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
|
| 1208 |
+
|
| 1209 |
+
|
| 1210 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
|
| 1211 |
+
def test_class_weight_balanced_and_bootstrap_multi_output(name):
|
| 1212 |
+
# Test class_weight works for multi-output"""
|
| 1213 |
+
ForestClassifier = FOREST_CLASSIFIERS[name]
|
| 1214 |
+
_y = np.vstack((y, np.array(y) * 2)).T
|
| 1215 |
+
clf = ForestClassifier(class_weight="balanced", random_state=0)
|
| 1216 |
+
clf.fit(X, _y)
|
| 1217 |
+
clf = ForestClassifier(
|
| 1218 |
+
class_weight=[{-1: 0.5, 1: 1.0}, {-2: 1.0, 2: 1.0}], random_state=0
|
| 1219 |
+
)
|
| 1220 |
+
clf.fit(X, _y)
|
| 1221 |
+
# smoke test for balanced subsample
|
| 1222 |
+
clf = ForestClassifier(class_weight="balanced_subsample", random_state=0)
|
| 1223 |
+
clf.fit(X, _y)
|
| 1224 |
+
|
| 1225 |
+
|
| 1226 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
|
| 1227 |
+
def test_class_weight_errors(name):
|
| 1228 |
+
# Test if class_weight raises errors and warnings when expected.
|
| 1229 |
+
ForestClassifier = FOREST_CLASSIFIERS[name]
|
| 1230 |
+
_y = np.vstack((y, np.array(y) * 2)).T
|
| 1231 |
+
|
| 1232 |
+
# Warning warm_start with preset
|
| 1233 |
+
clf = ForestClassifier(class_weight="balanced", warm_start=True, random_state=0)
|
| 1234 |
+
clf.fit(X, y)
|
| 1235 |
+
|
| 1236 |
+
warn_msg = (
|
| 1237 |
+
"Warm-start fitting without increasing n_estimators does not fit new trees."
|
| 1238 |
+
)
|
| 1239 |
+
with pytest.warns(UserWarning, match=warn_msg):
|
| 1240 |
+
clf.fit(X, _y)
|
| 1241 |
+
|
| 1242 |
+
# Incorrect length list for multi-output
|
| 1243 |
+
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0)
|
| 1244 |
+
with pytest.raises(ValueError):
|
| 1245 |
+
clf.fit(X, _y)
|
| 1246 |
+
|
| 1247 |
+
|
| 1248 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1249 |
+
def test_warm_start(name):
|
| 1250 |
+
# Test if fitting incrementally with warm start gives a forest of the
|
| 1251 |
+
# right size and the same results as a normal fit.
|
| 1252 |
+
X, y = hastie_X, hastie_y
|
| 1253 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1254 |
+
est_ws = None
|
| 1255 |
+
for n_estimators in [5, 10]:
|
| 1256 |
+
if est_ws is None:
|
| 1257 |
+
est_ws = ForestEstimator(
|
| 1258 |
+
n_estimators=n_estimators, random_state=42, warm_start=True
|
| 1259 |
+
)
|
| 1260 |
+
else:
|
| 1261 |
+
est_ws.set_params(n_estimators=n_estimators)
|
| 1262 |
+
est_ws.fit(X, y)
|
| 1263 |
+
assert len(est_ws) == n_estimators
|
| 1264 |
+
|
| 1265 |
+
est_no_ws = ForestEstimator(n_estimators=10, random_state=42, warm_start=False)
|
| 1266 |
+
est_no_ws.fit(X, y)
|
| 1267 |
+
|
| 1268 |
+
assert set([tree.random_state for tree in est_ws]) == set(
|
| 1269 |
+
[tree.random_state for tree in est_no_ws]
|
| 1270 |
+
)
|
| 1271 |
+
|
| 1272 |
+
assert_array_equal(
|
| 1273 |
+
est_ws.apply(X), est_no_ws.apply(X), err_msg="Failed with {0}".format(name)
|
| 1274 |
+
)
|
| 1275 |
+
|
| 1276 |
+
|
| 1277 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1278 |
+
def test_warm_start_clear(name):
|
| 1279 |
+
# Test if fit clears state and grows a new forest when warm_start==False.
|
| 1280 |
+
X, y = hastie_X, hastie_y
|
| 1281 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1282 |
+
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1)
|
| 1283 |
+
est.fit(X, y)
|
| 1284 |
+
|
| 1285 |
+
est_2 = ForestEstimator(
|
| 1286 |
+
n_estimators=5, max_depth=1, warm_start=True, random_state=2
|
| 1287 |
+
)
|
| 1288 |
+
est_2.fit(X, y) # inits state
|
| 1289 |
+
est_2.set_params(warm_start=False, random_state=1)
|
| 1290 |
+
est_2.fit(X, y) # clears old state and equals est
|
| 1291 |
+
|
| 1292 |
+
assert_array_almost_equal(est_2.apply(X), est.apply(X))
|
| 1293 |
+
|
| 1294 |
+
|
| 1295 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1296 |
+
def test_warm_start_smaller_n_estimators(name):
|
| 1297 |
+
# Test if warm start second fit with smaller n_estimators raises error.
|
| 1298 |
+
X, y = hastie_X, hastie_y
|
| 1299 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1300 |
+
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
|
| 1301 |
+
est.fit(X, y)
|
| 1302 |
+
est.set_params(n_estimators=4)
|
| 1303 |
+
with pytest.raises(ValueError):
|
| 1304 |
+
est.fit(X, y)
|
| 1305 |
+
|
| 1306 |
+
|
| 1307 |
+
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
|
| 1308 |
+
def test_warm_start_equal_n_estimators(name):
|
| 1309 |
+
# Test if warm start with equal n_estimators does nothing and returns the
|
| 1310 |
+
# same forest and raises a warning.
|
| 1311 |
+
X, y = hastie_X, hastie_y
|
| 1312 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1313 |
+
est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1)
|
| 1314 |
+
est.fit(X, y)
|
| 1315 |
+
|
| 1316 |
+
est_2 = ForestEstimator(
|
| 1317 |
+
n_estimators=5, max_depth=3, warm_start=True, random_state=1
|
| 1318 |
+
)
|
| 1319 |
+
est_2.fit(X, y)
|
| 1320 |
+
# Now est_2 equals est.
|
| 1321 |
+
|
| 1322 |
+
est_2.set_params(random_state=2)
|
| 1323 |
+
warn_msg = (
|
| 1324 |
+
"Warm-start fitting without increasing n_estimators does not fit new trees."
|
| 1325 |
+
)
|
| 1326 |
+
with pytest.warns(UserWarning, match=warn_msg):
|
| 1327 |
+
est_2.fit(X, y)
|
| 1328 |
+
# If we had fit the trees again we would have got a different forest as we
|
| 1329 |
+
# changed the random state.
|
| 1330 |
+
assert_array_equal(est.apply(X), est_2.apply(X))
|
| 1331 |
+
|
| 1332 |
+
|
| 1333 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
|
| 1334 |
+
def test_warm_start_oob(name):
|
| 1335 |
+
# Test that the warm start computes oob score when asked.
|
| 1336 |
+
X, y = hastie_X, hastie_y
|
| 1337 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1338 |
+
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
|
| 1339 |
+
est = ForestEstimator(
|
| 1340 |
+
n_estimators=15,
|
| 1341 |
+
max_depth=3,
|
| 1342 |
+
warm_start=False,
|
| 1343 |
+
random_state=1,
|
| 1344 |
+
bootstrap=True,
|
| 1345 |
+
oob_score=True,
|
| 1346 |
+
)
|
| 1347 |
+
est.fit(X, y)
|
| 1348 |
+
|
| 1349 |
+
est_2 = ForestEstimator(
|
| 1350 |
+
n_estimators=5,
|
| 1351 |
+
max_depth=3,
|
| 1352 |
+
warm_start=False,
|
| 1353 |
+
random_state=1,
|
| 1354 |
+
bootstrap=True,
|
| 1355 |
+
oob_score=False,
|
| 1356 |
+
)
|
| 1357 |
+
est_2.fit(X, y)
|
| 1358 |
+
|
| 1359 |
+
est_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
|
| 1360 |
+
est_2.fit(X, y)
|
| 1361 |
+
|
| 1362 |
+
assert hasattr(est_2, "oob_score_")
|
| 1363 |
+
assert est.oob_score_ == est_2.oob_score_
|
| 1364 |
+
|
| 1365 |
+
# Test that oob_score is computed even if we don't need to train
|
| 1366 |
+
# additional trees.
|
| 1367 |
+
est_3 = ForestEstimator(
|
| 1368 |
+
n_estimators=15,
|
| 1369 |
+
max_depth=3,
|
| 1370 |
+
warm_start=True,
|
| 1371 |
+
random_state=1,
|
| 1372 |
+
bootstrap=True,
|
| 1373 |
+
oob_score=False,
|
| 1374 |
+
)
|
| 1375 |
+
est_3.fit(X, y)
|
| 1376 |
+
assert not hasattr(est_3, "oob_score_")
|
| 1377 |
+
|
| 1378 |
+
est_3.set_params(oob_score=True)
|
| 1379 |
+
ignore_warnings(est_3.fit)(X, y)
|
| 1380 |
+
|
| 1381 |
+
assert est.oob_score_ == est_3.oob_score_
|
| 1382 |
+
|
| 1383 |
+
|
| 1384 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
|
| 1385 |
+
def test_oob_not_computed_twice(name):
|
| 1386 |
+
# Check that oob_score is not computed twice when warm_start=True.
|
| 1387 |
+
X, y = hastie_X, hastie_y
|
| 1388 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1389 |
+
|
| 1390 |
+
est = ForestEstimator(
|
| 1391 |
+
n_estimators=10, warm_start=True, bootstrap=True, oob_score=True
|
| 1392 |
+
)
|
| 1393 |
+
|
| 1394 |
+
with patch.object(
|
| 1395 |
+
est, "_set_oob_score_and_attributes", wraps=est._set_oob_score_and_attributes
|
| 1396 |
+
) as mock_set_oob_score_and_attributes:
|
| 1397 |
+
est.fit(X, y)
|
| 1398 |
+
|
| 1399 |
+
with pytest.warns(UserWarning, match="Warm-start fitting without increasing"):
|
| 1400 |
+
est.fit(X, y)
|
| 1401 |
+
|
| 1402 |
+
mock_set_oob_score_and_attributes.assert_called_once()
|
| 1403 |
+
|
| 1404 |
+
|
| 1405 |
+
def test_dtype_convert(n_classes=15):
|
| 1406 |
+
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
|
| 1407 |
+
|
| 1408 |
+
X = np.eye(n_classes)
|
| 1409 |
+
y = [ch for ch in "ABCDEFGHIJKLMNOPQRSTU"[:n_classes]]
|
| 1410 |
+
|
| 1411 |
+
result = classifier.fit(X, y).predict(X)
|
| 1412 |
+
assert_array_equal(classifier.classes_, y)
|
| 1413 |
+
assert_array_equal(result, y)
|
| 1414 |
+
|
| 1415 |
+
|
| 1416 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
|
| 1417 |
+
def test_decision_path(name):
|
| 1418 |
+
X, y = hastie_X, hastie_y
|
| 1419 |
+
n_samples = X.shape[0]
|
| 1420 |
+
ForestEstimator = FOREST_ESTIMATORS[name]
|
| 1421 |
+
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1)
|
| 1422 |
+
est.fit(X, y)
|
| 1423 |
+
indicator, n_nodes_ptr = est.decision_path(X)
|
| 1424 |
+
|
| 1425 |
+
assert indicator.shape[1] == n_nodes_ptr[-1]
|
| 1426 |
+
assert indicator.shape[0] == n_samples
|
| 1427 |
+
assert_array_equal(
|
| 1428 |
+
np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_]
|
| 1429 |
+
)
|
| 1430 |
+
|
| 1431 |
+
# Assert that leaves index are correct
|
| 1432 |
+
leaves = est.apply(X)
|
| 1433 |
+
for est_id in range(leaves.shape[1]):
|
| 1434 |
+
leave_indicator = [
|
| 1435 |
+
indicator[i, n_nodes_ptr[est_id] + j]
|
| 1436 |
+
for i, j in enumerate(leaves[:, est_id])
|
| 1437 |
+
]
|
| 1438 |
+
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
|
| 1439 |
+
|
| 1440 |
+
|
| 1441 |
+
def test_min_impurity_decrease():
|
| 1442 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 1443 |
+
all_estimators = [
|
| 1444 |
+
RandomForestClassifier,
|
| 1445 |
+
RandomForestRegressor,
|
| 1446 |
+
ExtraTreesClassifier,
|
| 1447 |
+
ExtraTreesRegressor,
|
| 1448 |
+
]
|
| 1449 |
+
|
| 1450 |
+
for Estimator in all_estimators:
|
| 1451 |
+
est = Estimator(min_impurity_decrease=0.1)
|
| 1452 |
+
est.fit(X, y)
|
| 1453 |
+
for tree in est.estimators_:
|
| 1454 |
+
# Simply check if the parameter is passed on correctly. Tree tests
|
| 1455 |
+
# will suffice for the actual working of this param
|
| 1456 |
+
assert tree.min_impurity_decrease == 0.1
|
| 1457 |
+
|
| 1458 |
+
|
| 1459 |
+
def test_poisson_y_positive_check():
|
| 1460 |
+
est = RandomForestRegressor(criterion="poisson")
|
| 1461 |
+
X = np.zeros((3, 3))
|
| 1462 |
+
|
| 1463 |
+
y = [-1, 1, 3]
|
| 1464 |
+
err_msg = (
|
| 1465 |
+
r"Some value\(s\) of y are negative which is "
|
| 1466 |
+
r"not allowed for Poisson regression."
|
| 1467 |
+
)
|
| 1468 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 1469 |
+
est.fit(X, y)
|
| 1470 |
+
|
| 1471 |
+
y = [0, 0, 0]
|
| 1472 |
+
err_msg = (
|
| 1473 |
+
r"Sum of y is not strictly positive which "
|
| 1474 |
+
r"is necessary for Poisson regression."
|
| 1475 |
+
)
|
| 1476 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 1477 |
+
est.fit(X, y)
|
| 1478 |
+
|
| 1479 |
+
|
| 1480 |
+
# mypy error: Variable "DEFAULT_JOBLIB_BACKEND" is not valid type
|
| 1481 |
+
class MyBackend(DEFAULT_JOBLIB_BACKEND): # type: ignore
|
| 1482 |
+
def __init__(self, *args, **kwargs):
|
| 1483 |
+
self.count = 0
|
| 1484 |
+
super().__init__(*args, **kwargs)
|
| 1485 |
+
|
| 1486 |
+
def start_call(self):
|
| 1487 |
+
self.count += 1
|
| 1488 |
+
return super().start_call()
|
| 1489 |
+
|
| 1490 |
+
|
| 1491 |
+
joblib.register_parallel_backend("testing", MyBackend)
|
| 1492 |
+
|
| 1493 |
+
|
| 1494 |
+
@skip_if_no_parallel
|
| 1495 |
+
def test_backend_respected():
|
| 1496 |
+
clf = RandomForestClassifier(n_estimators=10, n_jobs=2)
|
| 1497 |
+
|
| 1498 |
+
with joblib.parallel_backend("testing") as (ba, n_jobs):
|
| 1499 |
+
clf.fit(X, y)
|
| 1500 |
+
|
| 1501 |
+
assert ba.count > 0
|
| 1502 |
+
|
| 1503 |
+
# predict_proba requires shared memory. Ensure that's honored.
|
| 1504 |
+
with joblib.parallel_backend("testing") as (ba, _):
|
| 1505 |
+
clf.predict_proba(X)
|
| 1506 |
+
|
| 1507 |
+
assert ba.count == 0
|
| 1508 |
+
|
| 1509 |
+
|
| 1510 |
+
def test_forest_feature_importances_sum():
|
| 1511 |
+
X, y = make_classification(
|
| 1512 |
+
n_samples=15, n_informative=3, random_state=1, n_classes=3
|
| 1513 |
+
)
|
| 1514 |
+
clf = RandomForestClassifier(
|
| 1515 |
+
min_samples_leaf=5, random_state=42, n_estimators=200
|
| 1516 |
+
).fit(X, y)
|
| 1517 |
+
assert math.isclose(1, clf.feature_importances_.sum(), abs_tol=1e-7)
|
| 1518 |
+
|
| 1519 |
+
|
| 1520 |
+
def test_forest_degenerate_feature_importances():
|
| 1521 |
+
# build a forest of single node trees. See #13636
|
| 1522 |
+
X = np.zeros((10, 10))
|
| 1523 |
+
y = np.ones((10,))
|
| 1524 |
+
gbr = RandomForestRegressor(n_estimators=10).fit(X, y)
|
| 1525 |
+
assert_array_equal(gbr.feature_importances_, np.zeros(10, dtype=np.float64))
|
| 1526 |
+
|
| 1527 |
+
|
| 1528 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_max_samples_bootstrap(name):
    # Check invalid `max_samples` values
    estimator = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=False, max_samples=0.5)
    err_msg = (
        r"`max_sample` cannot be set if `bootstrap=False`. "
        r"Either switch to `bootstrap=True` or set "
        r"`max_sample=None`."
    )
    # Setting max_samples without bootstrapping is contradictory and must raise.
    with pytest.raises(ValueError, match=err_msg):
        estimator.fit(X, y)
|
| 1539 |
+
|
| 1540 |
+
|
| 1541 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_large_max_samples_exception(name):
    # Check invalid `max_samples`
    estimator = FOREST_CLASSIFIERS_REGRESSORS[name](
        bootstrap=True, max_samples=int(1e9)
    )
    expected = "`max_samples` must be <= n_samples=6 but got value 1000000000"
    with pytest.raises(ValueError, match=expected):
        estimator.fit(X, y)
|
| 1548 |
+
|
| 1549 |
+
|
| 1550 |
+
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
def test_max_samples_boundary_regressors(name):
    """max_samples=1.0 and max_samples=None must yield equivalent regressors."""
    X_train, X_test, y_train, y_test = train_test_split(
        X_reg, y_reg, train_size=0.7, test_size=0.3, random_state=0
    )

    # Fit once per boundary setting; everything else is held fixed.
    predictions = {}
    for max_samples in (1.0, None):
        model = FOREST_REGRESSORS[name](
            bootstrap=True, max_samples=max_samples, random_state=0
        )
        predictions[max_samples] = model.fit(X_train, y_train).predict(X_test)

    mse_full = mean_squared_error(predictions[1.0], y_test)
    mse_none = mean_squared_error(predictions[None], y_test)

    assert mse_full == pytest.approx(mse_none)
|
| 1570 |
+
|
| 1571 |
+
|
| 1572 |
+
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_max_samples_boundary_classifiers(name):
    """max_samples=1.0 and max_samples=None must yield equivalent classifiers."""
    X_train, X_test, y_train, _ = train_test_split(
        X_large, y_large, random_state=0, stratify=y_large
    )

    # Collect predicted probabilities for both boundary settings.
    probabilities = []
    for max_samples in (1.0, None):
        model = FOREST_CLASSIFIERS[name](
            bootstrap=True, max_samples=max_samples, random_state=0
        )
        probabilities.append(model.fit(X_train, y_train).predict_proba(X_test))

    np.testing.assert_allclose(probabilities[0], probabilities[1])
|
| 1589 |
+
|
| 1590 |
+
|
| 1591 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_forest_y_sparse(csr_container):
    """A sparse multilabel-indicator target must raise a clear error."""
    X = [[1, 2, 3]]
    y = csr_container([[4, 5, 6]])
    estimator = RandomForestClassifier()
    expected = "sparse multilabel-indicator for y is not supported."
    with pytest.raises(ValueError, match=expected):
        estimator.fit(X, y)
|
| 1599 |
+
|
| 1600 |
+
|
| 1601 |
+
@pytest.mark.parametrize("ForestClass", [RandomForestClassifier, RandomForestRegressor])
def test_little_tree_with_small_max_samples(ForestClass):
    """Restricting max_samples to 2 must produce a strictly smaller tree."""
    rng = np.random.RandomState(1)

    X = rng.randn(10000, 2)
    y = rng.randn(10000) > 0

    # First fit with no restriction on max samples
    unrestricted = ForestClass(
        n_estimators=1,
        random_state=rng,
        max_samples=None,
    )

    # Second fit with max samples restricted to just 2
    restricted = ForestClass(
        n_estimators=1,
        random_state=rng,
        max_samples=2,
    )

    # Fit in the same order as construction so the shared RNG is consumed
    # deterministically.
    unrestricted.fit(X, y)
    restricted.fit(X, y)

    full_tree = unrestricted.estimators_[0].tree_
    tiny_tree = restricted.estimators_[0].tree_

    msg = "Tree without `max_samples` restriction should have more nodes"
    assert full_tree.node_count > tiny_tree.node_count, msg
|
| 1630 |
+
|
| 1631 |
+
|
| 1632 |
+
@pytest.mark.parametrize("Forest", FOREST_REGRESSORS)
def test_mse_criterion_object_segfault_smoke_test(Forest):
    # This is a smoke test to ensure that passing a mutable criterion
    # does not cause a segfault when fitting with concurrent threads.
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/12623
    from sklearn.tree._criterion import MSE

    y = y_reg.reshape(-1, 1)
    n_samples, n_outputs = y.shape
    criterion_obj = MSE(n_outputs, n_samples)
    regressor = FOREST_REGRESSORS[Forest](
        n_estimators=2, n_jobs=2, criterion=criterion_obj
    )

    # Fitting with n_jobs=2 shares the criterion object across threads.
    regressor.fit(X_reg, y)
|
| 1646 |
+
|
| 1647 |
+
|
| 1648 |
+
def test_random_trees_embedding_feature_names_out():
    """Check feature names out for Random Trees Embedding."""
    random_state = np.random.RandomState(0)
    X = np.abs(random_state.randn(100, 4))
    hasher = RandomTreesEmbedding(
        n_estimators=2, max_depth=2, sparse_output=False, random_state=0
    ).fit(X)
    names = hasher.get_feature_names_out()
    # Note: nodes with indices 0, 1 and 4 are internal split nodes and
    # therefore do not appear in the expected output feature names.
    leaf_indices = (2, 3, 5, 6)
    expected_names = [
        f"randomtreesembedding_{tree}_{leaf}"
        for tree in (0, 1)
        for leaf in leaf_indices
    ]
    assert_array_equal(expected_names, names)
|
| 1672 |
+
|
| 1673 |
+
|
| 1674 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_read_only_buffer(csr_container, monkeypatch):
    """RandomForestClassifier must work on readonly sparse data.

    Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25333
    """
    # Force joblib to memmap even tiny inputs so workers see read-only buffers.
    monkeypatch.setattr(
        sklearn.ensemble._forest,
        "Parallel",
        partial(Parallel, max_nbytes=100),
    )
    rng = np.random.RandomState(seed=0)

    X, y = make_classification(n_samples=100, n_features=200, random_state=rng)
    X = csr_container(X, copy=True)

    classifier = RandomForestClassifier(n_jobs=2, random_state=rng)
    cross_val_score(classifier, X, y, cv=2)
|
| 1692 |
+
|
| 1693 |
+
|
| 1694 |
+
@pytest.mark.parametrize("class_weight", ["balanced_subsample", None])
def test_round_samples_to_one_when_samples_too_low(class_weight):
    """Check low max_samples works and is rounded to one.

    Non-regression test for gh-24037.
    """
    X, y = datasets.load_wine(return_X_y=True)
    # max_samples=1e-4 resolves to less than one sample and must be rounded up.
    model = RandomForestClassifier(
        n_estimators=10, max_samples=1e-4, class_weight=class_weight, random_state=0
    )
    model.fit(X, y)
|
| 1705 |
+
|
| 1706 |
+
|
| 1707 |
+
@pytest.mark.parametrize("seed", [None, 1])
@pytest.mark.parametrize("bootstrap", [True, False])
@pytest.mark.parametrize("ForestClass", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_estimators_samples(ForestClass, bootstrap, seed):
    """Estimators_samples_ property should be consistent.

    Tests consistency across fits and whether or not the seed for the random generator
    is set.
    """
    X, y = make_hastie_10_2(n_samples=200, random_state=1)

    # Bootstrapping is the only mode where subsampling makes sense.
    max_samples = 0.5 if bootstrap else None
    forest = ForestClass(
        n_estimators=10,
        max_samples=max_samples,
        max_features=0.5,
        random_state=seed,
        bootstrap=bootstrap,
    )
    forest.fit(X, y)

    sample_lists = forest.estimators_samples_.copy()

    # Test repeated calls result in same set of indices
    assert_array_equal(sample_lists, forest.estimators_samples_)
    fitted_trees = forest.estimators_

    assert isinstance(sample_lists, list)
    assert len(sample_lists) == len(fitted_trees)
    assert sample_lists[0].dtype == np.int32

    for indices in sample_lists:
        if bootstrap:
            assert len(indices) == len(X) // 2

            # the bootstrap should be a resampling with replacement
            assert len(np.unique(indices)) < len(indices)
        else:
            assert len(set(indices)) == len(X)

    # Refitting a clone of the first tree on its recorded samples must
    # reproduce the same fitted tree values.
    first_indices = sample_lists[0]
    first_tree = fitted_trees[0]

    X_train = X[first_indices]
    y_train = y[first_indices]

    orig_tree_values = first_tree.tree_.value
    refit_tree = clone(first_tree)
    refit_tree.fit(X_train, y_train)
    assert_allclose(orig_tree_values, refit_tree.tree_.value)
|
| 1762 |
+
|
| 1763 |
+
|
| 1764 |
+
@pytest.mark.parametrize(
    "make_data, Forest",
    [
        (datasets.make_regression, RandomForestRegressor),
        (datasets.make_classification, RandomForestClassifier),
        (datasets.make_regression, ExtraTreesRegressor),
        (datasets.make_classification, ExtraTreesClassifier),
    ],
)
def test_missing_values_is_resilient(make_data, Forest):
    """Check that forest can deal with missing values and has decent performance."""

    rng = np.random.RandomState(0)
    n_samples, n_features = 1000, 10
    X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng)

    # Create dataset with missing values
    X_missing = X.copy()
    nan_mask = rng.choice([False, True], size=X.shape, p=[0.95, 0.05])
    X_missing[nan_mask] = np.nan
    assert np.isnan(X_missing).any()

    X_missing_train, X_missing_test, y_train, y_test = train_test_split(
        X_missing, y, random_state=0
    )

    # Train forest with missing values
    forest_with_missing = Forest(random_state=rng, n_estimators=50)
    forest_with_missing.fit(X_missing_train, y_train)
    score_with_missing = forest_with_missing.score(X_missing_test, y_test)

    # Train forest without missing values
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    forest = Forest(random_state=rng, n_estimators=50)
    forest.fit(X_train, y_train)
    score_without_missing = forest.score(X_test, y_test)

    # Score is still 80 percent of the forest's score that had no missing values
    assert score_with_missing >= 0.80 * score_without_missing
|
| 1802 |
+
|
| 1803 |
+
|
| 1804 |
+
@pytest.mark.parametrize(
    "Forest",
    [
        RandomForestClassifier,
        RandomForestRegressor,
        ExtraTreesRegressor,
        ExtraTreesClassifier,
    ],
)
def test_missing_value_is_predictive(Forest):
    """Check that the forest learns when missing values are only present for
    a predictive feature."""
    rng = np.random.RandomState(0)
    n_samples = 300
    expected_score = 0.75

    X_non_predictive = rng.standard_normal(size=(n_samples, 10))
    y = rng.randint(0, high=2, size=n_samples)

    # Create a predictive feature using `y` and with some noise
    X_random_mask = rng.choice([False, True], size=n_samples, p=[0.95, 0.05])
    y_mask = y.astype(bool)
    y_mask[X_random_mask] = ~y_mask[X_random_mask]

    predictive_feature = rng.standard_normal(size=n_samples)
    predictive_feature[y_mask] = np.nan
    assert np.isnan(predictive_feature).any()

    X_predictive = X_non_predictive.copy()
    X_predictive[:, 5] = predictive_feature

    splits = train_test_split(X_predictive, X_non_predictive, y, random_state=0)
    X_predictive_train, X_predictive_test = splits[0], splits[1]
    X_non_predictive_train, X_non_predictive_test = splits[2], splits[3]
    y_train, y_test = splits[4], splits[5]

    forest_predictive = Forest(random_state=0).fit(X_predictive_train, y_train)
    forest_non_predictive = Forest(random_state=0).fit(X_non_predictive_train, y_train)

    predictive_test_score = forest_predictive.score(X_predictive_test, y_test)

    # The informative-missingness forest must both reach the absolute
    # threshold and beat the forest lacking the predictive feature.
    assert predictive_test_score >= expected_score
    assert predictive_test_score >= forest_non_predictive.score(
        X_non_predictive_test, y_test
    )
|
| 1852 |
+
|
| 1853 |
+
|
| 1854 |
+
@pytest.mark.parametrize("Forest", FOREST_REGRESSORS.values())
def test_non_supported_criterion_raises_error_with_missing_values(Forest):
    """Raise error for unsupported criterion when there are missing values."""
    X = np.array([[0, 1, 2], [np.nan, 0, 2.0]])
    y = [0.5, 1.0]

    model = Forest(criterion="absolute_error")

    expected = ".*does not accept missing values"
    with pytest.raises(ValueError, match=expected):
        model.fit(X, y)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py
ADDED
|
@@ -0,0 +1,1711 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import re
|
| 6 |
+
import warnings
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytest
|
| 10 |
+
from numpy.testing import assert_allclose
|
| 11 |
+
|
| 12 |
+
from sklearn import datasets
|
| 13 |
+
from sklearn.base import clone
|
| 14 |
+
from sklearn.datasets import make_classification, make_regression
|
| 15 |
+
from sklearn.dummy import DummyClassifier, DummyRegressor
|
| 16 |
+
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
|
| 17 |
+
from sklearn.ensemble._gb import _safe_divide
|
| 18 |
+
from sklearn.ensemble._gradient_boosting import predict_stages
|
| 19 |
+
from sklearn.exceptions import DataConversionWarning, NotFittedError
|
| 20 |
+
from sklearn.linear_model import LinearRegression
|
| 21 |
+
from sklearn.metrics import mean_squared_error
|
| 22 |
+
from sklearn.model_selection import train_test_split
|
| 23 |
+
from sklearn.pipeline import make_pipeline
|
| 24 |
+
from sklearn.preprocessing import scale
|
| 25 |
+
from sklearn.svm import NuSVR
|
| 26 |
+
from sklearn.utils import check_random_state
|
| 27 |
+
from sklearn.utils._mocking import NoSampleWeightWrapper
|
| 28 |
+
from sklearn.utils._param_validation import InvalidParameterError
|
| 29 |
+
from sklearn.utils._testing import (
|
| 30 |
+
assert_array_almost_equal,
|
| 31 |
+
assert_array_equal,
|
| 32 |
+
skip_if_32bit,
|
| 33 |
+
)
|
| 34 |
+
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
|
| 35 |
+
|
| 36 |
+
# Both gradient boosting estimator classes, used by parametrized tests.
GRADIENT_BOOSTING_ESTIMATORS = [GradientBoostingClassifier, GradientBoostingRegressor]

# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# Test points and their expected class labels for the toy problem.
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also make regression dataset
X_reg, y_reg = make_regression(
    n_samples=100, n_features=4, n_informative=8, noise=10, random_state=7
)
# Standardize the regression target so MSE thresholds in tests are comparable.
y_reg = scale(y_reg)

rng = np.random.RandomState(0)
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def test_exponential_n_classes_gt_2():
    """Test exponential loss raises for n_classes > 2."""
    classifier = GradientBoostingClassifier(loss="exponential")
    expected = "loss='exponential' is only suitable for a binary classification"
    # iris has three classes, which the exponential loss cannot handle.
    with pytest.raises(ValueError, match=expected):
        classifier.fit(iris.data, iris.target)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def test_raise_if_init_has_no_predict_proba():
    """Test raise if init_ has no predict_proba method."""
    # A regressor class lacks predict_proba, so it is an invalid `init`.
    classifier = GradientBoostingClassifier(init=GradientBoostingRegressor)
    expected = (
        "The 'init' parameter of GradientBoostingClassifier must be a str among "
        "{'zero'}, None or an object implementing 'fit' and 'predict_proba'."
    )
    with pytest.raises(ValueError, match=expected):
        classifier.fit(X, y)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@pytest.mark.parametrize("loss", ("log_loss", "exponential"))
def test_classification_toy(loss, global_random_seed):
    # Check classification on a toy dataset.
    classifier = GradientBoostingClassifier(
        loss=loss, n_estimators=10, random_state=global_random_seed
    )

    # Predicting before fitting must raise.
    with pytest.raises(ValueError):
        classifier.predict(T)

    classifier.fit(X, y)
    assert_array_equal(classifier.predict(T), true_result)
    assert 10 == len(classifier.estimators_)

    # The training loss should decrease at some boosting stage.
    score_deltas = classifier.train_score_[:-1] - classifier.train_score_[1:]
    assert np.any(score_deltas >= 0.0)

    leaves = classifier.apply(X)
    assert leaves.shape == (6, 10, 1)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@pytest.mark.parametrize("loss", ("log_loss", "exponential"))
def test_classification_synthetic(loss, global_random_seed):
    # Test GradientBoostingClassifier on synthetic dataset used by
    # Hastie et al. in ESLII - Figure 10.9
    # Note that Figure 10.9 reuses the dataset generated for figure 10.2
    # and should have 2_000 train data points and 10_000 test data points.
    # Here we intentionally use a smaller variant to make the test run faster,
    # but the conclusions are still the same, despite the smaller datasets.
    X, y = datasets.make_hastie_10_2(n_samples=2000, random_state=global_random_seed)

    split_idx = 500
    X_train, y_train = X[:split_idx], y[:split_idx]
    X_test, y_test = X[split_idx:], y[split_idx:]

    def fit_gbrt(**params):
        # Local helper: one fitted classifier with the shared settings.
        model = GradientBoostingClassifier(
            learning_rate=1.0,
            loss=loss,
            random_state=global_random_seed,
            **params,
        )
        model.fit(X_train, y_train)
        return model

    # Increasing the number of trees should decrease the test error
    gbrt_10_stumps = fit_gbrt(max_depth=1, n_estimators=10)
    gbrt_50_stumps = fit_gbrt(max_depth=1, n_estimators=50)
    assert gbrt_10_stumps.score(X_test, y_test) < gbrt_50_stumps.score(X_test, y_test)

    # Decision stumps are better suited for this dataset with a large number of
    # estimators.
    gbrt_stumps = fit_gbrt(max_depth=1, n_estimators=200)
    gbrt_10_nodes = fit_gbrt(max_leaf_nodes=10, n_estimators=200)
    assert gbrt_stumps.score(X_test, y_test) > gbrt_10_nodes.score(X_test, y_test)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
@pytest.mark.parametrize("loss", ("squared_error", "absolute_error", "huber"))
@pytest.mark.parametrize("subsample", (1.0, 0.5))
def test_regression_dataset(loss, subsample, global_random_seed):
    # Check consistency on regression dataset with least squares
    # and least absolute deviation.
    ones = np.ones(len(y_reg))
    last_y_pred = None
    # Constant weights (None, 1, 2) should not change the fitted model.
    for sample_weight in (None, ones, 2 * ones):
        # learning_rate, max_depth and n_estimators were adjusted to get a mode
        # that is accurate enough to reach a low MSE on the training set while
        # keeping the resource used to execute this test low enough.
        model = GradientBoostingRegressor(
            n_estimators=30,
            loss=loss,
            max_depth=4,
            subsample=subsample,
            min_samples_split=2,
            random_state=global_random_seed,
            learning_rate=0.5,
        )

        model.fit(X_reg, y_reg, sample_weight=sample_weight)
        leaves = model.apply(X_reg)
        assert leaves.shape == (100, 30)

        y_pred = model.predict(X_reg)
        mse = mean_squared_error(y_reg, y_pred)
        assert mse < 0.05

        if last_y_pred is not None:
            # FIXME: We temporarily bypass this test. This is due to the fact
            # that GBRT with and without `sample_weight` do not use the same
            # implementation of the median during the initialization with the
            # `DummyRegressor`. In the future, we should make sure that both
            # implementations should be the same. See PR #17377 for more.
            # assert_allclose(last_y_pred, y_pred)
            pass

        last_y_pred = y_pred
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@pytest.mark.parametrize("subsample", (1.0, 0.5))
|
| 187 |
+
@pytest.mark.parametrize("sample_weight", (None, 1))
|
| 188 |
+
def test_iris(subsample, sample_weight, global_random_seed):
|
| 189 |
+
if sample_weight == 1:
|
| 190 |
+
sample_weight = np.ones(len(iris.target))
|
| 191 |
+
# Check consistency on dataset iris.
|
| 192 |
+
clf = GradientBoostingClassifier(
|
| 193 |
+
n_estimators=100,
|
| 194 |
+
loss="log_loss",
|
| 195 |
+
random_state=global_random_seed,
|
| 196 |
+
subsample=subsample,
|
| 197 |
+
)
|
| 198 |
+
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
|
| 199 |
+
score = clf.score(iris.data, iris.target)
|
| 200 |
+
assert score > 0.9
|
| 201 |
+
|
| 202 |
+
leaves = clf.apply(iris.data)
|
| 203 |
+
assert leaves.shape == (150, 100, 3)
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def test_regression_synthetic(global_random_seed):
|
| 207 |
+
# Test on synthetic regression datasets used in Leo Breiman,
|
| 208 |
+
# `Bagging Predictors?. Machine Learning 24(2): 123-140 (1996).
|
| 209 |
+
random_state = check_random_state(global_random_seed)
|
| 210 |
+
regression_params = {
|
| 211 |
+
"n_estimators": 100,
|
| 212 |
+
"max_depth": 4,
|
| 213 |
+
"min_samples_split": 2,
|
| 214 |
+
"learning_rate": 0.1,
|
| 215 |
+
"loss": "squared_error",
|
| 216 |
+
"random_state": global_random_seed,
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
# Friedman1
|
| 220 |
+
X, y = datasets.make_friedman1(n_samples=1200, random_state=random_state, noise=1.0)
|
| 221 |
+
X_train, y_train = X[:200], y[:200]
|
| 222 |
+
X_test, y_test = X[200:], y[200:]
|
| 223 |
+
|
| 224 |
+
clf = GradientBoostingRegressor(**regression_params)
|
| 225 |
+
clf.fit(X_train, y_train)
|
| 226 |
+
mse = mean_squared_error(y_test, clf.predict(X_test))
|
| 227 |
+
assert mse < 6.5
|
| 228 |
+
|
| 229 |
+
# Friedman2
|
| 230 |
+
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
|
| 231 |
+
X_train, y_train = X[:200], y[:200]
|
| 232 |
+
X_test, y_test = X[200:], y[200:]
|
| 233 |
+
|
| 234 |
+
clf = GradientBoostingRegressor(**regression_params)
|
| 235 |
+
clf.fit(X_train, y_train)
|
| 236 |
+
mse = mean_squared_error(y_test, clf.predict(X_test))
|
| 237 |
+
assert mse < 2500.0
|
| 238 |
+
|
| 239 |
+
# Friedman3
|
| 240 |
+
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
|
| 241 |
+
X_train, y_train = X[:200], y[:200]
|
| 242 |
+
X_test, y_test = X[200:], y[200:]
|
| 243 |
+
|
| 244 |
+
clf = GradientBoostingRegressor(**regression_params)
|
| 245 |
+
clf.fit(X_train, y_train)
|
| 246 |
+
mse = mean_squared_error(y_test, clf.predict(X_test))
|
| 247 |
+
assert mse < 0.025
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
@pytest.mark.parametrize(
|
| 251 |
+
"GradientBoosting, X, y",
|
| 252 |
+
[
|
| 253 |
+
(GradientBoostingRegressor, X_reg, y_reg),
|
| 254 |
+
(GradientBoostingClassifier, iris.data, iris.target),
|
| 255 |
+
],
|
| 256 |
+
)
|
| 257 |
+
def test_feature_importances(GradientBoosting, X, y):
|
| 258 |
+
# smoke test to check that the gradient boosting expose an attribute
|
| 259 |
+
# feature_importances_
|
| 260 |
+
gbdt = GradientBoosting()
|
| 261 |
+
assert not hasattr(gbdt, "feature_importances_")
|
| 262 |
+
gbdt.fit(X, y)
|
| 263 |
+
assert hasattr(gbdt, "feature_importances_")
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def test_probability_log(global_random_seed):
|
| 267 |
+
# Predict probabilities.
|
| 268 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=global_random_seed)
|
| 269 |
+
|
| 270 |
+
with pytest.raises(ValueError):
|
| 271 |
+
clf.predict_proba(T)
|
| 272 |
+
|
| 273 |
+
clf.fit(X, y)
|
| 274 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 275 |
+
|
| 276 |
+
# check if probabilities are in [0, 1].
|
| 277 |
+
y_proba = clf.predict_proba(T)
|
| 278 |
+
assert np.all(y_proba >= 0.0)
|
| 279 |
+
assert np.all(y_proba <= 1.0)
|
| 280 |
+
|
| 281 |
+
# derive predictions from probabilities
|
| 282 |
+
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
|
| 283 |
+
assert_array_equal(y_pred, true_result)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def test_single_class_with_sample_weight():
|
| 287 |
+
sample_weight = [0, 0, 0, 1, 1, 1]
|
| 288 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 289 |
+
msg = (
|
| 290 |
+
"y contains 1 class after sample_weight trimmed classes with "
|
| 291 |
+
"zero weights, while a minimum of 2 classes are required."
|
| 292 |
+
)
|
| 293 |
+
with pytest.raises(ValueError, match=msg):
|
| 294 |
+
clf.fit(X, y, sample_weight=sample_weight)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
|
| 298 |
+
def test_check_inputs_predict_stages(csc_container):
|
| 299 |
+
# check that predict_stages through an error if the type of X is not
|
| 300 |
+
# supported
|
| 301 |
+
x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 302 |
+
x_sparse_csc = csc_container(x)
|
| 303 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 304 |
+
clf.fit(x, y)
|
| 305 |
+
score = np.zeros((y.shape)).reshape(-1, 1)
|
| 306 |
+
err_msg = "When X is a sparse matrix, a CSR format is expected"
|
| 307 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 308 |
+
predict_stages(clf.estimators_, x_sparse_csc, clf.learning_rate, score)
|
| 309 |
+
x_fortran = np.asfortranarray(x)
|
| 310 |
+
with pytest.raises(ValueError, match="X should be C-ordered np.ndarray"):
|
| 311 |
+
predict_stages(clf.estimators_, x_fortran, clf.learning_rate, score)
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def test_max_feature_regression(global_random_seed):
|
| 315 |
+
# Test to make sure random state is set properly.
|
| 316 |
+
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=global_random_seed)
|
| 317 |
+
|
| 318 |
+
X_train, X_test = X[:2000], X[2000:]
|
| 319 |
+
y_train, y_test = y[:2000], y[2000:]
|
| 320 |
+
|
| 321 |
+
gbrt = GradientBoostingClassifier(
|
| 322 |
+
n_estimators=100,
|
| 323 |
+
min_samples_split=5,
|
| 324 |
+
max_depth=2,
|
| 325 |
+
learning_rate=0.1,
|
| 326 |
+
max_features=2,
|
| 327 |
+
random_state=global_random_seed,
|
| 328 |
+
)
|
| 329 |
+
gbrt.fit(X_train, y_train)
|
| 330 |
+
log_loss = gbrt._loss(y_test, gbrt.decision_function(X_test))
|
| 331 |
+
assert log_loss < 0.5, "GB failed with deviance %.4f" % log_loss
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def test_feature_importance_regression(
|
| 335 |
+
fetch_california_housing_fxt, global_random_seed
|
| 336 |
+
):
|
| 337 |
+
"""Test that Gini importance is calculated correctly.
|
| 338 |
+
|
| 339 |
+
This test follows the example from [1]_ (pg. 373).
|
| 340 |
+
|
| 341 |
+
.. [1] Friedman, J., Hastie, T., & Tibshirani, R. (2001). The elements
|
| 342 |
+
of statistical learning. New York: Springer series in statistics.
|
| 343 |
+
"""
|
| 344 |
+
california = fetch_california_housing_fxt()
|
| 345 |
+
X, y = california.data, california.target
|
| 346 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 347 |
+
X, y, random_state=global_random_seed
|
| 348 |
+
)
|
| 349 |
+
|
| 350 |
+
reg = GradientBoostingRegressor(
|
| 351 |
+
loss="huber",
|
| 352 |
+
learning_rate=0.1,
|
| 353 |
+
max_leaf_nodes=6,
|
| 354 |
+
n_estimators=100,
|
| 355 |
+
random_state=global_random_seed,
|
| 356 |
+
)
|
| 357 |
+
reg.fit(X_train, y_train)
|
| 358 |
+
sorted_idx = np.argsort(reg.feature_importances_)[::-1]
|
| 359 |
+
sorted_features = [california.feature_names[s] for s in sorted_idx]
|
| 360 |
+
|
| 361 |
+
# The most important feature is the median income by far.
|
| 362 |
+
assert sorted_features[0] == "MedInc"
|
| 363 |
+
|
| 364 |
+
# The three subsequent features are the following. Their relative ordering
|
| 365 |
+
# might change a bit depending on the randomness of the trees and the
|
| 366 |
+
# train / test split.
|
| 367 |
+
assert set(sorted_features[1:4]) == {"Longitude", "AveOccup", "Latitude"}
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def test_max_features():
|
| 371 |
+
# Test if max features is set properly for floats and str.
|
| 372 |
+
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
|
| 373 |
+
_, n_features = X.shape
|
| 374 |
+
|
| 375 |
+
X_train = X[:2000]
|
| 376 |
+
y_train = y[:2000]
|
| 377 |
+
|
| 378 |
+
gbrt = GradientBoostingClassifier(n_estimators=1, max_features=None)
|
| 379 |
+
gbrt.fit(X_train, y_train)
|
| 380 |
+
assert gbrt.max_features_ == n_features
|
| 381 |
+
|
| 382 |
+
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=None)
|
| 383 |
+
gbrt.fit(X_train, y_train)
|
| 384 |
+
assert gbrt.max_features_ == n_features
|
| 385 |
+
|
| 386 |
+
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
|
| 387 |
+
gbrt.fit(X_train, y_train)
|
| 388 |
+
assert gbrt.max_features_ == int(n_features * 0.3)
|
| 389 |
+
|
| 390 |
+
gbrt = GradientBoostingRegressor(n_estimators=1, max_features="sqrt")
|
| 391 |
+
gbrt.fit(X_train, y_train)
|
| 392 |
+
assert gbrt.max_features_ == int(np.sqrt(n_features))
|
| 393 |
+
|
| 394 |
+
gbrt = GradientBoostingRegressor(n_estimators=1, max_features="log2")
|
| 395 |
+
gbrt.fit(X_train, y_train)
|
| 396 |
+
assert gbrt.max_features_ == int(np.log2(n_features))
|
| 397 |
+
|
| 398 |
+
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.01 / X.shape[1])
|
| 399 |
+
gbrt.fit(X_train, y_train)
|
| 400 |
+
assert gbrt.max_features_ == 1
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def test_staged_predict():
|
| 404 |
+
# Test whether staged decision function eventually gives
|
| 405 |
+
# the same prediction.
|
| 406 |
+
X, y = datasets.make_friedman1(n_samples=1200, random_state=1, noise=1.0)
|
| 407 |
+
X_train, y_train = X[:200], y[:200]
|
| 408 |
+
X_test = X[200:]
|
| 409 |
+
clf = GradientBoostingRegressor()
|
| 410 |
+
# test raise ValueError if not fitted
|
| 411 |
+
with pytest.raises(ValueError):
|
| 412 |
+
np.fromiter(clf.staged_predict(X_test), dtype=np.float64)
|
| 413 |
+
|
| 414 |
+
clf.fit(X_train, y_train)
|
| 415 |
+
y_pred = clf.predict(X_test)
|
| 416 |
+
|
| 417 |
+
# test if prediction for last stage equals ``predict``
|
| 418 |
+
for y in clf.staged_predict(X_test):
|
| 419 |
+
assert y.shape == y_pred.shape
|
| 420 |
+
|
| 421 |
+
assert_array_almost_equal(y_pred, y)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def test_staged_predict_proba():
|
| 425 |
+
# Test whether staged predict proba eventually gives
|
| 426 |
+
# the same prediction.
|
| 427 |
+
X, y = datasets.make_hastie_10_2(n_samples=1200, random_state=1)
|
| 428 |
+
X_train, y_train = X[:200], y[:200]
|
| 429 |
+
X_test, y_test = X[200:], y[200:]
|
| 430 |
+
clf = GradientBoostingClassifier(n_estimators=20)
|
| 431 |
+
# test raise NotFittedError if not
|
| 432 |
+
with pytest.raises(NotFittedError):
|
| 433 |
+
np.fromiter(clf.staged_predict_proba(X_test), dtype=np.float64)
|
| 434 |
+
|
| 435 |
+
clf.fit(X_train, y_train)
|
| 436 |
+
|
| 437 |
+
# test if prediction for last stage equals ``predict``
|
| 438 |
+
for y_pred in clf.staged_predict(X_test):
|
| 439 |
+
assert y_test.shape == y_pred.shape
|
| 440 |
+
|
| 441 |
+
assert_array_equal(clf.predict(X_test), y_pred)
|
| 442 |
+
|
| 443 |
+
# test if prediction for last stage equals ``predict_proba``
|
| 444 |
+
for staged_proba in clf.staged_predict_proba(X_test):
|
| 445 |
+
assert y_test.shape[0] == staged_proba.shape[0]
|
| 446 |
+
assert 2 == staged_proba.shape[1]
|
| 447 |
+
|
| 448 |
+
assert_array_almost_equal(clf.predict_proba(X_test), staged_proba)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
@pytest.mark.parametrize("Estimator", GRADIENT_BOOSTING_ESTIMATORS)
|
| 452 |
+
def test_staged_functions_defensive(Estimator, global_random_seed):
|
| 453 |
+
# test that staged_functions make defensive copies
|
| 454 |
+
rng = np.random.RandomState(global_random_seed)
|
| 455 |
+
X = rng.uniform(size=(10, 3))
|
| 456 |
+
y = (4 * X[:, 0]).astype(int) + 1 # don't predict zeros
|
| 457 |
+
estimator = Estimator()
|
| 458 |
+
estimator.fit(X, y)
|
| 459 |
+
for func in ["predict", "decision_function", "predict_proba"]:
|
| 460 |
+
staged_func = getattr(estimator, "staged_" + func, None)
|
| 461 |
+
if staged_func is None:
|
| 462 |
+
# regressor has no staged_predict_proba
|
| 463 |
+
continue
|
| 464 |
+
with warnings.catch_warnings(record=True):
|
| 465 |
+
staged_result = list(staged_func(X))
|
| 466 |
+
staged_result[1][:] = 0
|
| 467 |
+
assert np.all(staged_result[0] != 0)
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def test_serialization():
|
| 471 |
+
# Check model serialization.
|
| 472 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 473 |
+
|
| 474 |
+
clf.fit(X, y)
|
| 475 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 476 |
+
assert 100 == len(clf.estimators_)
|
| 477 |
+
|
| 478 |
+
try:
|
| 479 |
+
import cPickle as pickle
|
| 480 |
+
except ImportError:
|
| 481 |
+
import pickle
|
| 482 |
+
|
| 483 |
+
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
|
| 484 |
+
clf = None
|
| 485 |
+
clf = pickle.loads(serialized_clf)
|
| 486 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 487 |
+
assert 100 == len(clf.estimators_)
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
def test_degenerate_targets():
|
| 491 |
+
# Check if we can fit even though all targets are equal.
|
| 492 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 493 |
+
|
| 494 |
+
# classifier should raise exception
|
| 495 |
+
with pytest.raises(ValueError):
|
| 496 |
+
clf.fit(X, np.ones(len(X)))
|
| 497 |
+
|
| 498 |
+
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
|
| 499 |
+
clf.fit(X, np.ones(len(X)))
|
| 500 |
+
clf.predict([rng.rand(2)])
|
| 501 |
+
assert_array_equal(np.ones((1,), dtype=np.float64), clf.predict([rng.rand(2)]))
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
def test_quantile_loss(global_random_seed):
|
| 505 |
+
# Check if quantile loss with alpha=0.5 equals absolute_error.
|
| 506 |
+
clf_quantile = GradientBoostingRegressor(
|
| 507 |
+
n_estimators=100,
|
| 508 |
+
loss="quantile",
|
| 509 |
+
max_depth=4,
|
| 510 |
+
alpha=0.5,
|
| 511 |
+
random_state=global_random_seed,
|
| 512 |
+
)
|
| 513 |
+
|
| 514 |
+
clf_quantile.fit(X_reg, y_reg)
|
| 515 |
+
y_quantile = clf_quantile.predict(X_reg)
|
| 516 |
+
|
| 517 |
+
clf_ae = GradientBoostingRegressor(
|
| 518 |
+
n_estimators=100,
|
| 519 |
+
loss="absolute_error",
|
| 520 |
+
max_depth=4,
|
| 521 |
+
random_state=global_random_seed,
|
| 522 |
+
)
|
| 523 |
+
|
| 524 |
+
clf_ae.fit(X_reg, y_reg)
|
| 525 |
+
y_ae = clf_ae.predict(X_reg)
|
| 526 |
+
assert_allclose(y_quantile, y_ae)
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def test_symbol_labels():
|
| 530 |
+
# Test with non-integer class labels.
|
| 531 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 532 |
+
|
| 533 |
+
symbol_y = list(map(str, y))
|
| 534 |
+
|
| 535 |
+
clf.fit(X, symbol_y)
|
| 536 |
+
assert_array_equal(clf.predict(T), list(map(str, true_result)))
|
| 537 |
+
assert 100 == len(clf.estimators_)
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
def test_float_class_labels():
|
| 541 |
+
# Test with float class labels.
|
| 542 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 543 |
+
|
| 544 |
+
float_y = np.asarray(y, dtype=np.float32)
|
| 545 |
+
|
| 546 |
+
clf.fit(X, float_y)
|
| 547 |
+
assert_array_equal(clf.predict(T), np.asarray(true_result, dtype=np.float32))
|
| 548 |
+
assert 100 == len(clf.estimators_)
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def test_shape_y():
|
| 552 |
+
# Test with float class labels.
|
| 553 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 554 |
+
|
| 555 |
+
y_ = np.asarray(y, dtype=np.int32)
|
| 556 |
+
y_ = y_[:, np.newaxis]
|
| 557 |
+
|
| 558 |
+
# This will raise a DataConversionWarning that we want to
|
| 559 |
+
# "always" raise, elsewhere the warnings gets ignored in the
|
| 560 |
+
# later tests, and the tests that check for this warning fail
|
| 561 |
+
warn_msg = (
|
| 562 |
+
"A column-vector y was passed when a 1d array was expected. "
|
| 563 |
+
"Please change the shape of y to \\(n_samples, \\), for "
|
| 564 |
+
"example using ravel()."
|
| 565 |
+
)
|
| 566 |
+
with pytest.warns(DataConversionWarning, match=warn_msg):
|
| 567 |
+
clf.fit(X, y_)
|
| 568 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 569 |
+
assert 100 == len(clf.estimators_)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
def test_mem_layout():
|
| 573 |
+
# Test with different memory layouts of X and y
|
| 574 |
+
X_ = np.asfortranarray(X)
|
| 575 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 576 |
+
clf.fit(X_, y)
|
| 577 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 578 |
+
assert 100 == len(clf.estimators_)
|
| 579 |
+
|
| 580 |
+
X_ = np.ascontiguousarray(X)
|
| 581 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 582 |
+
clf.fit(X_, y)
|
| 583 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 584 |
+
assert 100 == len(clf.estimators_)
|
| 585 |
+
|
| 586 |
+
y_ = np.asarray(y, dtype=np.int32)
|
| 587 |
+
y_ = np.ascontiguousarray(y_)
|
| 588 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 589 |
+
clf.fit(X, y_)
|
| 590 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 591 |
+
assert 100 == len(clf.estimators_)
|
| 592 |
+
|
| 593 |
+
y_ = np.asarray(y, dtype=np.int32)
|
| 594 |
+
y_ = np.asfortranarray(y_)
|
| 595 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
|
| 596 |
+
clf.fit(X, y_)
|
| 597 |
+
assert_array_equal(clf.predict(T), true_result)
|
| 598 |
+
assert 100 == len(clf.estimators_)
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
@pytest.mark.parametrize("GradientBoostingEstimator", GRADIENT_BOOSTING_ESTIMATORS)
|
| 602 |
+
def test_oob_improvement(GradientBoostingEstimator):
|
| 603 |
+
# Test if oob improvement has correct shape and regression test.
|
| 604 |
+
estimator = GradientBoostingEstimator(
|
| 605 |
+
n_estimators=100, random_state=1, subsample=0.5
|
| 606 |
+
)
|
| 607 |
+
estimator.fit(X, y)
|
| 608 |
+
assert estimator.oob_improvement_.shape[0] == 100
|
| 609 |
+
# hard-coded regression test - change if modification in OOB computation
|
| 610 |
+
assert_array_almost_equal(
|
| 611 |
+
estimator.oob_improvement_[:5],
|
| 612 |
+
np.array([0.19, 0.15, 0.12, -0.11, 0.11]),
|
| 613 |
+
decimal=2,
|
| 614 |
+
)
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
@pytest.mark.parametrize("GradientBoostingEstimator", GRADIENT_BOOSTING_ESTIMATORS)
|
| 618 |
+
def test_oob_scores(GradientBoostingEstimator):
|
| 619 |
+
# Test if oob scores has correct shape and regression test.
|
| 620 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 621 |
+
estimator = GradientBoostingEstimator(
|
| 622 |
+
n_estimators=100, random_state=1, subsample=0.5
|
| 623 |
+
)
|
| 624 |
+
estimator.fit(X, y)
|
| 625 |
+
assert estimator.oob_scores_.shape[0] == 100
|
| 626 |
+
assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_)
|
| 627 |
+
|
| 628 |
+
estimator = GradientBoostingEstimator(
|
| 629 |
+
n_estimators=100,
|
| 630 |
+
random_state=1,
|
| 631 |
+
subsample=0.5,
|
| 632 |
+
n_iter_no_change=5,
|
| 633 |
+
)
|
| 634 |
+
estimator.fit(X, y)
|
| 635 |
+
assert estimator.oob_scores_.shape[0] < 100
|
| 636 |
+
assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_)
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
@pytest.mark.parametrize(
|
| 640 |
+
"GradientBoostingEstimator, oob_attribute",
|
| 641 |
+
[
|
| 642 |
+
(GradientBoostingClassifier, "oob_improvement_"),
|
| 643 |
+
(GradientBoostingClassifier, "oob_scores_"),
|
| 644 |
+
(GradientBoostingClassifier, "oob_score_"),
|
| 645 |
+
(GradientBoostingRegressor, "oob_improvement_"),
|
| 646 |
+
(GradientBoostingRegressor, "oob_scores_"),
|
| 647 |
+
(GradientBoostingRegressor, "oob_score_"),
|
| 648 |
+
],
|
| 649 |
+
)
|
| 650 |
+
def test_oob_attributes_error(GradientBoostingEstimator, oob_attribute):
|
| 651 |
+
"""
|
| 652 |
+
Check that we raise an AttributeError when the OOB statistics were not computed.
|
| 653 |
+
"""
|
| 654 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 655 |
+
estimator = GradientBoostingEstimator(
|
| 656 |
+
n_estimators=100,
|
| 657 |
+
random_state=1,
|
| 658 |
+
subsample=1.0,
|
| 659 |
+
)
|
| 660 |
+
estimator.fit(X, y)
|
| 661 |
+
with pytest.raises(AttributeError):
|
| 662 |
+
estimator.oob_attribute
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def test_oob_multilcass_iris():
|
| 666 |
+
# Check OOB improvement on multi-class dataset.
|
| 667 |
+
estimator = GradientBoostingClassifier(
|
| 668 |
+
n_estimators=100, loss="log_loss", random_state=1, subsample=0.5
|
| 669 |
+
)
|
| 670 |
+
estimator.fit(iris.data, iris.target)
|
| 671 |
+
score = estimator.score(iris.data, iris.target)
|
| 672 |
+
assert score > 0.9
|
| 673 |
+
assert estimator.oob_improvement_.shape[0] == estimator.n_estimators
|
| 674 |
+
assert estimator.oob_scores_.shape[0] == estimator.n_estimators
|
| 675 |
+
assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_)
|
| 676 |
+
|
| 677 |
+
estimator = GradientBoostingClassifier(
|
| 678 |
+
n_estimators=100,
|
| 679 |
+
loss="log_loss",
|
| 680 |
+
random_state=1,
|
| 681 |
+
subsample=0.5,
|
| 682 |
+
n_iter_no_change=5,
|
| 683 |
+
)
|
| 684 |
+
estimator.fit(iris.data, iris.target)
|
| 685 |
+
score = estimator.score(iris.data, iris.target)
|
| 686 |
+
assert estimator.oob_improvement_.shape[0] < estimator.n_estimators
|
| 687 |
+
assert estimator.oob_scores_.shape[0] < estimator.n_estimators
|
| 688 |
+
assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_)
|
| 689 |
+
|
| 690 |
+
# hard-coded regression test - change if modification in OOB computation
|
| 691 |
+
# FIXME: the following snippet does not yield the same results on 32 bits
|
| 692 |
+
# assert_array_almost_equal(estimator.oob_improvement_[:5],
|
| 693 |
+
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
|
| 694 |
+
# decimal=2)
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
def test_verbose_output():
|
| 698 |
+
# Check verbose=1 does not cause error.
|
| 699 |
+
import sys
|
| 700 |
+
from io import StringIO
|
| 701 |
+
|
| 702 |
+
old_stdout = sys.stdout
|
| 703 |
+
sys.stdout = StringIO()
|
| 704 |
+
clf = GradientBoostingClassifier(
|
| 705 |
+
n_estimators=100, random_state=1, verbose=1, subsample=0.8
|
| 706 |
+
)
|
| 707 |
+
clf.fit(X, y)
|
| 708 |
+
verbose_output = sys.stdout
|
| 709 |
+
sys.stdout = old_stdout
|
| 710 |
+
|
| 711 |
+
# check output
|
| 712 |
+
verbose_output.seek(0)
|
| 713 |
+
header = verbose_output.readline().rstrip()
|
| 714 |
+
# with OOB
|
| 715 |
+
true_header = " ".join(["%10s"] + ["%16s"] * 3) % (
|
| 716 |
+
"Iter",
|
| 717 |
+
"Train Loss",
|
| 718 |
+
"OOB Improve",
|
| 719 |
+
"Remaining Time",
|
| 720 |
+
)
|
| 721 |
+
assert true_header == header
|
| 722 |
+
|
| 723 |
+
n_lines = sum(1 for l in verbose_output.readlines())
|
| 724 |
+
# one for 1-10 and then 9 for 20-100
|
| 725 |
+
assert 10 + 9 == n_lines
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
def test_more_verbose_output():
|
| 729 |
+
# Check verbose=2 does not cause error.
|
| 730 |
+
import sys
|
| 731 |
+
from io import StringIO
|
| 732 |
+
|
| 733 |
+
old_stdout = sys.stdout
|
| 734 |
+
sys.stdout = StringIO()
|
| 735 |
+
clf = GradientBoostingClassifier(n_estimators=100, random_state=1, verbose=2)
|
| 736 |
+
clf.fit(X, y)
|
| 737 |
+
verbose_output = sys.stdout
|
| 738 |
+
sys.stdout = old_stdout
|
| 739 |
+
|
| 740 |
+
# check output
|
| 741 |
+
verbose_output.seek(0)
|
| 742 |
+
header = verbose_output.readline().rstrip()
|
| 743 |
+
# no OOB
|
| 744 |
+
true_header = " ".join(["%10s"] + ["%16s"] * 2) % (
|
| 745 |
+
"Iter",
|
| 746 |
+
"Train Loss",
|
| 747 |
+
"Remaining Time",
|
| 748 |
+
)
|
| 749 |
+
assert true_header == header
|
| 750 |
+
|
| 751 |
+
n_lines = sum(1 for l in verbose_output.readlines())
|
| 752 |
+
# 100 lines for n_estimators==100
|
| 753 |
+
assert 100 == n_lines
|
| 754 |
+
|
| 755 |
+
|
| 756 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
|
| 757 |
+
def test_warm_start(Cls, global_random_seed):
|
| 758 |
+
# Test if warm start equals fit.
|
| 759 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)
|
| 760 |
+
est = Cls(n_estimators=200, max_depth=1, random_state=global_random_seed)
|
| 761 |
+
est.fit(X, y)
|
| 762 |
+
|
| 763 |
+
est_ws = Cls(
|
| 764 |
+
n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed
|
| 765 |
+
)
|
| 766 |
+
est_ws.fit(X, y)
|
| 767 |
+
est_ws.set_params(n_estimators=200)
|
| 768 |
+
est_ws.fit(X, y)
|
| 769 |
+
|
| 770 |
+
if Cls is GradientBoostingRegressor:
|
| 771 |
+
assert_allclose(est_ws.predict(X), est.predict(X))
|
| 772 |
+
else:
|
| 773 |
+
# Random state is preserved and hence predict_proba must also be
|
| 774 |
+
# same
|
| 775 |
+
assert_array_equal(est_ws.predict(X), est.predict(X))
|
| 776 |
+
assert_allclose(est_ws.predict_proba(X), est.predict_proba(X))
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
|
| 780 |
+
def test_warm_start_n_estimators(Cls, global_random_seed):
|
| 781 |
+
# Test if warm start equals fit - set n_estimators.
|
| 782 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)
|
| 783 |
+
est = Cls(n_estimators=300, max_depth=1, random_state=global_random_seed)
|
| 784 |
+
est.fit(X, y)
|
| 785 |
+
|
| 786 |
+
est_ws = Cls(
|
| 787 |
+
n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed
|
| 788 |
+
)
|
| 789 |
+
est_ws.fit(X, y)
|
| 790 |
+
est_ws.set_params(n_estimators=300)
|
| 791 |
+
est_ws.fit(X, y)
|
| 792 |
+
|
| 793 |
+
assert_allclose(est_ws.predict(X), est.predict(X))
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
|
| 797 |
+
def test_warm_start_max_depth(Cls):
|
| 798 |
+
# Test if possible to fit trees of different depth in ensemble.
|
| 799 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 800 |
+
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
|
| 801 |
+
est.fit(X, y)
|
| 802 |
+
est.set_params(n_estimators=110, max_depth=2)
|
| 803 |
+
est.fit(X, y)
|
| 804 |
+
|
| 805 |
+
# last 10 trees have different depth
|
| 806 |
+
assert est.estimators_[0, 0].max_depth == 1
|
| 807 |
+
for i in range(1, 11):
|
| 808 |
+
assert est.estimators_[-i, 0].max_depth == 2
|
| 809 |
+
|
| 810 |
+
|
| 811 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
|
| 812 |
+
def test_warm_start_clear(Cls):
|
| 813 |
+
# Test if fit clears state.
|
| 814 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 815 |
+
est = Cls(n_estimators=100, max_depth=1)
|
| 816 |
+
est.fit(X, y)
|
| 817 |
+
|
| 818 |
+
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
|
| 819 |
+
est_2.fit(X, y) # inits state
|
| 820 |
+
est_2.set_params(warm_start=False)
|
| 821 |
+
est_2.fit(X, y) # clears old state and equals est
|
| 822 |
+
|
| 823 |
+
assert_array_almost_equal(est_2.predict(X), est.predict(X))
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
@pytest.mark.parametrize("GradientBoosting", GRADIENT_BOOSTING_ESTIMATORS)
|
| 827 |
+
def test_warm_start_state_oob_scores(GradientBoosting):
|
| 828 |
+
"""
|
| 829 |
+
Check that the states of the OOB scores are cleared when used with `warm_start`.
|
| 830 |
+
"""
|
| 831 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 832 |
+
n_estimators = 100
|
| 833 |
+
estimator = GradientBoosting(
|
| 834 |
+
n_estimators=n_estimators,
|
| 835 |
+
max_depth=1,
|
| 836 |
+
subsample=0.5,
|
| 837 |
+
warm_start=True,
|
| 838 |
+
random_state=1,
|
| 839 |
+
)
|
| 840 |
+
estimator.fit(X, y)
|
| 841 |
+
oob_scores, oob_score = estimator.oob_scores_, estimator.oob_score_
|
| 842 |
+
assert len(oob_scores) == n_estimators
|
| 843 |
+
assert oob_scores[-1] == pytest.approx(oob_score)
|
| 844 |
+
|
| 845 |
+
n_more_estimators = 200
|
| 846 |
+
estimator.set_params(n_estimators=n_more_estimators).fit(X, y)
|
| 847 |
+
assert len(estimator.oob_scores_) == n_more_estimators
|
| 848 |
+
assert_allclose(estimator.oob_scores_[:n_estimators], oob_scores)
|
| 849 |
+
|
| 850 |
+
estimator.set_params(n_estimators=n_estimators, warm_start=False).fit(X, y)
|
| 851 |
+
assert estimator.oob_scores_ is not oob_scores
|
| 852 |
+
assert estimator.oob_score_ is not oob_score
|
| 853 |
+
assert_allclose(estimator.oob_scores_, oob_scores)
|
| 854 |
+
assert estimator.oob_score_ == pytest.approx(oob_score)
|
| 855 |
+
assert oob_scores[-1] == pytest.approx(oob_score)
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
|
| 859 |
+
def test_warm_start_smaller_n_estimators(Cls):
|
| 860 |
+
# Test if warm start with smaller n_estimators raises error
|
| 861 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 862 |
+
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
|
| 863 |
+
est.fit(X, y)
|
| 864 |
+
est.set_params(n_estimators=99)
|
| 865 |
+
with pytest.raises(ValueError):
|
| 866 |
+
est.fit(X, y)
|
| 867 |
+
|
| 868 |
+
|
| 869 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
|
| 870 |
+
def test_warm_start_equal_n_estimators(Cls):
|
| 871 |
+
# Test if warm start with equal n_estimators does nothing
|
| 872 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 873 |
+
est = Cls(n_estimators=100, max_depth=1)
|
| 874 |
+
est.fit(X, y)
|
| 875 |
+
|
| 876 |
+
est2 = clone(est)
|
| 877 |
+
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
|
| 878 |
+
est2.fit(X, y)
|
| 879 |
+
|
| 880 |
+
assert_array_almost_equal(est2.predict(X), est.predict(X))
|
| 881 |
+
|
| 882 |
+
|
| 883 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
|
| 884 |
+
def test_warm_start_oob_switch(Cls):
|
| 885 |
+
# Test if oob can be turned on during warm start.
|
| 886 |
+
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
|
| 887 |
+
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
|
| 888 |
+
est.fit(X, y)
|
| 889 |
+
est.set_params(n_estimators=110, subsample=0.5)
|
| 890 |
+
est.fit(X, y)
|
| 891 |
+
|
| 892 |
+
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
|
| 893 |
+
assert_array_equal(est.oob_scores_[:100], np.zeros(100))
|
| 894 |
+
|
| 895 |
+
# the last 10 are not zeros
|
| 896 |
+
assert (est.oob_improvement_[-10:] != 0.0).all()
|
| 897 |
+
assert (est.oob_scores_[-10:] != 0.0).all()
|
| 898 |
+
|
| 899 |
+
assert est.oob_scores_[-1] == pytest.approx(est.oob_score_)
|
| 900 |
+
|
| 901 |
+
|
| 902 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_oob(Cls):
    """OOB statistics of a warm-started fit match those of a single fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    single_fit = Cls(n_estimators=200, max_depth=1, subsample=0.5, random_state=1)
    single_fit.fit(X, y)

    warm = Cls(
        n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True
    )
    warm.fit(X, y)
    warm.set_params(n_estimators=200)
    warm.fit(X, y)

    # The first 100 stages must agree between the two fitting strategies.
    assert_array_almost_equal(warm.oob_improvement_[:100], single_fit.oob_improvement_[:100])
    assert_array_almost_equal(warm.oob_scores_[:100], single_fit.oob_scores_[:100])
    assert single_fit.oob_scores_[-1] == pytest.approx(single_fit.oob_score_)
    assert warm.oob_scores_[-1] == pytest.approx(warm.oob_score_)
|
| 920 |
+
|
| 921 |
+
|
| 922 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_warm_start_sparse(Cls, sparse_container):
    # Test that all sparse matrix types are supported by warm-started fits and
    # give the same predictions / OOB statistics as the dense equivalent.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    # Reference: dense warm-started fit, 100 stages then 100 more.
    est_dense = Cls(
        n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True
    )
    est_dense.fit(X, y)
    est_dense.predict(X)
    est_dense.set_params(n_estimators=200)
    est_dense.fit(X, y)
    y_pred_dense = est_dense.predict(X)

    X_sparse = sparse_container(X)

    # Same schedule, but fitted on the sparse representation of X.
    est_sparse = Cls(
        n_estimators=100,
        max_depth=1,
        subsample=0.5,
        random_state=1,
        warm_start=True,
    )
    est_sparse.fit(X_sparse, y)
    est_sparse.predict(X)
    est_sparse.set_params(n_estimators=200)
    est_sparse.fit(X_sparse, y)
    y_pred_sparse = est_sparse.predict(X)

    # Dense and sparse training must agree on OOB stats and predictions.
    assert_array_almost_equal(
        est_dense.oob_improvement_[:100], est_sparse.oob_improvement_[:100]
    )
    assert est_dense.oob_scores_[-1] == pytest.approx(est_dense.oob_score_)
    assert_array_almost_equal(est_dense.oob_scores_[:100], est_sparse.oob_scores_[:100])
    assert est_sparse.oob_scores_[-1] == pytest.approx(est_sparse.oob_score_)
    assert_array_almost_equal(y_pred_dense, y_pred_sparse)
|
| 960 |
+
|
| 961 |
+
|
| 962 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_fortran(Cls, global_random_seed):
    """Fortran-ordered X must give the same warm-start result as C-ordered X."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)
    model_c = Cls(n_estimators=1, random_state=global_random_seed, warm_start=True)
    model_f = Cls(n_estimators=1, random_state=global_random_seed, warm_start=True)

    # Fit on C-ordered data: one stage, then grow to 11 stages.
    model_c.fit(X, y)
    model_c.set_params(n_estimators=11)
    model_c.fit(X, y)

    # Same schedule on a Fortran-ordered copy.
    X_fortran = np.asfortranarray(X)
    model_f.fit(X_fortran, y)
    model_f.set_params(n_estimators=11)
    model_f.fit(X_fortran, y)

    assert_allclose(model_c.predict(X), model_f.predict(X))
|
| 980 |
+
|
| 981 |
+
|
| 982 |
+
def early_stopping_monitor(i, est, locals):
    """Monitor callback that requests stopping on the 10th iteration (i == 9)."""
    return i == 9
|
| 988 |
+
|
| 989 |
+
|
| 990 |
+
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_monitor_early_stopping(Cls):
    # Test if monitor return value works: a monitor returning True stops
    # boosting early, and a subsequent plain refit restores full training.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)

    est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
    est.fit(X, y, monitor=early_stopping_monitor)
    assert est.n_estimators == 20  # this is not altered
    # The monitor stops after 10 stages, so all per-stage arrays have length 10.
    assert est.estimators_.shape[0] == 10
    assert est.train_score_.shape[0] == 10
    assert est.oob_improvement_.shape[0] == 10
    assert est.oob_scores_.shape[0] == 10
    assert est.oob_scores_[-1] == pytest.approx(est.oob_score_)

    # try refit without a monitor: trains the full 30 stages from scratch
    est.set_params(n_estimators=30)
    est.fit(X, y)
    assert est.n_estimators == 30
    assert est.estimators_.shape[0] == 30
    assert est.train_score_.shape[0] == 30
    assert est.oob_improvement_.shape[0] == 30
    assert est.oob_scores_.shape[0] == 30
    assert est.oob_scores_[-1] == pytest.approx(est.oob_score_)

    # Same scenario but with warm_start=True: the monitor must still stop at 10.
    est = Cls(
        n_estimators=20, max_depth=1, random_state=1, subsample=0.5, warm_start=True
    )
    est.fit(X, y, monitor=early_stopping_monitor)
    assert est.n_estimators == 20
    assert est.estimators_.shape[0] == 10
    assert est.train_score_.shape[0] == 10
    assert est.oob_improvement_.shape[0] == 10
    assert est.oob_scores_.shape[0] == 10
    assert est.oob_scores_[-1] == pytest.approx(est.oob_score_)

    # try refit with warm_start turned off: full 30-stage fit again
    est.set_params(n_estimators=30, warm_start=False)
    est.fit(X, y)
    assert est.n_estimators == 30
    assert est.train_score_.shape[0] == 30
    assert est.estimators_.shape[0] == 30
    assert est.oob_improvement_.shape[0] == 30
    assert est.oob_scores_.shape[0] == 30
    assert est.oob_scores_[-1] == pytest.approx(est.oob_score_)
|
| 1034 |
+
|
| 1035 |
+
|
| 1036 |
+
def test_complete_classification():
    """Greedy growth with max_leaf_nodes=k+1 yields depth-k trees with k+1 leaves."""
    from sklearn.tree._tree import TREE_LEAF

    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4

    clf = GradientBoostingClassifier(
        n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1
    )
    clf.fit(X, y)

    fitted_tree = clf.estimators_[0, 0].tree_
    assert fitted_tree.max_depth == k
    # A node is a leaf iff its left-child slot holds TREE_LEAF.
    assert (fitted_tree.children_left == TREE_LEAF).sum() == k + 1
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
def test_complete_regression():
    """Greedy growth with max_leaf_nodes=k+1 yields regression trees with k+1 leaves."""
    from sklearn.tree._tree import TREE_LEAF

    k = 4

    reg = GradientBoostingRegressor(
        n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1
    )
    reg.fit(X_reg, y_reg)

    fitted_tree = reg.estimators_[-1, 0].tree_
    # A node is a leaf iff its left-child slot holds TREE_LEAF.
    assert (fitted_tree.children_left == TREE_LEAF).sum() == k + 1
|
| 1066 |
+
|
| 1067 |
+
|
| 1068 |
+
def test_zero_estimator_reg(global_random_seed):
    """init='zero' regression should beat a constant-mean baseline on MSE."""
    baseline = DummyRegressor(strategy="mean").fit(X_reg, y_reg)
    mse_baseline = mean_squared_error(baseline.predict(X_reg), y_reg)
    reg = GradientBoostingRegressor(
        n_estimators=5,
        max_depth=1,
        random_state=global_random_seed,
        init="zero",
        learning_rate=0.5,
    )
    reg.fit(X_reg, y_reg)
    mse_gbdt = mean_squared_error(y_reg, reg.predict(X_reg))
    assert mse_gbdt < mse_baseline
|
| 1085 |
+
|
| 1086 |
+
|
| 1087 |
+
def test_zero_estimator_clf(global_random_seed):
    """init='zero' classification works for both multiclass and binary targets."""
    X = iris.data
    y = np.array(iris.target)

    clf = GradientBoostingClassifier(
        n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero"
    )
    clf.fit(X, y)

    assert clf.score(X, y) > 0.96

    # Collapse to a binary problem: classes {1, 2} -> 1, class 0 -> 0.
    positive = y != 0
    y[positive] = 1
    y[~positive] = 0
    clf = GradientBoostingClassifier(
        n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero"
    )
    clf.fit(X, y)
    assert clf.score(X, y) > 0.96
|
| 1108 |
+
|
| 1109 |
+
|
| 1110 |
+
@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_max_leaf_nodes_max_depth(GBEstimator):
    """max_depth still caps tree depth when max_leaf_nodes is also given."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)

    k = 4

    # With both parameters set, depth stays bounded by max_depth.
    with_both = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
    assert with_both.estimators_[0, 0].tree_.max_depth == 1

    # Control: max_depth alone behaves identically.
    depth_only = GBEstimator(max_depth=1).fit(X, y)
    assert depth_only.estimators_[0, 0].tree_.max_depth == 1
|
| 1124 |
+
|
| 1125 |
+
|
| 1126 |
+
@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_min_impurity_decrease(GBEstimator):
    """min_impurity_decrease is forwarded to every underlying tree."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)

    model = GBEstimator(min_impurity_decrease=0.1)
    model.fit(X, y)
    # Only verify parameter propagation; the tree tests cover its actual effect.
    for weak_learner in model.estimators_.flat:
        assert weak_learner.min_impurity_decrease == 0.1
|
| 1136 |
+
|
| 1137 |
+
|
| 1138 |
+
def test_warm_start_wo_nestimators_change():
    """Warm-started refit with unchanged n_estimators keeps the ensemble size.

    Regression test for #3513.
    """
    X, y = [[0, 1], [2, 3]], [0, 1]
    clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    # Fit twice; the second fit must be a no-op for the ensemble size.
    for _ in range(2):
        clf.fit(X, y)
        assert clf.estimators_.shape[0] == 10
|
| 1146 |
+
|
| 1147 |
+
|
| 1148 |
+
@pytest.mark.parametrize(
    ("loss", "value"),
    [
        ("squared_error", 0.5),
        ("absolute_error", 0.0),
        ("huber", 0.5),
        ("quantile", 0.5),
    ],
)
def test_non_uniform_weights_toy_edge_case_reg(loss, value):
    """Zero-weight samples must be ignored when fitting each regression loss."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # The first two samples carry no weight and must not influence the fit.
    sample_weight = [0, 0, 1, 1]
    reg = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
    reg.fit(X, y, sample_weight=sample_weight)
    assert reg.predict([[1, 0]])[0] >= value
|
| 1165 |
+
|
| 1166 |
+
|
| 1167 |
+
def test_non_uniform_weights_toy_edge_case_clf():
    """Zero-weight samples must be ignored for both classification losses."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # The first two samples carry no weight and must not influence the fit.
    sample_weight = [0, 0, 1, 1]
    for loss in ("log_loss", "exponential"):
        clf = GradientBoostingClassifier(n_estimators=5, loss=loss)
        clf.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(clf.predict([[1, 0]]), [1])
|
| 1176 |
+
|
| 1177 |
+
|
| 1178 |
+
@skip_if_32bit
@pytest.mark.parametrize(
    "EstimatorClass", (GradientBoostingClassifier, GradientBoostingRegressor)
)
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_sparse_input(EstimatorClass, sparse_container):
    # Fitting on sparse input must match fitting on the dense equivalent for
    # apply/predict/feature_importances_ (and the proba/decision APIs for
    # classifiers), in every supported sparse format.
    y, X = datasets.make_multilabel_classification(
        random_state=0, n_samples=50, n_features=1, n_classes=20
    )
    # Reduce the multilabel target to a single binary column.
    y = y[:, 0]
    X_sparse = sparse_container(X)

    dense = EstimatorClass(
        n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7
    ).fit(X, y)
    sparse = EstimatorClass(
        n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7
    ).fit(X_sparse, y)

    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    assert_array_almost_equal(sparse.predict(X), dense.predict(X))
    assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_)

    # Cross-checks: each model must also accept the other representation.
    assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
    assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))

    if issubclass(EstimatorClass, GradientBoostingClassifier):
        assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X))
        assert_array_almost_equal(
            sparse.predict_log_proba(X), dense.predict_log_proba(X)
        )

        assert_array_almost_equal(
            sparse.decision_function(X_sparse), sparse.decision_function(X)
        )
        assert_array_almost_equal(
            dense.decision_function(X_sparse), sparse.decision_function(X)
        )
        # Stage-wise decision functions must agree as well.
        for res_sparse, res in zip(
            sparse.staged_decision_function(X_sparse),
            sparse.staged_decision_function(X),
        ):
            assert_array_almost_equal(res_sparse, res)
|
| 1223 |
+
|
| 1224 |
+
|
| 1225 |
+
@pytest.mark.parametrize(
    "GradientBoostingEstimator", [GradientBoostingClassifier, GradientBoostingRegressor]
)
def test_gradient_boosting_early_stopping(GradientBoostingEstimator):
    # Check if early stopping works as expected, that is empirically check that the
    # number of trained estimators is increasing when the tolerance decreases.

    X, y = make_classification(n_samples=1000, random_state=0)
    n_estimators = 1000

    # A loose tolerance should trigger early stopping sooner...
    gb_large_tol = GradientBoostingEstimator(
        n_estimators=n_estimators,
        n_iter_no_change=10,
        learning_rate=0.1,
        max_depth=3,
        random_state=42,
        tol=1e-1,
    )

    # ...than a tight tolerance, which should train more stages.
    gb_small_tol = GradientBoostingEstimator(
        n_estimators=n_estimators,
        n_iter_no_change=10,
        learning_rate=0.1,
        max_depth=3,
        random_state=42,
        tol=1e-3,
    )

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    gb_large_tol.fit(X_train, y_train)
    gb_small_tol.fit(X_train, y_train)

    # Both must stop early, and looser tolerance must stop first.
    assert gb_large_tol.n_estimators_ < gb_small_tol.n_estimators_ < n_estimators

    # Sanity check: early stopping should not destroy predictive power.
    assert gb_large_tol.score(X_test, y_test) > 0.7
    assert gb_small_tol.score(X_test, y_test) > 0.7
|
| 1261 |
+
|
| 1262 |
+
|
| 1263 |
+
def test_gradient_boosting_without_early_stopping():
    """Without early stopping, exactly n_estimators stages are trained."""
    X, y = make_classification(n_samples=1000, random_state=0)

    gbc = GradientBoostingClassifier(
        n_estimators=50, learning_rate=0.1, max_depth=3, random_state=42
    ).fit(X, y)
    gbr = GradientBoostingRegressor(
        n_estimators=30, learning_rate=0.1, max_depth=3, random_state=42
    ).fit(X, y)

    # The number of trained estimators must be exactly the one requested.
    assert gbc.n_estimators_ == 50
    assert gbr.n_estimators_ == 30
|
| 1280 |
+
|
| 1281 |
+
|
| 1282 |
+
def test_gradient_boosting_validation_fraction():
    # validation_fraction and n_iter_no_change must both influence when early
    # stopping kicks in, for classifier and regressor alike.
    X, y = make_classification(n_samples=1000, random_state=0)

    gbc = GradientBoostingClassifier(
        n_estimators=100,
        n_iter_no_change=10,
        validation_fraction=0.1,
        learning_rate=0.1,
        max_depth=3,
        random_state=42,
    )
    # Variants: larger validation set, and more patience before stopping.
    gbc2 = clone(gbc).set_params(validation_fraction=0.3)
    gbc3 = clone(gbc).set_params(n_iter_no_change=20)

    gbr = GradientBoostingRegressor(
        n_estimators=100,
        n_iter_no_change=10,
        learning_rate=0.1,
        max_depth=3,
        validation_fraction=0.1,
        random_state=42,
    )
    gbr2 = clone(gbr).set_params(validation_fraction=0.3)
    gbr3 = clone(gbr).set_params(n_iter_no_change=20)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    # Check if validation_fraction has an effect
    gbc.fit(X_train, y_train)
    gbc2.fit(X_train, y_train)
    assert gbc.n_estimators_ != gbc2.n_estimators_

    gbr.fit(X_train, y_train)
    gbr2.fit(X_train, y_train)
    assert gbr.n_estimators_ != gbr2.n_estimators_

    # Check if n_estimators_ increase monotonically with n_iter_no_change
    # Set validation
    gbc3.fit(X_train, y_train)
    gbr3.fit(X_train, y_train)
    assert gbr.n_estimators_ < gbr3.n_estimators_
    assert gbc.n_estimators_ < gbc3.n_estimators_
|
| 1323 |
+
|
| 1324 |
+
|
| 1325 |
+
def test_early_stopping_stratified():
    """The early-stopping split is stratified, so a one-member class must fail."""
    X = [[1, 2], [2, 3], [3, 4], [4, 5]]
    y = [0, 0, 0, 1]

    gbc = GradientBoostingClassifier(n_iter_no_change=5)
    expected_message = "The least populated class in y has only 1 member"
    with pytest.raises(ValueError, match=expected_message):
        gbc.fit(X, y)
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
def _make_multiclass():
    """Build a small 3-class dataset for the init-estimator tests."""
    dataset = make_classification(n_classes=3, n_clusters_per_class=1)
    return dataset
|
| 1339 |
+
|
| 1340 |
+
|
| 1341 |
+
@pytest.mark.parametrize(
    "gb, dataset_maker, init_estimator",
    [
        (GradientBoostingClassifier, make_classification, DummyClassifier),
        (GradientBoostingClassifier, _make_multiclass, DummyClassifier),
        (GradientBoostingRegressor, make_regression, DummyRegressor),
    ],
    ids=["binary classification", "multiclass classification", "regression"],
)
def test_gradient_boosting_with_init(
    gb, dataset_maker, init_estimator, global_random_seed
):
    # Check that gradient boosting works when init is a sklearn estimator, and
    # that an error is raised if trying to fit with sample weight when the
    # initial estimator does not support sample weight.

    X, y = dataset_maker()
    # NOTE: assumes dataset_maker yields 100 samples (make_classification /
    # make_regression default) so the weight vector length matches.
    sample_weight = np.random.RandomState(global_random_seed).rand(100)

    # init supports sample weights
    init_est = init_estimator()
    gb(init=init_est).fit(X, y, sample_weight=sample_weight)

    # init does not support sample weights
    init_est = NoSampleWeightWrapper(init_estimator())
    gb(init=init_est).fit(X, y)  # ok no sample weights
    with pytest.raises(ValueError, match="estimator.*does not support sample weights"):
        gb(init=init_est).fit(X, y, sample_weight=sample_weight)
|
| 1370 |
+
|
| 1371 |
+
|
| 1372 |
+
def test_gradient_boosting_with_init_pipeline():
    # Check that the init estimator can be a pipeline (see issue #13466)

    X, y = make_regression(random_state=0)
    init = make_pipeline(LinearRegression())
    gb = GradientBoostingRegressor(init=init)
    gb.fit(X, y)  # pipeline without sample_weight works fine

    # Pipelines cannot receive sample_weight this way, so fitting with weights
    # must raise a clear, dedicated error message.
    with pytest.raises(
        ValueError,
        match="The initial estimator Pipeline does not support sample weights",
    ):
        gb.fit(X, y, sample_weight=np.ones(X.shape[0]))

    # Passing sample_weight to a pipeline raises a ValueError. This test makes
    # sure we make the distinction between ValueError raised by a pipeline that
    # was passed sample_weight, and a InvalidParameterError raised by a regular
    # estimator whose input checking failed.
    invalid_nu = 1.5
    err_msg = (
        "The 'nu' parameter of NuSVR must be a float in the"
        f" range (0.0, 1.0]. Got {invalid_nu} instead."
    )
    with pytest.raises(InvalidParameterError, match=re.escape(err_msg)):
        # Note that NuSVR properly supports sample_weight
        init = NuSVR(gamma="auto", nu=invalid_nu)
        gb = GradientBoostingRegressor(init=init)
        gb.fit(X, y, sample_weight=np.ones(X.shape[0]))
|
| 1400 |
+
|
| 1401 |
+
|
| 1402 |
+
def test_early_stopping_n_classes():
    # when doing early stopping (_, _, y_train, _ = train_test_split(X, y))
    # there might be classes in y that are missing in y_train. As the init
    # estimator will be trained on y_train, we need to raise an error if this
    # happens.

    X = [[1]] * 10
    y = [0, 0] + [1] * 8  # only 2 negative class over 10 samples
    gb = GradientBoostingClassifier(
        n_iter_no_change=5, random_state=0, validation_fraction=0.8
    )
    with pytest.raises(
        ValueError, match="The training data after the early stopping split"
    ):
        gb.fit(X, y)

    # No error if we let training data be big enough.
    gb = GradientBoostingClassifier(
        n_iter_no_change=5, random_state=0, validation_fraction=0.4
    )
    # Bug fix: the estimator was previously constructed but never fitted, so
    # the "no error" half of this test was dead code. Actually exercise it.
    gb.fit(X, y)
|
| 1422 |
+
|
| 1423 |
+
|
| 1424 |
+
def test_gbr_degenerate_feature_importances():
    """An ensemble of single-node trees yields all-zero importances (#13620)."""
    # Constant features + constant target grow only root-node trees.
    X = np.zeros((10, 10))
    y = np.ones((10,))
    fitted = GradientBoostingRegressor().fit(X, y)
    assert_array_equal(fitted.feature_importances_, np.zeros(10, dtype=np.float64))
|
| 1430 |
+
|
| 1431 |
+
|
| 1432 |
+
def test_huber_vs_mean_and_median():
    """Huber predictions lie between absolute-error and squared-error ones."""
    n_rep = 100
    n_samples = 10
    y = np.tile(np.arange(n_samples), n_rep)
    x1 = np.minimum(y, n_samples / 2)
    x2 = np.minimum(-y, -n_samples / 2)
    X = np.c_[x1, x2]

    rng = np.random.RandomState(42)
    # Add asymmetric (exponential) noise so mean, median and Huber fits differ.
    y = y + rng.exponential(scale=1, size=y.shape)

    model_absolute = GradientBoostingRegressor(loss="absolute_error").fit(X, y)
    model_huber = GradientBoostingRegressor(loss="huber").fit(X, y)
    model_squared = GradientBoostingRegressor().fit(X, y)

    huber_predictions = model_huber.predict(X)
    assert np.all(model_absolute.predict(X) <= huber_predictions)
    assert np.all(huber_predictions <= model_squared.predict(X))
|
| 1452 |
+
|
| 1453 |
+
|
| 1454 |
+
def test_safe_divide():
    """Test that _safe_divide handles division by zero."""
    with warnings.catch_warnings():
        # Promote any warning to an error: division by zero must stay silent
        # and return 0 instead of raising/warning.
        warnings.simplefilter("error")
        assert _safe_divide(np.float64(1e300), 0) == 0
        assert _safe_divide(np.float64(0.0), np.float64(0.0)) == 0
    # A genuine overflow (result exceeds float max) must still warn.
    with pytest.warns(RuntimeWarning, match="overflow"):
        # np.finfo(float).max = 1.7976931348623157e+308
        _safe_divide(np.float64(1e300), 1e-10)
|
| 1463 |
+
|
| 1464 |
+
|
| 1465 |
+
def test_squared_error_exact_backward_compat():
    """Test squared error GBT backward compat on a simple dataset.

    The results to compare against are taken from scikit-learn v1.2.0.
    """
    n_samples = 10
    y = np.arange(n_samples)
    x1 = np.minimum(y, n_samples / 2)
    x2 = np.minimum(-y, -n_samples / 2)
    X = np.c_[x1, x2]
    gbt = GradientBoostingRegressor(loss="squared_error", n_estimators=100).fit(X, y)

    # Frozen reference predictions from v1.2.0.
    pred_result = np.array(
        [
            1.39245726e-04,
            1.00010468e00,
            2.00007043e00,
            3.00004051e00,
            4.00000802e00,
            4.99998972e00,
            5.99996312e00,
            6.99993395e00,
            7.99989372e00,
            8.99985660e00,
        ]
    )
    assert_allclose(gbt.predict(X), pred_result, rtol=1e-8)

    # Frozen per-stage train scores (last 10 stages) from v1.2.0.
    train_score = np.array(
        [
            4.87246390e-08,
            3.95590036e-08,
            3.21267865e-08,
            2.60970300e-08,
            2.11820178e-08,
            1.71995782e-08,
            1.39695549e-08,
            1.13391770e-08,
            9.19931587e-09,
            7.47000575e-09,
        ]
    )
    assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8)

    # Same but with sample_weights
    sample_weights = np.tile([1, 10], n_samples // 2)
    gbt = GradientBoostingRegressor(loss="squared_error", n_estimators=100).fit(
        X, y, sample_weight=sample_weights
    )

    pred_result = np.array(
        [
            1.52391462e-04,
            1.00011168e00,
            2.00007724e00,
            3.00004638e00,
            4.00001302e00,
            4.99999873e00,
            5.99997093e00,
            6.99994329e00,
            7.99991290e00,
            8.99988727e00,
        ]
    )
    assert_allclose(gbt.predict(X), pred_result, rtol=1e-6, atol=1e-5)

    train_score = np.array(
        [
            4.12445296e-08,
            3.34418322e-08,
            2.71151383e-08,
            2.19782469e-08,
            1.78173649e-08,
            1.44461976e-08,
            1.17120123e-08,
            9.49485678e-09,
            7.69772505e-09,
            6.24155316e-09,
        ]
    )
    assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-3, atol=1e-11)
|
| 1546 |
+
|
| 1547 |
+
|
| 1548 |
+
@skip_if_32bit
def test_huber_exact_backward_compat():
    """Test huber GBT backward compat on a simple dataset.

    The results to compare against are taken from scikit-learn v1.2.0.
    """
    n_samples = 10
    y = np.arange(n_samples)
    x1 = np.minimum(y, n_samples / 2)
    x2 = np.minimum(-y, -n_samples / 2)
    X = np.c_[x1, x2]
    gbt = GradientBoostingRegressor(loss="huber", n_estimators=100, alpha=0.8).fit(X, y)

    # Frozen Huber delta (the quantile-based transition point) from v1.2.0.
    assert_allclose(gbt._loss.closs.delta, 0.0001655688041282133)

    # Frozen reference predictions from v1.2.0.
    pred_result = np.array(
        [
            1.48120765e-04,
            9.99949174e-01,
            2.00116957e00,
            2.99986716e00,
            4.00012064e00,
            5.00002462e00,
            5.99998898e00,
            6.99692549e00,
            8.00006356e00,
            8.99985099e00,
        ]
    )
    assert_allclose(gbt.predict(X), pred_result, rtol=1e-8)

    # Frozen per-stage train scores (last 10 stages) from v1.2.0.
    train_score = np.array(
        [
            2.59484709e-07,
            2.19165900e-07,
            1.89644782e-07,
            1.64556454e-07,
            1.38705110e-07,
            1.20373736e-07,
            1.04746082e-07,
            9.13835687e-08,
            8.20245756e-08,
            7.17122188e-08,
        ]
    )
    assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8)
|
| 1594 |
+
|
| 1595 |
+
|
| 1596 |
+
def test_binomial_error_exact_backward_compat():
    """Test binary log_loss GBT backward compat on a simple dataset.

    The results to compare against are taken from scikit-learn v1.2.0.
    """
    n_samples = 10
    y = np.arange(n_samples) % 2
    x1 = np.minimum(y, n_samples / 2)
    x2 = np.minimum(-y, -n_samples / 2)
    X = np.c_[x1, x2]
    gbt = GradientBoostingClassifier(loss="log_loss", n_estimators=100).fit(X, y)

    # Frozen reference class probabilities from v1.2.0 (rows alternate with y).
    pred_result = np.array(
        [
            [9.99978098e-01, 2.19017313e-05],
            [2.19017313e-05, 9.99978098e-01],
            [9.99978098e-01, 2.19017313e-05],
            [2.19017313e-05, 9.99978098e-01],
            [9.99978098e-01, 2.19017313e-05],
            [2.19017313e-05, 9.99978098e-01],
            [9.99978098e-01, 2.19017313e-05],
            [2.19017313e-05, 9.99978098e-01],
            [9.99978098e-01, 2.19017313e-05],
            [2.19017313e-05, 9.99978098e-01],
        ]
    )
    assert_allclose(gbt.predict_proba(X), pred_result, rtol=1e-8)

    # Frozen per-stage train scores (last 10 stages) from v1.2.0.
    train_score = np.array(
        [
            1.07742210e-04,
            9.74889078e-05,
            8.82113863e-05,
            7.98167784e-05,
            7.22210566e-05,
            6.53481907e-05,
            5.91293869e-05,
            5.35023988e-05,
            4.84109045e-05,
            4.38039423e-05,
        ]
    )
    assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8)
|
| 1639 |
+
|
| 1640 |
+
|
| 1641 |
+
def test_multinomial_error_exact_backward_compat():
    """Test multiclass log_loss GBT backward compat on a simple dataset.

    The results to compare against are taken from scikit-learn v1.2.0.
    """
    n_samples = 10
    y = np.arange(n_samples) % 4
    x1 = np.minimum(y, n_samples / 2)
    x2 = np.minimum(-y, -n_samples / 2)
    X = np.c_[x1, x2]
    gbt = GradientBoostingClassifier(loss="log_loss", n_estimators=100).fit(X, y)

    # Frozen reference class probabilities (4 classes) from v1.2.0.
    pred_result = np.array(
        [
            [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08],
            [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08],
            [1.19417637e-07, 1.19417637e-07, 9.99999675e-01, 8.60526098e-08],
            [1.19417637e-07, 1.19417637e-07, 8.60526088e-08, 9.99999675e-01],
            [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08],
            [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08],
            [1.19417637e-07, 1.19417637e-07, 9.99999675e-01, 8.60526098e-08],
            [1.19417637e-07, 1.19417637e-07, 8.60526088e-08, 9.99999675e-01],
            [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08],
            [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08],
        ]
    )
    assert_allclose(gbt.predict_proba(X), pred_result, rtol=1e-8)

    # Frozen per-stage train scores (last 10 stages) from v1.2.0.
    train_score = np.array(
        [
            1.13300150e-06,
            9.75183397e-07,
            8.39348103e-07,
            7.22433588e-07,
            6.21804338e-07,
            5.35191943e-07,
            4.60643966e-07,
            3.96479930e-07,
            3.41253434e-07,
            2.93719550e-07,
        ]
    )
    assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8)
|
| 1684 |
+
|
| 1685 |
+
|
| 1686 |
+
def test_gb_denominator_zero(global_random_seed):
    """Test _update_terminal_regions denominator is not zero.

    For instance for log loss based binary classification, the line search step might
    become nan/inf as denominator = hessian = prob * (1 - prob) and prob = 0 or 1 can
    happen.
    Here, we create a situation where this happens (at least with roughly 80%) based
    on the random seed.
    """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=20)

    # Aggressive settings (full learning rate, subsampling, small leaves)
    # make saturated probabilities — and hence a zero hessian — likely.
    params = {
        "learning_rate": 1.0,
        "subsample": 0.5,
        "n_estimators": 100,
        "max_leaf_nodes": 4,
        "max_depth": None,
        "random_state": global_random_seed,
        "min_samples_leaf": 2,
    }

    clf = GradientBoostingClassifier(**params)
    # _safe_divide would raise a RuntimeWarning on a zero denominator;
    # escalate all warnings to errors so any such division fails the test.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        clf.fit(X, y)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Authors: The scikit-learn developers
|
| 6 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 7 |
+
|
| 8 |
+
import warnings
|
| 9 |
+
from unittest.mock import Mock, patch
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
import pytest
|
| 13 |
+
from joblib import parallel_backend
|
| 14 |
+
|
| 15 |
+
from sklearn.datasets import load_diabetes, load_iris, make_classification
|
| 16 |
+
from sklearn.ensemble import IsolationForest
|
| 17 |
+
from sklearn.ensemble._iforest import _average_path_length
|
| 18 |
+
from sklearn.metrics import roc_auc_score
|
| 19 |
+
from sklearn.model_selection import ParameterGrid, train_test_split
|
| 20 |
+
from sklearn.utils import check_random_state
|
| 21 |
+
from sklearn.utils._testing import (
|
| 22 |
+
assert_allclose,
|
| 23 |
+
assert_array_almost_equal,
|
| 24 |
+
assert_array_equal,
|
| 25 |
+
ignore_warnings,
|
| 26 |
+
)
|
| 27 |
+
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
|
| 28 |
+
|
| 29 |
+
# load iris & diabetes dataset
# Module-level fixtures shared by the tests below: a small classification
# dataset (iris) and a small regression dataset (diabetes).
iris = load_iris()
diabetes = load_diabetes()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_iforest(global_random_seed):
    """Check Isolation Forest for various parameter settings."""
    train_data = np.array([[0, 1], [1, 2]])
    test_data = np.array([[2, 1], [1, 1]])

    # Exercise every combination of sampling / bootstrap options on a
    # tiny dataset; the point is that fit/predict run without error.
    param_grid = ParameterGrid(
        {"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]}
    )

    with ignore_warnings():
        for settings in param_grid:
            forest = IsolationForest(random_state=global_random_seed, **settings)
            forest.fit(train_data).predict(test_data)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
def test_iforest_sparse(global_random_seed, sparse_container):
    """Check IForest for various parameter settings on sparse input."""
    rng = check_random_state(global_random_seed)
    train_dense, test_dense = train_test_split(diabetes.data[:50], random_state=rng)
    settings_grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]})

    train_sparse = sparse_container(train_dense)
    test_sparse = sparse_container(test_dense)

    for settings in settings_grid:
        # The same configuration fitted on sparse vs. dense input must
        # yield identical predictions.
        pred_from_sparse = (
            IsolationForest(n_estimators=10, random_state=global_random_seed, **settings)
            .fit(train_sparse)
            .predict(test_sparse)
        )
        pred_from_dense = (
            IsolationForest(n_estimators=10, random_state=global_random_seed, **settings)
            .fit(train_dense)
            .predict(test_dense)
        )
        assert_array_equal(pred_from_sparse, pred_from_dense)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def test_iforest_error():
    """Test that it gives proper exception on deficient input."""
    features = iris.data

    # The dataset has fewer than 256 samples: an explicit, too-large
    # max_samples must warn; the default and a small integer must not.
    warn_msg = "max_samples will be set to n_samples for estimation"
    with pytest.warns(UserWarning, match=warn_msg):
        IsolationForest(max_samples=1000).fit(features)

    for quiet_max_samples in ("auto", np.int64(2)):
        with warnings.catch_warnings():
            warnings.simplefilter("error", UserWarning)
            IsolationForest(max_samples=quiet_max_samples).fit(features)

    # Predicting with a different number of features than seen at fit
    # time must raise.
    with pytest.raises(ValueError):
        IsolationForest().fit(features).predict(features[:, 1:])
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def test_recalculate_max_depth():
    """Check max_depth recalculation when max_samples is reset to n_samples"""
    data = iris.data
    forest = IsolationForest().fit(data)
    # The per-tree depth cap must be derived from the effective sample count.
    expected_depth = int(np.ceil(np.log2(data.shape[0])))
    for tree in forest.estimators_:
        assert tree.max_depth == expected_depth
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def test_max_samples_attribute():
    data = iris.data
    n_rows = data.shape[0]

    # Default "auto" is capped at the number of available samples.
    assert IsolationForest().fit(data).max_samples_ == n_rows

    # Asking for more samples than available warns and clamps to n_samples.
    model = IsolationForest(max_samples=500)
    warn_msg = "max_samples will be set to n_samples for estimation"
    with pytest.warns(UserWarning, match=warn_msg):
        model.fit(data)
    assert model.max_samples_ == n_rows

    # A float max_samples is interpreted as a fraction of the dataset.
    fractional = IsolationForest(max_samples=0.4).fit(data)
    assert fractional.max_samples_ == 0.4 * n_rows
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def test_iforest_parallel_regression(global_random_seed):
    """Check parallel regression."""
    rng = check_random_state(global_random_seed)

    train, test = train_test_split(diabetes.data, random_state=rng)

    forest = IsolationForest(n_jobs=3, random_state=global_random_seed).fit(train)

    # Predictions must not depend on the job count used at predict time.
    forest.set_params(n_jobs=1)
    pred_one_job = forest.predict(test)
    forest.set_params(n_jobs=2)
    pred_two_jobs = forest.predict(test)
    assert_array_almost_equal(pred_one_job, pred_two_jobs)

    # Nor on the job count used at fit time.
    serial_forest = IsolationForest(n_jobs=1, random_state=global_random_seed).fit(train)
    pred_serial = serial_forest.predict(test)
    assert_array_almost_equal(pred_one_job, pred_serial)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def test_iforest_performance(global_random_seed):
    """Test Isolation Forest performs well"""

    # Two well-separated Gaussian blobs form the "normal" population.
    rng = check_random_state(global_random_seed)
    blob = 0.3 * rng.randn(600, 2)
    data = rng.permutation(np.vstack((blob + 2, blob - 2)))
    train = data[:1000]

    # Novel abnormal observations drawn between the two blobs.
    abnormal = rng.uniform(low=-1, high=1, size=(200, 2))
    test = np.vstack((data[1000:], abnormal))
    labels = np.array([0] * 200 + [1] * 200)

    # fit the model
    detector = IsolationForest(max_samples=100, random_state=rng).fit(train)

    # Negate so that higher score means more abnormal.
    scores = -detector.decision_function(test)

    # Ranking must be near-perfect on this easy separation (at most a
    # handful of false positives/negatives).
    assert roc_auc_score(labels, scores) > 0.98
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@pytest.mark.parametrize("contamination", [0.25, "auto"])
def test_iforest_works(contamination, global_random_seed):
    # Toy data: six inliers followed by two obvious outliers.
    samples = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [7, 4], [-5, 9]]

    detector = IsolationForest(
        random_state=global_random_seed, contamination=contamination
    )
    detector.fit(samples)
    anomaly_scores = -detector.decision_function(samples)
    labels = detector.predict(samples)

    # The two outliers must score strictly above every inlier and be
    # labelled -1 while all inliers are labelled 1.
    assert np.min(anomaly_scores[-2:]) > np.max(anomaly_scores[:-2])
    assert_array_equal(labels, 6 * [1] + 2 * [-1])
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def test_max_samples_consistency():
    # Make sure validated max_samples in iforest and BaseBagging are identical
    forest = IsolationForest().fit(iris.data)
    assert forest.max_samples_ == forest._max_samples
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def test_iforest_subsampled_features():
    # Non-regression for #5732: predict used to fail when max_features < 1.
    rng = check_random_state(0)
    train, test, target_train, target_test = train_test_split(
        diabetes.data[:50], diabetes.target[:50], random_state=rng
    )
    model = IsolationForest(max_features=0.8)
    model.fit(train, target_train)
    model.predict(test)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def test_iforest_average_path_length():
    # Non-regression for #8549, which used the wrong formula for the
    # average path length in the integer case, and for #11839, which
    # fixed the values returned for inputs <= 2.
    expected_5 = 2.0 * (np.log(4.0) + np.euler_gamma) - 2.0 * 4.0 / 5.0
    expected_999 = 2.0 * (np.log(998.0) + np.euler_gamma) - 2.0 * 998.0 / 999.0

    assert_allclose(_average_path_length([0]), [0.0])
    assert_allclose(_average_path_length([1]), [0.0])
    assert_allclose(_average_path_length([2]), [1.0])
    assert_allclose(_average_path_length([5]), [expected_5])
    assert_allclose(_average_path_length([999]), [expected_999])
    assert_allclose(
        _average_path_length(np.array([1, 2, 5, 999])),
        [0.0, 1.0, expected_5, expected_999],
    )

    # The average path length must grow with the number of samples.
    lengths = _average_path_length(np.arange(5))
    assert_array_equal(lengths, np.sort(lengths))
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def test_score_samples():
    train = [[1, 1], [1, 2], [2, 1]]
    clf1 = IsolationForest(contamination=0.1).fit(train)
    clf2 = IsolationForest().fit(train)
    query = [[2.0, 2.0]]

    # score_samples must equal decision_function shifted back by offset_,
    # regardless of the contamination setting.
    for model in (clf1, clf2):
        assert_array_equal(
            model.score_samples(query),
            model.decision_function(query) + model.offset_,
        )

    # The raw scores themselves must not depend on contamination.
    assert_array_equal(clf1.score_samples(query), clf2.score_samples(query))
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def test_iforest_warm_start():
    """Test iterative addition of iTrees to an iForest"""

    rng = check_random_state(0)
    data = rng.randn(20, 2)

    # First pass: grow 10 trees with warm_start enabled.
    forest = IsolationForest(
        n_estimators=10, max_samples=20, random_state=rng, warm_start=True
    )
    forest.fit(data)
    first_tree = forest.estimators_[0]

    # Second pass: raise the budget and refit — only new trees may be
    # added, existing ones must be kept untouched.
    forest.set_params(n_estimators=20)
    forest.fit(data)
    assert len(forest.estimators_) == 20
    assert forest.estimators_[0] is first_tree
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
# mock get_chunk_n_rows to actually test more than one chunk (here one
# chunk has 3 rows):
@patch(
    "sklearn.ensemble._iforest.get_chunk_n_rows",
    side_effect=Mock(**{"return_value": 3}),
)
@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)])
def test_iforest_chunks_works1(
    mocked_get_chunk, contamination, n_predict_calls, global_random_seed
):
    # Re-run the end-to-end check with chunked scoring: the patched chunk
    # size forces the score computation to iterate over several chunks.
    test_iforest_works(contamination, global_random_seed)
    # The parametrized call counts encode that a float contamination
    # triggers one more chunked scoring pass than "auto".
    assert mocked_get_chunk.call_count == n_predict_calls
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
# idem with chunk_size = 10 rows
@patch(
    "sklearn.ensemble._iforest.get_chunk_n_rows",
    side_effect=Mock(**{"return_value": 10}),
)
@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)])
def test_iforest_chunks_works2(
    mocked_get_chunk, contamination, n_predict_calls, global_random_seed
):
    # Same end-to-end check as above with a larger (10-row) chunk size;
    # the expected call counts are unchanged.
    test_iforest_works(contamination, global_random_seed)
    assert mocked_get_chunk.call_count == n_predict_calls
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def test_iforest_with_uniform_data():
    """Test whether iforest predicts inliers when using uniform data"""

    # Case 1: a 2-d array of all ones — every point is an inlier.
    constant_data = np.ones((100, 10))
    model = IsolationForest()
    model.fit(constant_data)

    rng = np.random.RandomState(0)

    assert all(model.predict(constant_data) == 1)
    assert all(model.predict(rng.randn(100, 10)) == 1)
    assert all(model.predict(constant_data + 1) == 1)
    assert all(model.predict(constant_data - 1) == 1)

    # Case 2: each column holds one constant value across all rows.
    repeated_row = np.repeat(rng.randn(1, 10), 100, 0)
    model = IsolationForest()
    model.fit(repeated_row)

    assert all(model.predict(repeated_row) == 1)
    assert all(model.predict(rng.randn(100, 10)) == 1)
    assert all(model.predict(np.ones((100, 10))) == 1)

    # Case 3: training on a single row.
    single_row = rng.randn(1, 10)
    model = IsolationForest()
    model.fit(single_row)

    assert all(model.predict(single_row) == 1)
    assert all(model.predict(rng.randn(100, 10)) == 1)
    assert all(model.predict(np.ones((100, 10))) == 1)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_iforest_with_n_jobs_does_not_segfault(csc_container):
    """Check that Isolation Forest does not segfault with n_jobs=2

    Non-regression test for #23252
    """
    dense, _ = make_classification(n_samples=85_000, n_features=100, random_state=0)
    # The crash only reproduced on large sparse input with parallel fitting.
    IsolationForest(n_estimators=10, max_samples=256, n_jobs=2).fit(
        csc_container(dense)
    )
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def test_iforest_preserve_feature_names():
    """Check that feature names are preserved when contamination is not "auto".

    Feature names are required for consistency checks during scoring.

    Non-regression test for Issue #25844
    """
    pd = pytest.importorskip("pandas")
    rng = np.random.RandomState(0)

    frame = pd.DataFrame(data=rng.randn(4), columns=["a"])
    forest = IsolationForest(random_state=0, contamination=0.05)

    # Fitting must not emit any UserWarning (e.g. about missing feature
    # names during the internal offset computation).
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        forest.fit(frame)
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
def test_iforest_sparse_input_float_contamination(sparse_container):
    """Check that `IsolationForest` accepts sparse matrix input and float value for
    contamination.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27626
    """
    dense, _ = make_classification(n_samples=50, n_features=4, random_state=0)
    data = sparse_container(dense)
    data.sort_indices()

    contamination = 0.1
    forest = IsolationForest(
        n_estimators=5, contamination=contamination, random_state=0
    ).fit(data)

    # The fitted offset must flag exactly the requested fraction of the
    # training points as outliers (negative decision scores).
    scores = forest.decision_function(data)
    assert (scores < 0).sum() / data.shape[0] == pytest.approx(contamination)
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("contamination", [0.25, "auto"])
def test_iforest_predict_parallel(global_random_seed, contamination, n_jobs):
    """Check that `IsolationForest.predict` is parallelized."""
    # toy sample (the last two samples are outliers)
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [7, 4], [-5, 9]]

    # Reference run without any explicit parallelism.
    clf = IsolationForest(
        random_state=global_random_seed, contamination=contamination, n_jobs=None
    )
    clf.fit(X)
    decision_func = -clf.decision_function(X)
    pred = clf.predict(X)

    # assert detect outliers:
    assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])
    assert_array_equal(pred, 6 * [1] + 2 * [-1])

    # Same model fitted with all cores; predict under an explicit
    # threading backend to exercise the parallel code path.
    clf_parallel = IsolationForest(
        random_state=global_random_seed, contamination=contamination, n_jobs=-1
    )
    clf_parallel.fit(X)
    with parallel_backend("threading", n_jobs=n_jobs):
        # fixed misspelled local name: pred_paralell -> pred_parallel
        pred_parallel = clf_parallel.predict(X)

    # assert the same results as non-parallel
    assert_array_equal(pred, pred_parallel)
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py
ADDED
|
@@ -0,0 +1,1019 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test the stacking classifier and regressor."""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
import re
|
| 7 |
+
from unittest.mock import Mock
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import pytest
|
| 11 |
+
from numpy.testing import assert_array_equal
|
| 12 |
+
from scipy import sparse
|
| 13 |
+
|
| 14 |
+
from sklearn import config_context
|
| 15 |
+
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
|
| 16 |
+
from sklearn.datasets import (
|
| 17 |
+
load_breast_cancer,
|
| 18 |
+
load_diabetes,
|
| 19 |
+
load_iris,
|
| 20 |
+
make_classification,
|
| 21 |
+
make_multilabel_classification,
|
| 22 |
+
make_regression,
|
| 23 |
+
)
|
| 24 |
+
from sklearn.dummy import DummyClassifier, DummyRegressor
|
| 25 |
+
from sklearn.ensemble import (
|
| 26 |
+
RandomForestClassifier,
|
| 27 |
+
RandomForestRegressor,
|
| 28 |
+
StackingClassifier,
|
| 29 |
+
StackingRegressor,
|
| 30 |
+
)
|
| 31 |
+
from sklearn.exceptions import ConvergenceWarning, NotFittedError
|
| 32 |
+
from sklearn.linear_model import (
|
| 33 |
+
LinearRegression,
|
| 34 |
+
LogisticRegression,
|
| 35 |
+
Ridge,
|
| 36 |
+
RidgeClassifier,
|
| 37 |
+
)
|
| 38 |
+
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
|
| 39 |
+
from sklearn.neighbors import KNeighborsClassifier
|
| 40 |
+
from sklearn.neural_network import MLPClassifier
|
| 41 |
+
from sklearn.preprocessing import scale
|
| 42 |
+
from sklearn.svm import SVC, LinearSVC, LinearSVR
|
| 43 |
+
from sklearn.tests.metadata_routing_common import (
|
| 44 |
+
ConsumingClassifier,
|
| 45 |
+
ConsumingRegressor,
|
| 46 |
+
_Registry,
|
| 47 |
+
check_recorded_metadata,
|
| 48 |
+
)
|
| 49 |
+
from sklearn.utils._mocking import CheckingClassifier
|
| 50 |
+
from sklearn.utils._testing import (
|
| 51 |
+
assert_allclose,
|
| 52 |
+
assert_allclose_dense_sparse,
|
| 53 |
+
ignore_warnings,
|
| 54 |
+
)
|
| 55 |
+
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
|
| 56 |
+
|
| 57 |
+
# Module-level fixtures shared by the stacking tests: regression
# (diabetes), multiclass (iris), multilabel, and binary datasets.
diabetes = load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
iris = load_iris()
X_iris, y_iris = iris.data, iris.target
X_multilabel, y_multilabel = make_multilabel_classification(
    n_classes=3, random_state=42
)
X_binary, y_binary = make_classification(n_classes=2, random_state=42)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@pytest.mark.parametrize(
    "cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
    "final_estimator", [None, RandomForestClassifier(random_state=42)]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_iris(cv, final_estimator, passthrough):
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, y_test = train_test_split(
        scale(X_iris), y_iris, stratify=y_iris, random_state=42
    )
    estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
    clf = StackingClassifier(
        estimators=estimators,
        final_estimator=final_estimator,
        cv=cv,
        passthrough=passthrough,
    )
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    clf.predict_proba(X_test)
    assert clf.score(X_test, y_test) > 0.8

    # With both base estimators each contributing 3 columns, transform
    # yields 6 columns, plus the 4 raw features when passthrough is on.
    X_trans = clf.transform(X_test)
    expected_column_count = 10 if passthrough else 6
    assert X_trans.shape[1] == expected_column_count
    if passthrough:
        # The original features must appear untouched as the last columns.
        assert_allclose(X_test, X_trans[:, -4:])

    clf.set_params(lr="drop")
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    clf.predict_proba(X_test)
    if final_estimator is None:
        # LogisticRegression has decision_function method
        clf.decision_function(X_test)

    # After dropping "lr", only the SVC's 3 columns remain (+4 passthrough).
    X_trans = clf.transform(X_test)
    expected_column_count_drop = 7 if passthrough else 3
    assert X_trans.shape[1] == expected_column_count_drop
    if passthrough:
        assert_allclose(X_test, X_trans[:, -4:])
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_stacking_classifier_drop_column_binary_classification():
    # check that a column is dropped in binary classification
    X, y = load_breast_cancer(return_X_y=True)
    train, test, target, _ = train_test_split(
        scale(X), y, stratify=y, random_state=42
    )

    # Both base classifiers expose predict_proba, so the redundant second
    # probability column is dropped for each — one column per estimator.
    stacker = StackingClassifier(
        estimators=[
            ("lr", LogisticRegression()),
            ("rf", RandomForestClassifier(random_state=42)),
        ],
        cv=3,
    )
    stacker.fit(train, target)
    assert stacker.transform(test).shape[1] == 2

    # LinearSVC lacks predict_proba; its single decision_function column
    # is not dropped, so the total width is still 2.
    stacker.set_params(
        estimators=[("lr", LogisticRegression()), ("svc", LinearSVC())]
    )
    stacker.fit(train, target)
    assert stacker.transform(test).shape[1] == 2
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def test_stacking_classifier_drop_estimator():
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, _ = train_test_split(
        scale(X_iris), y_iris, stratify=y_iris, random_state=42
    )
    estimators = [("lr", "drop"), ("svc", LinearSVC(random_state=0))]
    rf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf = StackingClassifier(
        estimators=[("svc", LinearSVC(random_state=0))],
        final_estimator=rf,
        cv=5,
    )
    clf_drop = StackingClassifier(estimators=estimators, final_estimator=rf, cv=5)

    clf.fit(X_train, y_train)
    clf_drop.fit(X_train, y_train)
    # setting an estimator to "drop" must be equivalent to never listing it
    assert_allclose(clf.predict(X_test), clf_drop.predict(X_test))
    assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test))
    assert_allclose(clf.transform(X_test), clf_drop.transform(X_test))
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def test_stacking_regressor_drop_estimator():
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, _ = train_test_split(
        scale(X_diabetes), y_diabetes, random_state=42
    )
    estimators = [("lr", "drop"), ("svr", LinearSVR(random_state=0))]
    rf = RandomForestRegressor(n_estimators=10, random_state=42)
    reg = StackingRegressor(
        estimators=[("svr", LinearSVR(random_state=0))],
        final_estimator=rf,
        cv=5,
    )
    reg_drop = StackingRegressor(estimators=estimators, final_estimator=rf, cv=5)

    reg.fit(X_train, y_train)
    reg_drop.fit(X_train, y_train)
    # setting an estimator to "drop" must be equivalent to never listing it
    assert_allclose(reg.predict(X_test), reg_drop.predict(X_test))
    assert_allclose(reg.transform(X_test), reg_drop.transform(X_test))
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@pytest.mark.parametrize("cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)])
@pytest.mark.parametrize(
    "final_estimator, predict_params",
    [
        (None, {}),
        (RandomForestRegressor(random_state=42), {}),
        (DummyRegressor(), {"return_std": True}),
    ],
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_regressor_diabetes(cv, final_estimator, predict_params, passthrough):
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, _ = train_test_split(
        scale(X_diabetes), y_diabetes, random_state=42
    )
    estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
    reg = StackingRegressor(
        estimators=estimators,
        final_estimator=final_estimator,
        cv=cv,
        passthrough=passthrough,
    )
    reg.fit(X_train, y_train)
    result = reg.predict(X_test, **predict_params)
    # with return_std=True the final estimator returns (mean, std)
    expected_result_length = 2 if predict_params else 1
    if predict_params:
        assert len(result) == expected_result_length

    X_trans = reg.transform(X_test)
    # 2 base-estimator predictions (+ the 10 original features if passthrough)
    expected_column_count = 12 if passthrough else 2
    assert X_trans.shape[1] == expected_column_count
    if passthrough:
        assert_allclose(X_test, X_trans[:, -10:])

    reg.set_params(lr="drop")
    reg.fit(X_train, y_train)
    reg.predict(X_test)

    X_trans = reg.transform(X_test)
    # dropping "lr" removes exactly one prediction column
    expected_column_count_drop = 11 if passthrough else 1
    assert X_trans.shape[1] == expected_column_count_drop
    if passthrough:
        assert_allclose(X_test, X_trans[:, -10:])
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_stacking_regressor_sparse_passthrough(sparse_container):
    # Check passthrough behavior on a sparse X matrix
    X_train, X_test, y_train, _ = train_test_split(
        sparse_container(scale(X_diabetes)), y_diabetes, random_state=42
    )
    estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
    rf = RandomForestRegressor(n_estimators=10, random_state=42)
    clf = StackingRegressor(
        estimators=estimators, final_estimator=rf, cv=5, passthrough=True
    )
    clf.fit(X_train, y_train)
    X_trans = clf.transform(X_test)
    # the original (sparse) features are appended unchanged at the end
    assert_allclose_dense_sparse(X_test, X_trans[:, -10:])
    # sparsity and sparse format must be preserved by passthrough
    assert sparse.issparse(X_trans)
    assert X_test.format == X_trans.format
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_stacking_classifier_sparse_passthrough(sparse_container):
    # Check passthrough behavior on a sparse X matrix
    X_train, X_test, y_train, _ = train_test_split(
        sparse_container(scale(X_iris)), y_iris, random_state=42
    )
    estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
    rf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf = StackingClassifier(
        estimators=estimators, final_estimator=rf, cv=5, passthrough=True
    )
    clf.fit(X_train, y_train)
    X_trans = clf.transform(X_test)
    # the original (sparse) features are appended unchanged at the end
    assert_allclose_dense_sparse(X_test, X_trans[:, -4:])
    # sparsity and sparse format must be preserved by passthrough
    assert sparse.issparse(X_trans)
    assert X_test.format == X_trans.format
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def test_stacking_classifier_drop_binary_prob():
    """For a binary problem, one redundant probability column is dropped."""
    # restrict iris to the first two classes to obtain a binary target
    X_binary = scale(X_iris[:100])
    y_binary = y_iris[:100]

    clf = StackingClassifier(
        estimators=[("lr", LogisticRegression()), ("rf", RandomForestClassifier())]
    )
    clf.fit(X_binary, y_binary)
    X_meta = clf.transform(X_binary)
    # one column per base estimator remains after dropping the duplicate
    assert X_meta.shape[1] == 2
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class NoWeightRegressor(RegressorMixin, BaseEstimator):
    # Minimal regressor whose fit() signature has no sample_weight parameter;
    # used to check the error raised when weights are routed to it.
    def fit(self, X, y):
        self.reg = DummyRegressor()
        return self.reg.fit(X, y)

    def predict(self, X):
        # constant predictions; only the (no-weight) fit contract matters here
        return np.ones(X.shape[0])
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class NoWeightClassifier(ClassifierMixin, BaseEstimator):
    # Minimal classifier whose fit() signature has no sample_weight parameter;
    # used to check the error raised when weights are routed to it.
    def fit(self, X, y):
        self.clf = DummyClassifier(strategy="stratified")
        return self.clf.fit(X, y)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
@pytest.mark.parametrize(
    "y, params, type_err, msg_err",
    [
        # empty estimator list is rejected up front
        (y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
        # requesting predict_proba from an estimator lacking it must fail
        (
            y_iris,
            {
                "estimators": [
                    ("lr", LogisticRegression()),
                    ("svm", SVC(max_iter=50_000)),
                ],
                "stack_method": "predict_proba",
            },
            ValueError,
            "does not implement the method predict_proba",
        ),
        # routing sample_weight to a base estimator that cannot take it
        (
            y_iris,
            {
                "estimators": [
                    ("lr", LogisticRegression()),
                    ("cor", NoWeightClassifier()),
                ]
            },
            TypeError,
            "does not support sample weight",
        ),
        # routing sample_weight to a final estimator that cannot take it
        (
            y_iris,
            {
                "estimators": [
                    ("lr", LogisticRegression()),
                    ("cor", LinearSVC(max_iter=50_000)),
                ],
                "final_estimator": NoWeightClassifier(),
            },
            TypeError,
            "does not support sample weight",
        ),
    ],
)
def test_stacking_classifier_error(y, params, type_err, msg_err):
    # each invalid configuration must raise the documented error at fit time
    with pytest.raises(type_err, match=msg_err):
        clf = StackingClassifier(**params, cv=3)
        clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0]))
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
@pytest.mark.parametrize(
    "y, params, type_err, msg_err",
    [
        # empty estimator list is rejected up front
        (y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
        # routing sample_weight to a base estimator that cannot take it
        (
            y_diabetes,
            {"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]},
            TypeError,
            "does not support sample weight",
        ),
        # routing sample_weight to a final estimator that cannot take it
        (
            y_diabetes,
            {
                "estimators": [
                    ("lr", LinearRegression()),
                    ("cor", LinearSVR()),
                ],
                "final_estimator": NoWeightRegressor(),
            },
            TypeError,
            "does not support sample weight",
        ),
    ],
)
def test_stacking_regressor_error(y, params, type_err, msg_err):
    # each invalid configuration must raise the documented error at fit time
    with pytest.raises(type_err, match=msg_err):
        reg = StackingRegressor(**params, cv=3)
        reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0]))
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
@pytest.mark.parametrize(
    "estimator, X, y",
    [
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression(random_state=0)),
                    ("svm", LinearSVC(random_state=0)),
                ]
            ),
            X_iris[:100],
            y_iris[:100],
        ),  # keep only classes 0 and 1
        (
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR(random_state=0)),
                ]
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
    ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_randomness(estimator, X, y):
    # checking that fixing the random state of the CV will lead to the same
    # results
    estimator_full = clone(estimator)
    estimator_full.set_params(
        cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
    )

    estimator_drop = clone(estimator)
    estimator_drop.set_params(lr="drop")
    estimator_drop.set_params(
        cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
    )

    # with identical CV randomness, dropping "lr" must only remove its
    # (first) column from the transformed output
    assert_allclose(
        estimator_full.fit(X, y).transform(X)[:, 1:],
        estimator_drop.fit(X, y).transform(X),
    )
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def test_stacking_classifier_stratify_default():
    """Ensure the default CV used by StackingClassifier stratifies classes."""
    estimators = [
        ("lr", LogisticRegression(max_iter=10_000)),
        ("svm", LinearSVC(max_iter=10_000)),
    ]
    clf = StackingClassifier(estimators=estimators)
    # iris is ordered by class, so a plain (non-stratified) k-fold would
    # leave some training folds without all 3 classes; fitting must succeed.
    clf.fit(X_iris, y_iris)
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
@pytest.mark.parametrize(
    "stacker, X, y",
    [
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC(random_state=42)),
                ],
                final_estimator=LogisticRegression(),
                cv=KFold(shuffle=True, random_state=42),
            ),
            *load_breast_cancer(return_X_y=True),
        ),
        (
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR(random_state=42)),
                ],
                final_estimator=LinearRegression(),
                cv=KFold(shuffle=True, random_state=42),
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
    ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_with_sample_weight(stacker, X, y):
    # check that sample weights has an influence on the fitting
    # note: ConvergenceWarning are catch since we are not worrying about the
    # convergence here
    n_half_samples = len(y) // 2
    total_sample_weight = np.array(
        [0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples)
    )
    X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split(
        X, y, total_sample_weight, random_state=42
    )

    with ignore_warnings(category=ConvergenceWarning):
        stacker.fit(X_train, y_train)
    y_pred_no_weight = stacker.predict(X_test)

    with ignore_warnings(category=ConvergenceWarning):
        stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape))
    y_pred_unit_weight = stacker.predict(X_test)

    # all-ones weights must be equivalent to passing no weights at all
    assert_allclose(y_pred_no_weight, y_pred_unit_weight)

    with ignore_warnings(category=ConvergenceWarning):
        stacker.fit(X_train, y_train, sample_weight=sample_weight_train)
    y_pred_biased = stacker.predict(X_test)

    # non-uniform weights must change at least one prediction
    assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
def test_stacking_classifier_sample_weight_fit_param():
    """sample_weight must be forwarded to every call to fit."""
    # CheckingClassifier asserts inside fit() that sample_weight was received,
    # both for the base estimator and for the final estimator.
    stacker = StackingClassifier(
        estimators=[("lr", CheckingClassifier(expected_sample_weight=True))],
        final_estimator=CheckingClassifier(expected_sample_weight=True),
    )
    weights = np.ones(X_iris.shape[0])
    stacker.fit(X_iris, y_iris, sample_weight=weights)
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize(
    "stacker, X, y",
    [
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC(random_state=42)),
                ],
                final_estimator=LogisticRegression(),
            ),
            *load_breast_cancer(return_X_y=True),
        ),
        (
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR(random_state=42)),
                ],
                final_estimator=LinearRegression(),
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
    ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_cv_influence(stacker, X, y):
    # check that the stacking affects the fit of the final estimator but not
    # the fit of the base estimators
    # note: ConvergenceWarning are catch since we are not worrying about the
    # convergence here
    stacker_cv_3 = clone(stacker)
    stacker_cv_5 = clone(stacker)

    stacker_cv_3.set_params(cv=3)
    stacker_cv_5.set_params(cv=5)

    stacker_cv_3.fit(X, y)
    stacker_cv_5.fit(X, y)

    # the base estimators should be identical
    # (they are refit on the full data regardless of the CV used)
    for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, stacker_cv_5.estimators_):
        assert_allclose(est_cv_3.coef_, est_cv_5.coef_)

    # the final estimator should be different
    # (it is trained on out-of-fold predictions, which depend on the CV)
    with pytest.raises(AssertionError, match="Not equal"):
        assert_allclose(
            stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_
        )
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
@pytest.mark.parametrize(
    "Stacker, Estimator, stack_method, final_estimator, X, y",
    [
        (
            StackingClassifier,
            DummyClassifier,
            "predict_proba",
            LogisticRegression(random_state=42),
            X_iris,
            y_iris,
        ),
        (
            StackingRegressor,
            DummyRegressor,
            "predict",
            LinearRegression(),
            X_diabetes,
            y_diabetes,
        ),
    ],
)
def test_stacking_prefit(Stacker, Estimator, stack_method, final_estimator, X, y):
    """Check the behaviour of stacking when `cv='prefit'`"""
    X_train1, X_train2, y_train1, y_train2 = train_test_split(
        X, y, random_state=42, test_size=0.5
    )
    # base estimators are fitted ahead of time on the first half of the data
    estimators = [
        ("d0", Estimator().fit(X_train1, y_train1)),
        ("d1", Estimator().fit(X_train1, y_train1)),
    ]

    # mock out fit and stack_method to be asserted later
    for _, estimator in estimators:
        estimator.fit = Mock(name="fit")
        stack_func = getattr(estimator, stack_method)
        predict_method_mocked = Mock(side_effect=stack_func)
        # Mocking a method will not provide a `__name__` while Python methods
        # do and we are using it in `_get_response_method`.
        predict_method_mocked.__name__ = stack_method
        setattr(estimator, stack_method, predict_method_mocked)

    stacker = Stacker(
        estimators=estimators, cv="prefit", final_estimator=final_estimator
    )
    stacker.fit(X_train2, y_train2)

    # with cv="prefit" the given instances must be used as-is, not cloned
    assert stacker.estimators_ == [estimator for _, estimator in estimators]
    # fit was not called again
    assert all(estimator.fit.call_count == 0 for estimator in stacker.estimators_)

    # stack method is called with the proper inputs
    for estimator in stacker.estimators_:
        stack_func_mock = getattr(estimator, stack_method)
        stack_func_mock.assert_called_with(X_train2)
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
@pytest.mark.parametrize(
    "stacker, X, y",
    [
        (
            StackingClassifier(
                estimators=[("lr", LogisticRegression()), ("svm", SVC())],
                cv="prefit",
            ),
            X_iris,
            y_iris,
        ),
        (
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR()),
                ],
                cv="prefit",
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
)
def test_stacking_prefit_error(stacker, X, y):
    # check that NotFittedError is raised
    # if base estimators are not fitted when cv="prefit"
    with pytest.raises(NotFittedError):
        stacker.fit(X, y)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
@pytest.mark.parametrize(
    "make_dataset, Stacking, Estimator",
    [
        (make_classification, StackingClassifier, LogisticRegression),
        (make_regression, StackingRegressor, LinearRegression),
    ],
)
def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator):
    # Stacking supports estimators without `n_features_in_`. Regression test
    # for #17353

    class MyEstimator(Estimator):
        """Estimator without n_features_in_"""

        def fit(self, X, y):
            super().fit(X, y)
            # deliberately remove the attribute set by the parent fit
            del self.n_features_in_

    X, y = make_dataset(random_state=0, n_samples=100)
    stacker = Stacking(estimators=[("lr", MyEstimator())])

    # before fit, the ensemble itself has no n_features_in_
    msg = f"{Stacking.__name__} object has no attribute n_features_in_"
    with pytest.raises(AttributeError, match=msg):
        stacker.n_features_in_

    # Does not raise
    stacker.fit(X, y)

    # after fit, the delegated lookup fails on the base estimator instead
    msg = "'MyEstimator' object has no attribute 'n_features_in_'"
    with pytest.raises(AttributeError, match=msg):
        stacker.n_features_in_
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
@pytest.mark.parametrize(
    "estimator",
    [
        # output a 2D array of the probability of the positive class for each output
        MLPClassifier(random_state=42),
        # output a list of 2D array containing the probability of each class
        # for each output
        RandomForestClassifier(random_state=42),
    ],
    ids=["MLPClassifier", "RandomForestClassifier"],
)
def test_stacking_classifier_multilabel_predict_proba(estimator):
    """Check the behaviour for the multilabel classification case and the
    `predict_proba` stacking method.

    Estimators are not consistent with the output arrays and we need to ensure that
    we handle all cases.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
    )
    n_outputs = 3

    estimators = [("est", estimator)]
    stacker = StackingClassifier(
        estimators=estimators,
        final_estimator=KNeighborsClassifier(),
        stack_method="predict_proba",
    ).fit(X_train, y_train)

    # one positive-class probability column per output label
    X_trans = stacker.transform(X_test)
    assert X_trans.shape == (X_test.shape[0], n_outputs)
    # we should not have any collinear classes and thus nothing should sum to 1
    assert not any(np.isclose(X_trans.sum(axis=1), 1.0))

    y_pred = stacker.predict(X_test)
    assert y_pred.shape == y_test.shape
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
def test_stacking_classifier_multilabel_decision_function():
    """Check the behaviour for the multilabel classification case and the
    `decision_function` stacking method. Only `RidgeClassifier` supports this
    case.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
    )
    n_outputs = 3

    estimators = [("est", RidgeClassifier())]
    stacker = StackingClassifier(
        estimators=estimators,
        final_estimator=KNeighborsClassifier(),
        stack_method="decision_function",
    ).fit(X_train, y_train)

    # one decision value column per output label
    X_trans = stacker.transform(X_test)
    assert X_trans.shape == (X_test.shape[0], n_outputs)

    y_pred = stacker.predict(X_test)
    assert y_pred.shape == y_test.shape
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
@pytest.mark.parametrize("stack_method", ["auto", "predict"])
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_multilabel_auto_predict(stack_method, passthrough):
    """Check the behaviour for the multilabel classification case for stack methods
    supported for all estimators or automatically picked up.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
    )
    y_train_before_fit = y_train.copy()
    n_outputs = 3

    estimators = [
        ("mlp", MLPClassifier(random_state=42)),
        ("rf", RandomForestClassifier(random_state=42)),
        ("ridge", RidgeClassifier()),
    ]
    final_estimator = KNeighborsClassifier()

    clf = StackingClassifier(
        estimators=estimators,
        final_estimator=final_estimator,
        passthrough=passthrough,
        stack_method=stack_method,
    ).fit(X_train, y_train)

    # make sure we don't change `y_train` inplace
    assert_array_equal(y_train_before_fit, y_train)

    y_pred = clf.predict(X_test)
    assert y_pred.shape == y_test.shape

    # "auto" picks predict_proba where available, decision_function for ridge
    if stack_method == "auto":
        expected_stack_methods = ["predict_proba", "predict_proba", "decision_function"]
    else:
        expected_stack_methods = ["predict"] * len(estimators)
    assert clf.stack_method_ == expected_stack_methods

    # one column per (estimator, output); plus raw features when passthrough
    n_features_X_trans = n_outputs * len(estimators)
    if passthrough:
        n_features_X_trans += X_train.shape[1]
    X_trans = clf.transform(X_test)
    assert X_trans.shape == (X_test.shape[0], n_features_X_trans)

    assert_array_equal(clf.classes_, [np.array([0, 1])] * n_outputs)
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
@pytest.mark.parametrize(
    "stacker, feature_names, X, y, expected_names",
    [
        # multiclass: one column per (estimator, class)
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression(random_state=0)),
                    ("svm", LinearSVC(random_state=0)),
                ]
            ),
            iris.feature_names,
            X_iris,
            y_iris,
            [
                "stackingclassifier_lr0",
                "stackingclassifier_lr1",
                "stackingclassifier_lr2",
                "stackingclassifier_svm0",
                "stackingclassifier_svm1",
                "stackingclassifier_svm2",
            ],
        ),
        # binary: a single column per estimator; dropped estimators are omitted
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression(random_state=0)),
                    ("other", "drop"),
                    ("svm", LinearSVC(random_state=0)),
                ]
            ),
            iris.feature_names,
            X_iris[:100],
            y_iris[:100],  # keep only classes 0 and 1
            [
                "stackingclassifier_lr",
                "stackingclassifier_svm",
            ],
        ),
        # regression: a single column per estimator
        (
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR(random_state=0)),
                ]
            ),
            diabetes.feature_names,
            X_diabetes,
            y_diabetes,
            [
                "stackingregressor_lr",
                "stackingregressor_svm",
            ],
        ),
    ],
    ids=[
        "StackingClassifier_multiclass",
        "StackingClassifier_binary",
        "StackingRegressor",
    ],
)
@pytest.mark.parametrize("passthrough", [True, False])
def test_get_feature_names_out(
    stacker, feature_names, X, y, expected_names, passthrough
):
    """Check get_feature_names_out works for stacking."""

    stacker.set_params(passthrough=passthrough)
    stacker.fit(scale(X), y)

    if passthrough:
        # original input feature names are appended after the stacked ones
        expected_names = np.concatenate((expected_names, feature_names))

    names_out = stacker.get_feature_names_out(feature_names)
    assert_array_equal(names_out, expected_names)
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
def test_stacking_classifier_base_regressor():
    """Check that a regressor can be used as the first layer in `StackingClassifier`."""
    X_train, X_test, y_train, y_test = train_test_split(
        scale(X_iris), y_iris, stratify=y_iris, random_state=42
    )
    clf = StackingClassifier(estimators=[("ridge", Ridge())])
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    clf.predict_proba(X_test)
    # the stacked model should still be a reasonably good classifier
    assert clf.score(X_test, y_test) > 0.8
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
def test_stacking_final_estimator_attribute_error():
    """Check that we raise the proper AttributeError when the final estimator
    does not implement the `decision_function` method, which is decorated with
    `available_if`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28108
    """
    X, y = make_classification(random_state=42)

    estimators = [
        ("lr", LogisticRegression()),
        ("rf", RandomForestClassifier(n_estimators=2, random_state=42)),
    ]
    # RandomForestClassifier does not implement 'decision_function' and should raise
    # an AttributeError
    final_estimator = RandomForestClassifier(n_estimators=2, random_state=42)
    clf = StackingClassifier(
        estimators=estimators, final_estimator=final_estimator, cv=3
    )

    # the outer error names the ensemble; the chained cause names the
    # final estimator that is actually missing the method
    outer_msg = "This 'StackingClassifier' has no attribute 'decision_function'"
    inner_msg = "'RandomForestClassifier' object has no attribute 'decision_function'"
    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
        clf.fit(X, y).decision_function(X)
    assert isinstance(exec_info.value.__cause__, AttributeError)
    assert inner_msg in str(exec_info.value.__cause__)
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
# Metadata Routing Tests
|
| 902 |
+
# ======================
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
def test_routing_passed_metadata_not_supported(Estimator, Child):
    """Test that the right error message is raised when metadata is passed while
    not supported when `enable_metadata_routing=False`."""

    # NOTE(review): estimators is passed as ["clf", Child()] rather than the
    # usual [("clf", Child())]; presumably the routing error fires before the
    # estimators list is validated — confirm this is intentional.
    with pytest.raises(
        ValueError, match="is only supported if enable_metadata_routing=True"
    ):
        Estimator(["clf", Child()]).fit(
            X_iris, y_iris, sample_weight=[1, 1, 1, 1, 1], metadata="a"
        )
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
@config_context(enable_metadata_routing=True)
def test_get_metadata_routing_without_fit(Estimator, Child):
    """get_metadata_routing must not raise on an unfitted ensemble."""
    unfitted = Estimator([("sub_est", Child())])
    # should work even though fit() has never been called
    unfitted.get_metadata_routing()
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
@pytest.mark.parametrize(
    "prop, prop_value", [("sample_weight", np.ones(X_iris.shape[0])), ("metadata", "a")]
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_for_stacking_estimators(Estimator, Child, prop, prop_value):
    """Test that metadata is routed correctly for Stacking*."""

    est = Estimator(
        [
            (
                "sub_est1",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
            (
                "sub_est2",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
        ],
        final_estimator=Child(registry=_Registry()).set_predict_request(**{prop: True}),
    )

    est.fit(X_iris, y_iris, **{prop: prop_value})
    est.fit_transform(X_iris, y_iris, **{prop: prop_value})

    est.predict(X_iris, **{prop: prop_value})

    for estimator in est.estimators:
        # access sub-estimator in (name, est) with estimator[1]:
        registry = estimator[1].registry
        assert len(registry)
        for sub_est in registry:
            check_recorded_metadata(
                obj=sub_est,
                method="fit",
                parent="fit",
                # BUGFIX: was `split_params=(prop)` — a parenthesized string,
                # not a tuple; it only worked because `in` on a string does
                # substring matching. Pass a genuine one-element tuple.
                split_params=(prop,),
                **{prop: prop_value},
            )
    # access final_estimator:
    registry = est.final_estimator_.registry
    assert len(registry)
    check_recorded_metadata(
        obj=registry[-1],
        method="predict",
        parent="predict",
        split_params=(prop,),  # BUGFIX: was `(prop)`, see above
        **{prop: prop_value},
    )
|
| 993 |
+
|
| 994 |
+
|
| 995 |
+
@pytest.mark.parametrize(
|
| 996 |
+
"Estimator, Child",
|
| 997 |
+
[
|
| 998 |
+
(StackingClassifier, ConsumingClassifier),
|
| 999 |
+
(StackingRegressor, ConsumingRegressor),
|
| 1000 |
+
],
|
| 1001 |
+
)
|
| 1002 |
+
@config_context(enable_metadata_routing=True)
|
| 1003 |
+
def test_metadata_routing_error_for_stacking_estimators(Estimator, Child):
|
| 1004 |
+
"""Test that the right error is raised when metadata is not requested."""
|
| 1005 |
+
sample_weight, metadata = np.ones(X_iris.shape[0]), "a"
|
| 1006 |
+
|
| 1007 |
+
est = Estimator([("sub_est", Child())])
|
| 1008 |
+
|
| 1009 |
+
error_message = (
|
| 1010 |
+
"[sample_weight, metadata] are passed but are not explicitly set as requested"
|
| 1011 |
+
f" or not requested for {Child.__name__}.fit"
|
| 1012 |
+
)
|
| 1013 |
+
|
| 1014 |
+
with pytest.raises(ValueError, match=re.escape(error_message)):
|
| 1015 |
+
est.fit(X_iris, y_iris, sample_weight=sample_weight, metadata=metadata)
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
# End of Metadata Routing Tests
|
| 1019 |
+
# =============================
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py
ADDED
|
@@ -0,0 +1,787 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Testing for the VotingClassifier and VotingRegressor"""
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from sklearn import config_context, datasets
|
| 9 |
+
from sklearn.base import BaseEstimator, ClassifierMixin, clone
|
| 10 |
+
from sklearn.datasets import make_multilabel_classification
|
| 11 |
+
from sklearn.dummy import DummyRegressor
|
| 12 |
+
from sklearn.ensemble import (
|
| 13 |
+
RandomForestClassifier,
|
| 14 |
+
RandomForestRegressor,
|
| 15 |
+
VotingClassifier,
|
| 16 |
+
VotingRegressor,
|
| 17 |
+
)
|
| 18 |
+
from sklearn.exceptions import NotFittedError
|
| 19 |
+
from sklearn.linear_model import LinearRegression, LogisticRegression
|
| 20 |
+
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
|
| 21 |
+
from sklearn.multiclass import OneVsRestClassifier
|
| 22 |
+
from sklearn.naive_bayes import GaussianNB
|
| 23 |
+
from sklearn.neighbors import KNeighborsClassifier
|
| 24 |
+
from sklearn.preprocessing import StandardScaler
|
| 25 |
+
from sklearn.svm import SVC
|
| 26 |
+
from sklearn.tests.metadata_routing_common import (
|
| 27 |
+
ConsumingClassifier,
|
| 28 |
+
ConsumingRegressor,
|
| 29 |
+
_Registry,
|
| 30 |
+
check_recorded_metadata,
|
| 31 |
+
)
|
| 32 |
+
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
|
| 33 |
+
from sklearn.utils._testing import (
|
| 34 |
+
assert_almost_equal,
|
| 35 |
+
assert_array_almost_equal,
|
| 36 |
+
assert_array_equal,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
# Load datasets
|
| 40 |
+
iris = datasets.load_iris()
|
| 41 |
+
X, y = iris.data[:, 1:3], iris.target
|
| 42 |
+
# Scaled to solve ConvergenceWarning throw by Logistic Regression
|
| 43 |
+
X_scaled = StandardScaler().fit_transform(X)
|
| 44 |
+
|
| 45 |
+
X_r, y_r = datasets.load_diabetes(return_X_y=True)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@pytest.mark.parametrize(
|
| 49 |
+
"params, err_msg",
|
| 50 |
+
[
|
| 51 |
+
(
|
| 52 |
+
{"estimators": []},
|
| 53 |
+
"Invalid 'estimators' attribute, 'estimators' should be a non-empty list",
|
| 54 |
+
),
|
| 55 |
+
(
|
| 56 |
+
{"estimators": [("lr", LogisticRegression())], "weights": [1, 2]},
|
| 57 |
+
"Number of `estimators` and weights must be equal",
|
| 58 |
+
),
|
| 59 |
+
],
|
| 60 |
+
)
|
| 61 |
+
def test_voting_classifier_estimator_init(params, err_msg):
|
| 62 |
+
ensemble = VotingClassifier(**params)
|
| 63 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 64 |
+
ensemble.fit(X, y)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def test_predictproba_hardvoting():
|
| 68 |
+
eclf = VotingClassifier(
|
| 69 |
+
estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())],
|
| 70 |
+
voting="hard",
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
inner_msg = "predict_proba is not available when voting='hard'"
|
| 74 |
+
outer_msg = "'VotingClassifier' has no attribute 'predict_proba'"
|
| 75 |
+
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
|
| 76 |
+
eclf.predict_proba
|
| 77 |
+
assert isinstance(exec_info.value.__cause__, AttributeError)
|
| 78 |
+
assert inner_msg in str(exec_info.value.__cause__)
|
| 79 |
+
|
| 80 |
+
assert not hasattr(eclf, "predict_proba")
|
| 81 |
+
eclf.fit(X_scaled, y)
|
| 82 |
+
assert not hasattr(eclf, "predict_proba")
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def test_notfitted():
|
| 86 |
+
eclf = VotingClassifier(
|
| 87 |
+
estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())],
|
| 88 |
+
voting="soft",
|
| 89 |
+
)
|
| 90 |
+
ereg = VotingRegressor([("dr", DummyRegressor())])
|
| 91 |
+
msg = (
|
| 92 |
+
"This %s instance is not fitted yet. Call 'fit'"
|
| 93 |
+
" with appropriate arguments before using this estimator."
|
| 94 |
+
)
|
| 95 |
+
with pytest.raises(NotFittedError, match=msg % "VotingClassifier"):
|
| 96 |
+
eclf.predict(X)
|
| 97 |
+
with pytest.raises(NotFittedError, match=msg % "VotingClassifier"):
|
| 98 |
+
eclf.predict_proba(X)
|
| 99 |
+
with pytest.raises(NotFittedError, match=msg % "VotingClassifier"):
|
| 100 |
+
eclf.transform(X)
|
| 101 |
+
with pytest.raises(NotFittedError, match=msg % "VotingRegressor"):
|
| 102 |
+
ereg.predict(X_r)
|
| 103 |
+
with pytest.raises(NotFittedError, match=msg % "VotingRegressor"):
|
| 104 |
+
ereg.transform(X_r)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def test_majority_label_iris(global_random_seed):
|
| 108 |
+
"""Check classification by majority label on dataset iris."""
|
| 109 |
+
clf1 = LogisticRegression(solver="liblinear", random_state=global_random_seed)
|
| 110 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
|
| 111 |
+
clf3 = GaussianNB()
|
| 112 |
+
eclf = VotingClassifier(
|
| 113 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard"
|
| 114 |
+
)
|
| 115 |
+
scores = cross_val_score(eclf, X, y, scoring="accuracy")
|
| 116 |
+
|
| 117 |
+
assert scores.mean() >= 0.9
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def test_tie_situation():
|
| 121 |
+
"""Check voting classifier selects smaller class label in tie situation."""
|
| 122 |
+
clf1 = LogisticRegression(random_state=123, solver="liblinear")
|
| 123 |
+
clf2 = RandomForestClassifier(random_state=123)
|
| 124 |
+
eclf = VotingClassifier(estimators=[("lr", clf1), ("rf", clf2)], voting="hard")
|
| 125 |
+
assert clf1.fit(X, y).predict(X)[73] == 2
|
| 126 |
+
assert clf2.fit(X, y).predict(X)[73] == 1
|
| 127 |
+
assert eclf.fit(X, y).predict(X)[73] == 1
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def test_weights_iris(global_random_seed):
|
| 131 |
+
"""Check classification by average probabilities on dataset iris."""
|
| 132 |
+
clf1 = LogisticRegression(random_state=global_random_seed)
|
| 133 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
|
| 134 |
+
clf3 = GaussianNB()
|
| 135 |
+
eclf = VotingClassifier(
|
| 136 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
|
| 137 |
+
voting="soft",
|
| 138 |
+
weights=[1, 2, 10],
|
| 139 |
+
)
|
| 140 |
+
scores = cross_val_score(eclf, X_scaled, y, scoring="accuracy")
|
| 141 |
+
assert scores.mean() >= 0.9
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def test_weights_regressor():
|
| 145 |
+
"""Check weighted average regression prediction on diabetes dataset."""
|
| 146 |
+
reg1 = DummyRegressor(strategy="mean")
|
| 147 |
+
reg2 = DummyRegressor(strategy="median")
|
| 148 |
+
reg3 = DummyRegressor(strategy="quantile", quantile=0.2)
|
| 149 |
+
ereg = VotingRegressor(
|
| 150 |
+
[("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 2, 10]
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
X_r_train, X_r_test, y_r_train, y_r_test = train_test_split(
|
| 154 |
+
X_r, y_r, test_size=0.25
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
reg1_pred = reg1.fit(X_r_train, y_r_train).predict(X_r_test)
|
| 158 |
+
reg2_pred = reg2.fit(X_r_train, y_r_train).predict(X_r_test)
|
| 159 |
+
reg3_pred = reg3.fit(X_r_train, y_r_train).predict(X_r_test)
|
| 160 |
+
ereg_pred = ereg.fit(X_r_train, y_r_train).predict(X_r_test)
|
| 161 |
+
|
| 162 |
+
avg = np.average(
|
| 163 |
+
np.asarray([reg1_pred, reg2_pred, reg3_pred]), axis=0, weights=[1, 2, 10]
|
| 164 |
+
)
|
| 165 |
+
assert_almost_equal(ereg_pred, avg, decimal=2)
|
| 166 |
+
|
| 167 |
+
ereg_weights_none = VotingRegressor(
|
| 168 |
+
[("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=None
|
| 169 |
+
)
|
| 170 |
+
ereg_weights_equal = VotingRegressor(
|
| 171 |
+
[("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 1, 1]
|
| 172 |
+
)
|
| 173 |
+
ereg_weights_none.fit(X_r_train, y_r_train)
|
| 174 |
+
ereg_weights_equal.fit(X_r_train, y_r_train)
|
| 175 |
+
ereg_none_pred = ereg_weights_none.predict(X_r_test)
|
| 176 |
+
ereg_equal_pred = ereg_weights_equal.predict(X_r_test)
|
| 177 |
+
assert_almost_equal(ereg_none_pred, ereg_equal_pred, decimal=2)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def test_predict_on_toy_problem(global_random_seed):
|
| 181 |
+
"""Manually check predicted class labels for toy dataset."""
|
| 182 |
+
clf1 = LogisticRegression(random_state=global_random_seed)
|
| 183 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
|
| 184 |
+
clf3 = GaussianNB()
|
| 185 |
+
|
| 186 |
+
X = np.array(
|
| 187 |
+
[[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]]
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
y = np.array([1, 1, 1, 2, 2, 2])
|
| 191 |
+
|
| 192 |
+
assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
|
| 193 |
+
assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
|
| 194 |
+
assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
|
| 195 |
+
|
| 196 |
+
eclf = VotingClassifier(
|
| 197 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
|
| 198 |
+
voting="hard",
|
| 199 |
+
weights=[1, 1, 1],
|
| 200 |
+
)
|
| 201 |
+
assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
|
| 202 |
+
|
| 203 |
+
eclf = VotingClassifier(
|
| 204 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
|
| 205 |
+
voting="soft",
|
| 206 |
+
weights=[1, 1, 1],
|
| 207 |
+
)
|
| 208 |
+
assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def test_predict_proba_on_toy_problem():
|
| 212 |
+
"""Calculate predicted probabilities on toy dataset."""
|
| 213 |
+
clf1 = LogisticRegression(random_state=123)
|
| 214 |
+
clf2 = RandomForestClassifier(random_state=123)
|
| 215 |
+
clf3 = GaussianNB()
|
| 216 |
+
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
|
| 217 |
+
y = np.array([1, 1, 2, 2])
|
| 218 |
+
|
| 219 |
+
clf1_res = np.array(
|
| 220 |
+
[
|
| 221 |
+
[0.59790391, 0.40209609],
|
| 222 |
+
[0.57622162, 0.42377838],
|
| 223 |
+
[0.50728456, 0.49271544],
|
| 224 |
+
[0.40241774, 0.59758226],
|
| 225 |
+
]
|
| 226 |
+
)
|
| 227 |
+
|
| 228 |
+
clf2_res = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.3, 0.7]])
|
| 229 |
+
|
| 230 |
+
clf3_res = np.array(
|
| 231 |
+
[[0.9985082, 0.0014918], [0.99845843, 0.00154157], [0.0, 1.0], [0.0, 1.0]]
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
t00 = (2 * clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
|
| 235 |
+
t11 = (2 * clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
|
| 236 |
+
t21 = (2 * clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
|
| 237 |
+
t31 = (2 * clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
|
| 238 |
+
|
| 239 |
+
eclf = VotingClassifier(
|
| 240 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
|
| 241 |
+
voting="soft",
|
| 242 |
+
weights=[2, 1, 1],
|
| 243 |
+
)
|
| 244 |
+
eclf_res = eclf.fit(X, y).predict_proba(X)
|
| 245 |
+
|
| 246 |
+
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
|
| 247 |
+
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
|
| 248 |
+
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
|
| 249 |
+
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
|
| 250 |
+
|
| 251 |
+
inner_msg = "predict_proba is not available when voting='hard'"
|
| 252 |
+
outer_msg = "'VotingClassifier' has no attribute 'predict_proba'"
|
| 253 |
+
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
|
| 254 |
+
eclf = VotingClassifier(
|
| 255 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard"
|
| 256 |
+
)
|
| 257 |
+
eclf.fit(X, y).predict_proba(X)
|
| 258 |
+
|
| 259 |
+
assert isinstance(exec_info.value.__cause__, AttributeError)
|
| 260 |
+
assert inner_msg in str(exec_info.value.__cause__)
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def test_multilabel():
|
| 264 |
+
"""Check if error is raised for multilabel classification."""
|
| 265 |
+
X, y = make_multilabel_classification(
|
| 266 |
+
n_classes=2, n_labels=1, allow_unlabeled=False, random_state=123
|
| 267 |
+
)
|
| 268 |
+
clf = OneVsRestClassifier(SVC(kernel="linear"))
|
| 269 |
+
|
| 270 |
+
eclf = VotingClassifier(estimators=[("ovr", clf)], voting="hard")
|
| 271 |
+
|
| 272 |
+
try:
|
| 273 |
+
eclf.fit(X, y)
|
| 274 |
+
except NotImplementedError:
|
| 275 |
+
return
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def test_gridsearch():
|
| 279 |
+
"""Check GridSearch support."""
|
| 280 |
+
clf1 = LogisticRegression(random_state=1)
|
| 281 |
+
clf2 = RandomForestClassifier(random_state=1, n_estimators=3)
|
| 282 |
+
clf3 = GaussianNB()
|
| 283 |
+
eclf = VotingClassifier(
|
| 284 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft"
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
params = {
|
| 288 |
+
"lr__C": [1.0, 100.0],
|
| 289 |
+
"voting": ["soft", "hard"],
|
| 290 |
+
"weights": [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]],
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=2)
|
| 294 |
+
grid.fit(X_scaled, y)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def test_parallel_fit(global_random_seed):
|
| 298 |
+
"""Check parallel backend of VotingClassifier on toy dataset."""
|
| 299 |
+
clf1 = LogisticRegression(random_state=global_random_seed)
|
| 300 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
|
| 301 |
+
clf3 = GaussianNB()
|
| 302 |
+
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
|
| 303 |
+
y = np.array([1, 1, 2, 2])
|
| 304 |
+
|
| 305 |
+
eclf1 = VotingClassifier(
|
| 306 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=1
|
| 307 |
+
).fit(X, y)
|
| 308 |
+
eclf2 = VotingClassifier(
|
| 309 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=2
|
| 310 |
+
).fit(X, y)
|
| 311 |
+
|
| 312 |
+
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
|
| 313 |
+
assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
# TODO(1.7): remove warning filter when sample_weight is kwarg only
|
| 317 |
+
@pytest.mark.filterwarnings("ignore::FutureWarning")
|
| 318 |
+
def test_sample_weight(global_random_seed):
|
| 319 |
+
"""Tests sample_weight parameter of VotingClassifier"""
|
| 320 |
+
clf1 = LogisticRegression(random_state=global_random_seed)
|
| 321 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
|
| 322 |
+
clf3 = SVC(probability=True, random_state=global_random_seed)
|
| 323 |
+
eclf1 = VotingClassifier(
|
| 324 |
+
estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft"
|
| 325 |
+
).fit(X_scaled, y, sample_weight=np.ones((len(y),)))
|
| 326 |
+
eclf2 = VotingClassifier(
|
| 327 |
+
estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft"
|
| 328 |
+
).fit(X_scaled, y)
|
| 329 |
+
assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled))
|
| 330 |
+
assert_array_almost_equal(
|
| 331 |
+
eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled)
|
| 332 |
+
)
|
| 333 |
+
sample_weight = np.random.RandomState(global_random_seed).uniform(size=(len(y),))
|
| 334 |
+
eclf3 = VotingClassifier(estimators=[("lr", clf1)], voting="soft")
|
| 335 |
+
eclf3.fit(X_scaled, y, sample_weight)
|
| 336 |
+
clf1.fit(X_scaled, y, sample_weight)
|
| 337 |
+
assert_array_equal(eclf3.predict(X_scaled), clf1.predict(X_scaled))
|
| 338 |
+
assert_array_almost_equal(
|
| 339 |
+
eclf3.predict_proba(X_scaled), clf1.predict_proba(X_scaled)
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
# check that an error is raised and indicative if sample_weight is not
|
| 343 |
+
# supported.
|
| 344 |
+
clf4 = KNeighborsClassifier()
|
| 345 |
+
eclf3 = VotingClassifier(
|
| 346 |
+
estimators=[("lr", clf1), ("svc", clf3), ("knn", clf4)], voting="soft"
|
| 347 |
+
)
|
| 348 |
+
msg = "Underlying estimator KNeighborsClassifier does not support sample weights."
|
| 349 |
+
with pytest.raises(TypeError, match=msg):
|
| 350 |
+
eclf3.fit(X_scaled, y, sample_weight)
|
| 351 |
+
|
| 352 |
+
# check that _fit_single_estimator will raise the right error
|
| 353 |
+
# it should raise the original error if this is not linked to sample_weight
|
| 354 |
+
class ClassifierErrorFit(ClassifierMixin, BaseEstimator):
|
| 355 |
+
def fit(self, X_scaled, y, sample_weight):
|
| 356 |
+
raise TypeError("Error unrelated to sample_weight.")
|
| 357 |
+
|
| 358 |
+
clf = ClassifierErrorFit()
|
| 359 |
+
with pytest.raises(TypeError, match="Error unrelated to sample_weight"):
|
| 360 |
+
clf.fit(X_scaled, y, sample_weight=sample_weight)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def test_sample_weight_kwargs():
|
| 364 |
+
"""Check that VotingClassifier passes sample_weight as kwargs"""
|
| 365 |
+
|
| 366 |
+
class MockClassifier(ClassifierMixin, BaseEstimator):
|
| 367 |
+
"""Mock Classifier to check that sample_weight is received as kwargs"""
|
| 368 |
+
|
| 369 |
+
def fit(self, X, y, *args, **sample_weight):
|
| 370 |
+
assert "sample_weight" in sample_weight
|
| 371 |
+
|
| 372 |
+
clf = MockClassifier()
|
| 373 |
+
eclf = VotingClassifier(estimators=[("mock", clf)], voting="soft")
|
| 374 |
+
|
| 375 |
+
# Should not raise an error.
|
| 376 |
+
eclf.fit(X, y, sample_weight=np.ones((len(y),)))
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def test_voting_classifier_set_params(global_random_seed):
|
| 380 |
+
# check equivalence in the output when setting underlying estimators
|
| 381 |
+
clf1 = LogisticRegression(random_state=global_random_seed)
|
| 382 |
+
clf2 = RandomForestClassifier(
|
| 383 |
+
n_estimators=10, random_state=global_random_seed, max_depth=None
|
| 384 |
+
)
|
| 385 |
+
clf3 = GaussianNB()
|
| 386 |
+
|
| 387 |
+
eclf1 = VotingClassifier(
|
| 388 |
+
[("lr", clf1), ("rf", clf2)], voting="soft", weights=[1, 2]
|
| 389 |
+
).fit(X_scaled, y)
|
| 390 |
+
eclf2 = VotingClassifier(
|
| 391 |
+
[("lr", clf1), ("nb", clf3)], voting="soft", weights=[1, 2]
|
| 392 |
+
)
|
| 393 |
+
eclf2.set_params(nb=clf2).fit(X_scaled, y)
|
| 394 |
+
|
| 395 |
+
assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled))
|
| 396 |
+
assert_array_almost_equal(
|
| 397 |
+
eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled)
|
| 398 |
+
)
|
| 399 |
+
assert eclf2.estimators[0][1].get_params() == clf1.get_params()
|
| 400 |
+
assert eclf2.estimators[1][1].get_params() == clf2.get_params()
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def test_set_estimator_drop():
|
| 404 |
+
# VotingClassifier set_params should be able to set estimators as drop
|
| 405 |
+
# Test predict
|
| 406 |
+
clf1 = LogisticRegression(random_state=123)
|
| 407 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=123)
|
| 408 |
+
clf3 = GaussianNB()
|
| 409 |
+
eclf1 = VotingClassifier(
|
| 410 |
+
estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)],
|
| 411 |
+
voting="hard",
|
| 412 |
+
weights=[1, 0, 0.5],
|
| 413 |
+
).fit(X, y)
|
| 414 |
+
|
| 415 |
+
eclf2 = VotingClassifier(
|
| 416 |
+
estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)],
|
| 417 |
+
voting="hard",
|
| 418 |
+
weights=[1, 1, 0.5],
|
| 419 |
+
)
|
| 420 |
+
eclf2.set_params(rf="drop").fit(X, y)
|
| 421 |
+
|
| 422 |
+
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
|
| 423 |
+
|
| 424 |
+
assert dict(eclf2.estimators)["rf"] == "drop"
|
| 425 |
+
assert len(eclf2.estimators_) == 2
|
| 426 |
+
assert all(
|
| 427 |
+
isinstance(est, (LogisticRegression, GaussianNB)) for est in eclf2.estimators_
|
| 428 |
+
)
|
| 429 |
+
assert eclf2.get_params()["rf"] == "drop"
|
| 430 |
+
|
| 431 |
+
eclf1.set_params(voting="soft").fit(X, y)
|
| 432 |
+
eclf2.set_params(voting="soft").fit(X, y)
|
| 433 |
+
|
| 434 |
+
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
|
| 435 |
+
assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
|
| 436 |
+
msg = "All estimators are dropped. At least one is required"
|
| 437 |
+
with pytest.raises(ValueError, match=msg):
|
| 438 |
+
eclf2.set_params(lr="drop", rf="drop", nb="drop").fit(X, y)
|
| 439 |
+
|
| 440 |
+
# Test soft voting transform
|
| 441 |
+
X1 = np.array([[1], [2]])
|
| 442 |
+
y1 = np.array([1, 2])
|
| 443 |
+
eclf1 = VotingClassifier(
|
| 444 |
+
estimators=[("rf", clf2), ("nb", clf3)],
|
| 445 |
+
voting="soft",
|
| 446 |
+
weights=[0, 0.5],
|
| 447 |
+
flatten_transform=False,
|
| 448 |
+
).fit(X1, y1)
|
| 449 |
+
|
| 450 |
+
eclf2 = VotingClassifier(
|
| 451 |
+
estimators=[("rf", clf2), ("nb", clf3)],
|
| 452 |
+
voting="soft",
|
| 453 |
+
weights=[1, 0.5],
|
| 454 |
+
flatten_transform=False,
|
| 455 |
+
)
|
| 456 |
+
eclf2.set_params(rf="drop").fit(X1, y1)
|
| 457 |
+
assert_array_almost_equal(
|
| 458 |
+
eclf1.transform(X1),
|
| 459 |
+
np.array([[[0.7, 0.3], [0.3, 0.7]], [[1.0, 0.0], [0.0, 1.0]]]),
|
| 460 |
+
)
|
| 461 |
+
assert_array_almost_equal(eclf2.transform(X1), np.array([[[1.0, 0.0], [0.0, 1.0]]]))
|
| 462 |
+
eclf1.set_params(voting="hard")
|
| 463 |
+
eclf2.set_params(voting="hard")
|
| 464 |
+
assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]]))
|
| 465 |
+
assert_array_equal(eclf2.transform(X1), np.array([[0], [1]]))
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def test_estimator_weights_format(global_random_seed):
|
| 469 |
+
# Test estimator weights inputs as list and array
|
| 470 |
+
clf1 = LogisticRegression(random_state=global_random_seed)
|
| 471 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
|
| 472 |
+
eclf1 = VotingClassifier(
|
| 473 |
+
estimators=[("lr", clf1), ("rf", clf2)], weights=[1, 2], voting="soft"
|
| 474 |
+
)
|
| 475 |
+
eclf2 = VotingClassifier(
|
| 476 |
+
estimators=[("lr", clf1), ("rf", clf2)], weights=np.array((1, 2)), voting="soft"
|
| 477 |
+
)
|
| 478 |
+
eclf1.fit(X_scaled, y)
|
| 479 |
+
eclf2.fit(X_scaled, y)
|
| 480 |
+
assert_array_almost_equal(
|
| 481 |
+
eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled)
|
| 482 |
+
)
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def test_transform(global_random_seed):
|
| 486 |
+
"""Check transform method of VotingClassifier on toy dataset."""
|
| 487 |
+
clf1 = LogisticRegression(random_state=global_random_seed)
|
| 488 |
+
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
|
| 489 |
+
clf3 = GaussianNB()
|
| 490 |
+
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
|
| 491 |
+
y = np.array([1, 1, 2, 2])
|
| 492 |
+
|
| 493 |
+
eclf1 = VotingClassifier(
|
| 494 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft"
|
| 495 |
+
).fit(X, y)
|
| 496 |
+
eclf2 = VotingClassifier(
|
| 497 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
|
| 498 |
+
voting="soft",
|
| 499 |
+
flatten_transform=True,
|
| 500 |
+
).fit(X, y)
|
| 501 |
+
eclf3 = VotingClassifier(
|
| 502 |
+
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
|
| 503 |
+
voting="soft",
|
| 504 |
+
flatten_transform=False,
|
| 505 |
+
).fit(X, y)
|
| 506 |
+
|
| 507 |
+
assert_array_equal(eclf1.transform(X).shape, (4, 6))
|
| 508 |
+
assert_array_equal(eclf2.transform(X).shape, (4, 6))
|
| 509 |
+
assert_array_equal(eclf3.transform(X).shape, (3, 4, 2))
|
| 510 |
+
assert_array_almost_equal(eclf1.transform(X), eclf2.transform(X))
|
| 511 |
+
assert_array_almost_equal(
|
| 512 |
+
eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)), eclf2.transform(X)
|
| 513 |
+
)
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
@pytest.mark.parametrize(
|
| 517 |
+
"X, y, voter",
|
| 518 |
+
[
|
| 519 |
+
(
|
| 520 |
+
X,
|
| 521 |
+
y,
|
| 522 |
+
VotingClassifier(
|
| 523 |
+
[
|
| 524 |
+
("lr", LogisticRegression()),
|
| 525 |
+
("rf", RandomForestClassifier(n_estimators=5)),
|
| 526 |
+
]
|
| 527 |
+
),
|
| 528 |
+
),
|
| 529 |
+
(
|
| 530 |
+
X_r,
|
| 531 |
+
y_r,
|
| 532 |
+
VotingRegressor(
|
| 533 |
+
[
|
| 534 |
+
("lr", LinearRegression()),
|
| 535 |
+
("rf", RandomForestRegressor(n_estimators=5)),
|
| 536 |
+
]
|
| 537 |
+
),
|
| 538 |
+
),
|
| 539 |
+
],
|
| 540 |
+
)
|
| 541 |
+
def test_none_estimator_with_weights(X, y, voter):
|
| 542 |
+
# check that an estimator can be set to 'drop' and passing some weight
|
| 543 |
+
# regression test for
|
| 544 |
+
# https://github.com/scikit-learn/scikit-learn/issues/13777
|
| 545 |
+
voter = clone(voter)
|
| 546 |
+
# Scaled to solve ConvergenceWarning throw by Logistic Regression
|
| 547 |
+
X_scaled = StandardScaler().fit_transform(X)
|
| 548 |
+
voter.fit(X_scaled, y, sample_weight=np.ones(y.shape))
|
| 549 |
+
voter.set_params(lr="drop")
|
| 550 |
+
voter.fit(X_scaled, y, sample_weight=np.ones(y.shape))
|
| 551 |
+
y_pred = voter.predict(X_scaled)
|
| 552 |
+
assert y_pred.shape == y.shape
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
@pytest.mark.parametrize(
|
| 556 |
+
"est",
|
| 557 |
+
[
|
| 558 |
+
VotingRegressor(
|
| 559 |
+
estimators=[
|
| 560 |
+
("lr", LinearRegression()),
|
| 561 |
+
("tree", DecisionTreeRegressor(random_state=0)),
|
| 562 |
+
]
|
| 563 |
+
),
|
| 564 |
+
VotingClassifier(
|
| 565 |
+
estimators=[
|
| 566 |
+
("lr", LogisticRegression(random_state=0)),
|
| 567 |
+
("tree", DecisionTreeClassifier(random_state=0)),
|
| 568 |
+
]
|
| 569 |
+
),
|
| 570 |
+
],
|
| 571 |
+
ids=["VotingRegressor", "VotingClassifier"],
|
| 572 |
+
)
|
| 573 |
+
def test_n_features_in(est):
|
| 574 |
+
X = [[1, 2], [3, 4], [5, 6]]
|
| 575 |
+
y = [0, 1, 2]
|
| 576 |
+
|
| 577 |
+
assert not hasattr(est, "n_features_in_")
|
| 578 |
+
est.fit(X, y)
|
| 579 |
+
assert est.n_features_in_ == 2
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
@pytest.mark.parametrize(
|
| 583 |
+
"estimator",
|
| 584 |
+
[
|
| 585 |
+
VotingRegressor(
|
| 586 |
+
estimators=[
|
| 587 |
+
("lr", LinearRegression()),
|
| 588 |
+
("rf", RandomForestRegressor(random_state=123)),
|
| 589 |
+
],
|
| 590 |
+
verbose=True,
|
| 591 |
+
),
|
| 592 |
+
VotingClassifier(
|
| 593 |
+
estimators=[
|
| 594 |
+
("lr", LogisticRegression(random_state=123)),
|
| 595 |
+
("rf", RandomForestClassifier(random_state=123)),
|
| 596 |
+
],
|
| 597 |
+
verbose=True,
|
| 598 |
+
),
|
| 599 |
+
],
|
| 600 |
+
)
|
| 601 |
+
def test_voting_verbose(estimator, capsys):
|
| 602 |
+
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
|
| 603 |
+
y = np.array([1, 1, 2, 2])
|
| 604 |
+
|
| 605 |
+
pattern = (
|
| 606 |
+
r"\[Voting\].*\(1 of 2\) Processing lr, total=.*\n"
|
| 607 |
+
r"\[Voting\].*\(2 of 2\) Processing rf, total=.*\n$"
|
| 608 |
+
)
|
| 609 |
+
clone(estimator).fit(X, y)
|
| 610 |
+
assert re.match(pattern, capsys.readouterr()[0])
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
def test_get_features_names_out_regressor():
|
| 614 |
+
"""Check get_feature_names_out output for regressor."""
|
| 615 |
+
|
| 616 |
+
X = [[1, 2], [3, 4], [5, 6]]
|
| 617 |
+
y = [0, 1, 2]
|
| 618 |
+
|
| 619 |
+
voting = VotingRegressor(
|
| 620 |
+
estimators=[
|
| 621 |
+
("lr", LinearRegression()),
|
| 622 |
+
("tree", DecisionTreeRegressor(random_state=0)),
|
| 623 |
+
("ignore", "drop"),
|
| 624 |
+
]
|
| 625 |
+
)
|
| 626 |
+
voting.fit(X, y)
|
| 627 |
+
|
| 628 |
+
names_out = voting.get_feature_names_out()
|
| 629 |
+
expected_names = ["votingregressor_lr", "votingregressor_tree"]
|
| 630 |
+
assert_array_equal(names_out, expected_names)
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
@pytest.mark.parametrize(
|
| 634 |
+
"kwargs, expected_names",
|
| 635 |
+
[
|
| 636 |
+
(
|
| 637 |
+
{"voting": "soft", "flatten_transform": True},
|
| 638 |
+
[
|
| 639 |
+
"votingclassifier_lr0",
|
| 640 |
+
"votingclassifier_lr1",
|
| 641 |
+
"votingclassifier_lr2",
|
| 642 |
+
"votingclassifier_tree0",
|
| 643 |
+
"votingclassifier_tree1",
|
| 644 |
+
"votingclassifier_tree2",
|
| 645 |
+
],
|
| 646 |
+
),
|
| 647 |
+
({"voting": "hard"}, ["votingclassifier_lr", "votingclassifier_tree"]),
|
| 648 |
+
],
|
| 649 |
+
)
|
| 650 |
+
def test_get_features_names_out_classifier(kwargs, expected_names):
|
| 651 |
+
"""Check get_feature_names_out for classifier for different settings."""
|
| 652 |
+
X = [[1, 2], [3, 4], [5, 6], [1, 1.2]]
|
| 653 |
+
y = [0, 1, 2, 0]
|
| 654 |
+
|
| 655 |
+
voting = VotingClassifier(
|
| 656 |
+
estimators=[
|
| 657 |
+
("lr", LogisticRegression(random_state=0)),
|
| 658 |
+
("tree", DecisionTreeClassifier(random_state=0)),
|
| 659 |
+
],
|
| 660 |
+
**kwargs,
|
| 661 |
+
)
|
| 662 |
+
voting.fit(X, y)
|
| 663 |
+
X_trans = voting.transform(X)
|
| 664 |
+
names_out = voting.get_feature_names_out()
|
| 665 |
+
|
| 666 |
+
assert X_trans.shape[1] == len(expected_names)
|
| 667 |
+
assert_array_equal(names_out, expected_names)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
def test_get_features_names_out_classifier_error():
|
| 671 |
+
"""Check that error is raised when voting="soft" and flatten_transform=False."""
|
| 672 |
+
X = [[1, 2], [3, 4], [5, 6]]
|
| 673 |
+
y = [0, 1, 2]
|
| 674 |
+
|
| 675 |
+
voting = VotingClassifier(
|
| 676 |
+
estimators=[
|
| 677 |
+
("lr", LogisticRegression(random_state=0)),
|
| 678 |
+
("tree", DecisionTreeClassifier(random_state=0)),
|
| 679 |
+
],
|
| 680 |
+
voting="soft",
|
| 681 |
+
flatten_transform=False,
|
| 682 |
+
)
|
| 683 |
+
voting.fit(X, y)
|
| 684 |
+
|
| 685 |
+
msg = (
|
| 686 |
+
"get_feature_names_out is not supported when `voting='soft'` and "
|
| 687 |
+
"`flatten_transform=False`"
|
| 688 |
+
)
|
| 689 |
+
with pytest.raises(ValueError, match=msg):
|
| 690 |
+
voting.get_feature_names_out()
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
# Metadata Routing Tests
|
| 694 |
+
# ======================
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
@pytest.mark.parametrize(
|
| 698 |
+
"Estimator, Child",
|
| 699 |
+
[(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
|
| 700 |
+
)
|
| 701 |
+
def test_routing_passed_metadata_not_supported(Estimator, Child):
|
| 702 |
+
"""Test that the right error message is raised when metadata is passed while
|
| 703 |
+
not supported when `enable_metadata_routing=False`."""
|
| 704 |
+
|
| 705 |
+
X = np.array([[0, 1], [2, 2], [4, 6]])
|
| 706 |
+
y = [1, 2, 3]
|
| 707 |
+
|
| 708 |
+
with pytest.raises(
|
| 709 |
+
ValueError, match="is only supported if enable_metadata_routing=True"
|
| 710 |
+
):
|
| 711 |
+
Estimator(["clf", Child()]).fit(X, y, sample_weight=[1, 1, 1], metadata="a")
|
| 712 |
+
|
| 713 |
+
|
| 714 |
+
@pytest.mark.parametrize(
|
| 715 |
+
"Estimator, Child",
|
| 716 |
+
[(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
|
| 717 |
+
)
|
| 718 |
+
@config_context(enable_metadata_routing=True)
|
| 719 |
+
def test_get_metadata_routing_without_fit(Estimator, Child):
|
| 720 |
+
# Test that metadata_routing() doesn't raise when called before fit.
|
| 721 |
+
est = Estimator([("sub_est", Child())])
|
| 722 |
+
est.get_metadata_routing()
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
@pytest.mark.parametrize(
|
| 726 |
+
"Estimator, Child",
|
| 727 |
+
[(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
|
| 728 |
+
)
|
| 729 |
+
@pytest.mark.parametrize("prop", ["sample_weight", "metadata"])
|
| 730 |
+
@config_context(enable_metadata_routing=True)
|
| 731 |
+
def test_metadata_routing_for_voting_estimators(Estimator, Child, prop):
|
| 732 |
+
"""Test that metadata is routed correctly for Voting*."""
|
| 733 |
+
X = np.array([[0, 1], [2, 2], [4, 6]])
|
| 734 |
+
y = [1, 2, 3]
|
| 735 |
+
sample_weight, metadata = [1, 1, 1], "a"
|
| 736 |
+
|
| 737 |
+
est = Estimator(
|
| 738 |
+
[
|
| 739 |
+
(
|
| 740 |
+
"sub_est1",
|
| 741 |
+
Child(registry=_Registry()).set_fit_request(**{prop: True}),
|
| 742 |
+
),
|
| 743 |
+
(
|
| 744 |
+
"sub_est2",
|
| 745 |
+
Child(registry=_Registry()).set_fit_request(**{prop: True}),
|
| 746 |
+
),
|
| 747 |
+
]
|
| 748 |
+
)
|
| 749 |
+
|
| 750 |
+
est.fit(X, y, **{prop: sample_weight if prop == "sample_weight" else metadata})
|
| 751 |
+
|
| 752 |
+
for estimator in est.estimators:
|
| 753 |
+
if prop == "sample_weight":
|
| 754 |
+
kwargs = {prop: sample_weight}
|
| 755 |
+
else:
|
| 756 |
+
kwargs = {prop: metadata}
|
| 757 |
+
# access sub-estimator in (name, est) with estimator[1]
|
| 758 |
+
registry = estimator[1].registry
|
| 759 |
+
assert len(registry)
|
| 760 |
+
for sub_est in registry:
|
| 761 |
+
check_recorded_metadata(obj=sub_est, method="fit", parent="fit", **kwargs)
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
@pytest.mark.parametrize(
|
| 765 |
+
"Estimator, Child",
|
| 766 |
+
[(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
|
| 767 |
+
)
|
| 768 |
+
@config_context(enable_metadata_routing=True)
|
| 769 |
+
def test_metadata_routing_error_for_voting_estimators(Estimator, Child):
|
| 770 |
+
"""Test that the right error is raised when metadata is not requested."""
|
| 771 |
+
X = np.array([[0, 1], [2, 2], [4, 6]])
|
| 772 |
+
y = [1, 2, 3]
|
| 773 |
+
sample_weight, metadata = [1, 1, 1], "a"
|
| 774 |
+
|
| 775 |
+
est = Estimator([("sub_est", Child())])
|
| 776 |
+
|
| 777 |
+
error_message = (
|
| 778 |
+
"[sample_weight, metadata] are passed but are not explicitly set as requested"
|
| 779 |
+
f" or not requested for {Child.__name__}.fit"
|
| 780 |
+
)
|
| 781 |
+
|
| 782 |
+
with pytest.raises(ValueError, match=re.escape(error_message)):
|
| 783 |
+
est.fit(X, y, sample_weight=sample_weight, metadata=metadata)
|
| 784 |
+
|
| 785 |
+
|
| 786 |
+
# End of Metadata Routing Tests
|
| 787 |
+
# =============================
|
openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py
ADDED
|
@@ -0,0 +1,639 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Testing for the boost module (sklearn.ensemble.boost)."""
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from sklearn import datasets
|
| 9 |
+
from sklearn.base import BaseEstimator, clone
|
| 10 |
+
from sklearn.dummy import DummyClassifier, DummyRegressor
|
| 11 |
+
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
|
| 12 |
+
from sklearn.ensemble._weight_boosting import _samme_proba
|
| 13 |
+
from sklearn.linear_model import LinearRegression
|
| 14 |
+
from sklearn.model_selection import GridSearchCV, train_test_split
|
| 15 |
+
from sklearn.svm import SVC, SVR
|
| 16 |
+
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
|
| 17 |
+
from sklearn.utils import shuffle
|
| 18 |
+
from sklearn.utils._mocking import NoSampleWeightWrapper
|
| 19 |
+
from sklearn.utils._testing import (
|
| 20 |
+
assert_allclose,
|
| 21 |
+
assert_array_almost_equal,
|
| 22 |
+
assert_array_equal,
|
| 23 |
+
)
|
| 24 |
+
from sklearn.utils.fixes import (
|
| 25 |
+
COO_CONTAINERS,
|
| 26 |
+
CSC_CONTAINERS,
|
| 27 |
+
CSR_CONTAINERS,
|
| 28 |
+
DOK_CONTAINERS,
|
| 29 |
+
LIL_CONTAINERS,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
# Common random state
|
| 33 |
+
rng = np.random.RandomState(0)
|
| 34 |
+
|
| 35 |
+
# Toy sample
|
| 36 |
+
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
|
| 37 |
+
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
|
| 38 |
+
y_regr = [-1, -1, -1, 1, 1, 1]
|
| 39 |
+
T = [[-1, -1], [2, 2], [3, 2]]
|
| 40 |
+
y_t_class = ["foo", 1, 1]
|
| 41 |
+
y_t_regr = [-1, 1, 1]
|
| 42 |
+
|
| 43 |
+
# Load the iris dataset and randomly permute it
|
| 44 |
+
iris = datasets.load_iris()
|
| 45 |
+
perm = rng.permutation(iris.target.size)
|
| 46 |
+
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
|
| 47 |
+
|
| 48 |
+
# Load the diabetes dataset and randomly permute it
|
| 49 |
+
diabetes = datasets.load_diabetes()
|
| 50 |
+
diabetes.data, diabetes.target = shuffle(
|
| 51 |
+
diabetes.data, diabetes.target, random_state=rng
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def test_samme_proba():
|
| 56 |
+
# Test the `_samme_proba` helper function.
|
| 57 |
+
|
| 58 |
+
# Define some example (bad) `predict_proba` output.
|
| 59 |
+
probs = np.array(
|
| 60 |
+
[[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]]
|
| 61 |
+
)
|
| 62 |
+
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
|
| 63 |
+
|
| 64 |
+
# _samme_proba calls estimator.predict_proba.
|
| 65 |
+
# Make a mock object so I can control what gets returned.
|
| 66 |
+
class MockEstimator:
|
| 67 |
+
def predict_proba(self, X):
|
| 68 |
+
assert_array_equal(X.shape, probs.shape)
|
| 69 |
+
return probs
|
| 70 |
+
|
| 71 |
+
mock = MockEstimator()
|
| 72 |
+
|
| 73 |
+
samme_proba = _samme_proba(mock, 3, np.ones_like(probs))
|
| 74 |
+
|
| 75 |
+
assert_array_equal(samme_proba.shape, probs.shape)
|
| 76 |
+
assert np.isfinite(samme_proba).all()
|
| 77 |
+
|
| 78 |
+
# Make sure that the correct elements come out as smallest --
|
| 79 |
+
# `_samme_proba` should preserve the ordering in each example.
|
| 80 |
+
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
|
| 81 |
+
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def test_oneclass_adaboost_proba():
|
| 85 |
+
# Test predict_proba robustness for one class label input.
|
| 86 |
+
# In response to issue #7501
|
| 87 |
+
# https://github.com/scikit-learn/scikit-learn/issues/7501
|
| 88 |
+
y_t = np.ones(len(X))
|
| 89 |
+
clf = AdaBoostClassifier().fit(X, y_t)
|
| 90 |
+
assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1)))
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def test_classification_toy():
|
| 94 |
+
# Check classification on a toy dataset.
|
| 95 |
+
clf = AdaBoostClassifier(random_state=0)
|
| 96 |
+
clf.fit(X, y_class)
|
| 97 |
+
assert_array_equal(clf.predict(T), y_t_class)
|
| 98 |
+
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
|
| 99 |
+
assert clf.predict_proba(T).shape == (len(T), 2)
|
| 100 |
+
assert clf.decision_function(T).shape == (len(T),)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def test_regression_toy():
|
| 104 |
+
# Check classification on a toy dataset.
|
| 105 |
+
clf = AdaBoostRegressor(random_state=0)
|
| 106 |
+
clf.fit(X, y_regr)
|
| 107 |
+
assert_array_equal(clf.predict(T), y_t_regr)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def test_iris():
|
| 111 |
+
# Check consistency on dataset iris.
|
| 112 |
+
classes = np.unique(iris.target)
|
| 113 |
+
|
| 114 |
+
clf = AdaBoostClassifier()
|
| 115 |
+
clf.fit(iris.data, iris.target)
|
| 116 |
+
|
| 117 |
+
assert_array_equal(classes, clf.classes_)
|
| 118 |
+
proba = clf.predict_proba(iris.data)
|
| 119 |
+
|
| 120 |
+
assert proba.shape[1] == len(classes)
|
| 121 |
+
assert clf.decision_function(iris.data).shape[1] == len(classes)
|
| 122 |
+
|
| 123 |
+
score = clf.score(iris.data, iris.target)
|
| 124 |
+
assert score > 0.9, f"Failed with {score = }"
|
| 125 |
+
|
| 126 |
+
# Check we used multiple estimators
|
| 127 |
+
assert len(clf.estimators_) > 1
|
| 128 |
+
# Check for distinct random states (see issue #7408)
|
| 129 |
+
assert len(set(est.random_state for est in clf.estimators_)) == len(clf.estimators_)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@pytest.mark.parametrize("loss", ["linear", "square", "exponential"])
|
| 133 |
+
def test_diabetes(loss):
|
| 134 |
+
# Check consistency on dataset diabetes.
|
| 135 |
+
reg = AdaBoostRegressor(loss=loss, random_state=0)
|
| 136 |
+
reg.fit(diabetes.data, diabetes.target)
|
| 137 |
+
score = reg.score(diabetes.data, diabetes.target)
|
| 138 |
+
assert score > 0.55
|
| 139 |
+
|
| 140 |
+
# Check we used multiple estimators
|
| 141 |
+
assert len(reg.estimators_) > 1
|
| 142 |
+
# Check for distinct random states (see issue #7408)
|
| 143 |
+
assert len(set(est.random_state for est in reg.estimators_)) == len(reg.estimators_)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def test_staged_predict():
|
| 147 |
+
# Check staged predictions.
|
| 148 |
+
rng = np.random.RandomState(0)
|
| 149 |
+
iris_weights = rng.randint(10, size=iris.target.shape)
|
| 150 |
+
diabetes_weights = rng.randint(10, size=diabetes.target.shape)
|
| 151 |
+
|
| 152 |
+
clf = AdaBoostClassifier(n_estimators=10)
|
| 153 |
+
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
|
| 154 |
+
|
| 155 |
+
predictions = clf.predict(iris.data)
|
| 156 |
+
staged_predictions = [p for p in clf.staged_predict(iris.data)]
|
| 157 |
+
proba = clf.predict_proba(iris.data)
|
| 158 |
+
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
|
| 159 |
+
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
|
| 160 |
+
staged_scores = [
|
| 161 |
+
s for s in clf.staged_score(iris.data, iris.target, sample_weight=iris_weights)
|
| 162 |
+
]
|
| 163 |
+
|
| 164 |
+
assert len(staged_predictions) == 10
|
| 165 |
+
assert_array_almost_equal(predictions, staged_predictions[-1])
|
| 166 |
+
assert len(staged_probas) == 10
|
| 167 |
+
assert_array_almost_equal(proba, staged_probas[-1])
|
| 168 |
+
assert len(staged_scores) == 10
|
| 169 |
+
assert_array_almost_equal(score, staged_scores[-1])
|
| 170 |
+
|
| 171 |
+
# AdaBoost regression
|
| 172 |
+
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
|
| 173 |
+
clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights)
|
| 174 |
+
|
| 175 |
+
predictions = clf.predict(diabetes.data)
|
| 176 |
+
staged_predictions = [p for p in clf.staged_predict(diabetes.data)]
|
| 177 |
+
score = clf.score(diabetes.data, diabetes.target, sample_weight=diabetes_weights)
|
| 178 |
+
staged_scores = [
|
| 179 |
+
s
|
| 180 |
+
for s in clf.staged_score(
|
| 181 |
+
diabetes.data, diabetes.target, sample_weight=diabetes_weights
|
| 182 |
+
)
|
| 183 |
+
]
|
| 184 |
+
|
| 185 |
+
assert len(staged_predictions) == 10
|
| 186 |
+
assert_array_almost_equal(predictions, staged_predictions[-1])
|
| 187 |
+
assert len(staged_scores) == 10
|
| 188 |
+
assert_array_almost_equal(score, staged_scores[-1])
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def test_gridsearch():
|
| 192 |
+
# Check that base trees can be grid-searched.
|
| 193 |
+
# AdaBoost classification
|
| 194 |
+
boost = AdaBoostClassifier(estimator=DecisionTreeClassifier())
|
| 195 |
+
parameters = {
|
| 196 |
+
"n_estimators": (1, 2),
|
| 197 |
+
"estimator__max_depth": (1, 2),
|
| 198 |
+
}
|
| 199 |
+
clf = GridSearchCV(boost, parameters)
|
| 200 |
+
clf.fit(iris.data, iris.target)
|
| 201 |
+
|
| 202 |
+
# AdaBoost regression
|
| 203 |
+
boost = AdaBoostRegressor(estimator=DecisionTreeRegressor(), random_state=0)
|
| 204 |
+
parameters = {"n_estimators": (1, 2), "estimator__max_depth": (1, 2)}
|
| 205 |
+
clf = GridSearchCV(boost, parameters)
|
| 206 |
+
clf.fit(diabetes.data, diabetes.target)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def test_pickle():
|
| 210 |
+
# Check pickability.
|
| 211 |
+
import pickle
|
| 212 |
+
|
| 213 |
+
# Adaboost classifier
|
| 214 |
+
obj = AdaBoostClassifier()
|
| 215 |
+
obj.fit(iris.data, iris.target)
|
| 216 |
+
score = obj.score(iris.data, iris.target)
|
| 217 |
+
s = pickle.dumps(obj)
|
| 218 |
+
|
| 219 |
+
obj2 = pickle.loads(s)
|
| 220 |
+
assert type(obj2) == obj.__class__
|
| 221 |
+
score2 = obj2.score(iris.data, iris.target)
|
| 222 |
+
assert score == score2
|
| 223 |
+
|
| 224 |
+
# Adaboost regressor
|
| 225 |
+
obj = AdaBoostRegressor(random_state=0)
|
| 226 |
+
obj.fit(diabetes.data, diabetes.target)
|
| 227 |
+
score = obj.score(diabetes.data, diabetes.target)
|
| 228 |
+
s = pickle.dumps(obj)
|
| 229 |
+
|
| 230 |
+
obj2 = pickle.loads(s)
|
| 231 |
+
assert type(obj2) == obj.__class__
|
| 232 |
+
score2 = obj2.score(diabetes.data, diabetes.target)
|
| 233 |
+
assert score == score2
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def test_importances():
|
| 237 |
+
# Check variable importances.
|
| 238 |
+
X, y = datasets.make_classification(
|
| 239 |
+
n_samples=2000,
|
| 240 |
+
n_features=10,
|
| 241 |
+
n_informative=3,
|
| 242 |
+
n_redundant=0,
|
| 243 |
+
n_repeated=0,
|
| 244 |
+
shuffle=False,
|
| 245 |
+
random_state=1,
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
clf = AdaBoostClassifier()
|
| 249 |
+
|
| 250 |
+
clf.fit(X, y)
|
| 251 |
+
importances = clf.feature_importances_
|
| 252 |
+
|
| 253 |
+
assert importances.shape[0] == 10
|
| 254 |
+
assert (importances[:3, np.newaxis] >= importances[3:]).all()
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def test_adaboost_classifier_sample_weight_error():
|
| 258 |
+
# Test that it gives proper exception on incorrect sample weight.
|
| 259 |
+
clf = AdaBoostClassifier()
|
| 260 |
+
msg = re.escape("sample_weight.shape == (1,), expected (6,)")
|
| 261 |
+
with pytest.raises(ValueError, match=msg):
|
| 262 |
+
clf.fit(X, y_class, sample_weight=np.asarray([-1]))
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def test_estimator():
|
| 266 |
+
# Test different estimators.
|
| 267 |
+
from sklearn.ensemble import RandomForestClassifier
|
| 268 |
+
|
| 269 |
+
# XXX doesn't work with y_class because RF doesn't support classes_
|
| 270 |
+
# Shouldn't AdaBoost run a LabelBinarizer?
|
| 271 |
+
clf = AdaBoostClassifier(RandomForestClassifier())
|
| 272 |
+
clf.fit(X, y_regr)
|
| 273 |
+
|
| 274 |
+
clf = AdaBoostClassifier(SVC())
|
| 275 |
+
clf.fit(X, y_class)
|
| 276 |
+
|
| 277 |
+
from sklearn.ensemble import RandomForestRegressor
|
| 278 |
+
|
| 279 |
+
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
|
| 280 |
+
clf.fit(X, y_regr)
|
| 281 |
+
|
| 282 |
+
clf = AdaBoostRegressor(SVR(), random_state=0)
|
| 283 |
+
clf.fit(X, y_regr)
|
| 284 |
+
|
| 285 |
+
# Check that an empty discrete ensemble fails in fit, not predict.
|
| 286 |
+
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
|
| 287 |
+
y_fail = ["foo", "bar", 1, 2]
|
| 288 |
+
clf = AdaBoostClassifier(SVC())
|
| 289 |
+
with pytest.raises(ValueError, match="worse than random"):
|
| 290 |
+
clf.fit(X_fail, y_fail)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def test_sample_weights_infinite():
|
| 294 |
+
msg = "Sample weights have reached infinite values"
|
| 295 |
+
clf = AdaBoostClassifier(n_estimators=30, learning_rate=23.0)
|
| 296 |
+
with pytest.warns(UserWarning, match=msg):
|
| 297 |
+
clf.fit(iris.data, iris.target)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@pytest.mark.parametrize(
|
| 301 |
+
"sparse_container, expected_internal_type",
|
| 302 |
+
zip(
|
| 303 |
+
[
|
| 304 |
+
*CSC_CONTAINERS,
|
| 305 |
+
*CSR_CONTAINERS,
|
| 306 |
+
*LIL_CONTAINERS,
|
| 307 |
+
*COO_CONTAINERS,
|
| 308 |
+
*DOK_CONTAINERS,
|
| 309 |
+
],
|
| 310 |
+
CSC_CONTAINERS + 4 * CSR_CONTAINERS,
|
| 311 |
+
),
|
| 312 |
+
)
|
| 313 |
+
def test_sparse_classification(sparse_container, expected_internal_type):
|
| 314 |
+
# Check classification with sparse input.
|
| 315 |
+
|
| 316 |
+
class CustomSVC(SVC):
|
| 317 |
+
"""SVC variant that records the nature of the training set."""
|
| 318 |
+
|
| 319 |
+
def fit(self, X, y, sample_weight=None):
|
| 320 |
+
"""Modification on fit caries data type for later verification."""
|
| 321 |
+
super().fit(X, y, sample_weight=sample_weight)
|
| 322 |
+
self.data_type_ = type(X)
|
| 323 |
+
return self
|
| 324 |
+
|
| 325 |
+
X, y = datasets.make_multilabel_classification(
|
| 326 |
+
n_classes=1, n_samples=15, n_features=5, random_state=42
|
| 327 |
+
)
|
| 328 |
+
# Flatten y to a 1d array
|
| 329 |
+
y = np.ravel(y)
|
| 330 |
+
|
| 331 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
|
| 332 |
+
|
| 333 |
+
X_train_sparse = sparse_container(X_train)
|
| 334 |
+
X_test_sparse = sparse_container(X_test)
|
| 335 |
+
|
| 336 |
+
# Trained on sparse format
|
| 337 |
+
sparse_classifier = AdaBoostClassifier(
|
| 338 |
+
estimator=CustomSVC(probability=True),
|
| 339 |
+
random_state=1,
|
| 340 |
+
).fit(X_train_sparse, y_train)
|
| 341 |
+
|
| 342 |
+
# Trained on dense format
|
| 343 |
+
dense_classifier = AdaBoostClassifier(
|
| 344 |
+
estimator=CustomSVC(probability=True),
|
| 345 |
+
random_state=1,
|
| 346 |
+
).fit(X_train, y_train)
|
| 347 |
+
|
| 348 |
+
# predict
|
| 349 |
+
sparse_clf_results = sparse_classifier.predict(X_test_sparse)
|
| 350 |
+
dense_clf_results = dense_classifier.predict(X_test)
|
| 351 |
+
assert_array_equal(sparse_clf_results, dense_clf_results)
|
| 352 |
+
|
| 353 |
+
# decision_function
|
| 354 |
+
sparse_clf_results = sparse_classifier.decision_function(X_test_sparse)
|
| 355 |
+
dense_clf_results = dense_classifier.decision_function(X_test)
|
| 356 |
+
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
|
| 357 |
+
|
| 358 |
+
# predict_log_proba
|
| 359 |
+
sparse_clf_results = sparse_classifier.predict_log_proba(X_test_sparse)
|
| 360 |
+
dense_clf_results = dense_classifier.predict_log_proba(X_test)
|
| 361 |
+
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
|
| 362 |
+
|
| 363 |
+
# predict_proba
|
| 364 |
+
sparse_clf_results = sparse_classifier.predict_proba(X_test_sparse)
|
| 365 |
+
dense_clf_results = dense_classifier.predict_proba(X_test)
|
| 366 |
+
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
|
| 367 |
+
|
| 368 |
+
# score
|
| 369 |
+
sparse_clf_results = sparse_classifier.score(X_test_sparse, y_test)
|
| 370 |
+
dense_clf_results = dense_classifier.score(X_test, y_test)
|
| 371 |
+
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
|
| 372 |
+
|
| 373 |
+
# staged_decision_function
|
| 374 |
+
sparse_clf_results = sparse_classifier.staged_decision_function(X_test_sparse)
|
| 375 |
+
dense_clf_results = dense_classifier.staged_decision_function(X_test)
|
| 376 |
+
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
|
| 377 |
+
assert_array_almost_equal(sparse_clf_res, dense_clf_res)
|
| 378 |
+
|
| 379 |
+
# staged_predict
|
| 380 |
+
sparse_clf_results = sparse_classifier.staged_predict(X_test_sparse)
|
| 381 |
+
dense_clf_results = dense_classifier.staged_predict(X_test)
|
| 382 |
+
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
|
| 383 |
+
assert_array_equal(sparse_clf_res, dense_clf_res)
|
| 384 |
+
|
| 385 |
+
# staged_predict_proba
|
| 386 |
+
sparse_clf_results = sparse_classifier.staged_predict_proba(X_test_sparse)
|
| 387 |
+
dense_clf_results = dense_classifier.staged_predict_proba(X_test)
|
| 388 |
+
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
|
| 389 |
+
assert_array_almost_equal(sparse_clf_res, dense_clf_res)
|
| 390 |
+
|
| 391 |
+
# staged_score
|
| 392 |
+
sparse_clf_results = sparse_classifier.staged_score(X_test_sparse, y_test)
|
| 393 |
+
dense_clf_results = dense_classifier.staged_score(X_test, y_test)
|
| 394 |
+
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
|
| 395 |
+
assert_array_equal(sparse_clf_res, dense_clf_res)
|
| 396 |
+
|
| 397 |
+
# Verify sparsity of data is maintained during training
|
| 398 |
+
types = [i.data_type_ for i in sparse_classifier.estimators_]
|
| 399 |
+
|
| 400 |
+
assert all([t == expected_internal_type for t in types])
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
@pytest.mark.parametrize(
|
| 404 |
+
"sparse_container, expected_internal_type",
|
| 405 |
+
zip(
|
| 406 |
+
[
|
| 407 |
+
*CSC_CONTAINERS,
|
| 408 |
+
*CSR_CONTAINERS,
|
| 409 |
+
*LIL_CONTAINERS,
|
| 410 |
+
*COO_CONTAINERS,
|
| 411 |
+
*DOK_CONTAINERS,
|
| 412 |
+
],
|
| 413 |
+
CSC_CONTAINERS + 4 * CSR_CONTAINERS,
|
| 414 |
+
),
|
| 415 |
+
)
|
| 416 |
+
def test_sparse_regression(sparse_container, expected_internal_type):
|
| 417 |
+
# Check regression with sparse input.
|
| 418 |
+
|
| 419 |
+
class CustomSVR(SVR):
|
| 420 |
+
"""SVR variant that records the nature of the training set."""
|
| 421 |
+
|
| 422 |
+
def fit(self, X, y, sample_weight=None):
|
| 423 |
+
"""Modification on fit caries data type for later verification."""
|
| 424 |
+
super().fit(X, y, sample_weight=sample_weight)
|
| 425 |
+
self.data_type_ = type(X)
|
| 426 |
+
return self
|
| 427 |
+
|
| 428 |
+
X, y = datasets.make_regression(
|
| 429 |
+
n_samples=15, n_features=50, n_targets=1, random_state=42
|
| 430 |
+
)
|
| 431 |
+
|
| 432 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
|
| 433 |
+
|
| 434 |
+
X_train_sparse = sparse_container(X_train)
|
| 435 |
+
X_test_sparse = sparse_container(X_test)
|
| 436 |
+
|
| 437 |
+
# Trained on sparse format
|
| 438 |
+
sparse_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit(
|
| 439 |
+
X_train_sparse, y_train
|
| 440 |
+
)
|
| 441 |
+
|
| 442 |
+
# Trained on dense format
|
| 443 |
+
dense_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit(
|
| 444 |
+
X_train, y_train
|
| 445 |
+
)
|
| 446 |
+
|
| 447 |
+
# predict
|
| 448 |
+
sparse_regr_results = sparse_regressor.predict(X_test_sparse)
|
| 449 |
+
dense_regr_results = dense_regressor.predict(X_test)
|
| 450 |
+
assert_array_almost_equal(sparse_regr_results, dense_regr_results)
|
| 451 |
+
|
| 452 |
+
# staged_predict
|
| 453 |
+
sparse_regr_results = sparse_regressor.staged_predict(X_test_sparse)
|
| 454 |
+
dense_regr_results = dense_regressor.staged_predict(X_test)
|
| 455 |
+
for sparse_regr_res, dense_regr_res in zip(sparse_regr_results, dense_regr_results):
|
| 456 |
+
assert_array_almost_equal(sparse_regr_res, dense_regr_res)
|
| 457 |
+
|
| 458 |
+
types = [i.data_type_ for i in sparse_regressor.estimators_]
|
| 459 |
+
|
| 460 |
+
assert all([t == expected_internal_type for t in types])
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def test_sample_weight_adaboost_regressor():
|
| 464 |
+
"""
|
| 465 |
+
AdaBoostRegressor should work without sample_weights in the base estimator
|
| 466 |
+
The random weighted sampling is done internally in the _boost method in
|
| 467 |
+
AdaBoostRegressor.
|
| 468 |
+
"""
|
| 469 |
+
|
| 470 |
+
class DummyEstimator(BaseEstimator):
|
| 471 |
+
def fit(self, X, y):
|
| 472 |
+
pass
|
| 473 |
+
|
| 474 |
+
def predict(self, X):
|
| 475 |
+
return np.zeros(X.shape[0])
|
| 476 |
+
|
| 477 |
+
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
|
| 478 |
+
boost.fit(X, y_regr)
|
| 479 |
+
assert len(boost.estimator_weights_) == len(boost.estimator_errors_)
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
def test_multidimensional_X():
|
| 483 |
+
"""
|
| 484 |
+
Check that the AdaBoost estimators can work with n-dimensional
|
| 485 |
+
data matrix
|
| 486 |
+
"""
|
| 487 |
+
rng = np.random.RandomState(0)
|
| 488 |
+
|
| 489 |
+
X = rng.randn(51, 3, 3)
|
| 490 |
+
yc = rng.choice([0, 1], 51)
|
| 491 |
+
yr = rng.randn(51)
|
| 492 |
+
|
| 493 |
+
boost = AdaBoostClassifier(DummyClassifier(strategy="most_frequent"))
|
| 494 |
+
boost.fit(X, yc)
|
| 495 |
+
boost.predict(X)
|
| 496 |
+
boost.predict_proba(X)
|
| 497 |
+
|
| 498 |
+
boost = AdaBoostRegressor(DummyRegressor())
|
| 499 |
+
boost.fit(X, yr)
|
| 500 |
+
boost.predict(X)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def test_adaboostclassifier_without_sample_weight():
|
| 504 |
+
X, y = iris.data, iris.target
|
| 505 |
+
estimator = NoSampleWeightWrapper(DummyClassifier())
|
| 506 |
+
clf = AdaBoostClassifier(estimator=estimator)
|
| 507 |
+
err_msg = "{} doesn't support sample_weight".format(estimator.__class__.__name__)
|
| 508 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 509 |
+
clf.fit(X, y)
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def test_adaboostregressor_sample_weight():
|
| 513 |
+
# check that giving weight will have an influence on the error computed
|
| 514 |
+
# for a weak learner
|
| 515 |
+
rng = np.random.RandomState(42)
|
| 516 |
+
X = np.linspace(0, 100, num=1000)
|
| 517 |
+
y = (0.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001)
|
| 518 |
+
X = X.reshape(-1, 1)
|
| 519 |
+
|
| 520 |
+
# add an arbitrary outlier
|
| 521 |
+
X[-1] *= 10
|
| 522 |
+
y[-1] = 10000
|
| 523 |
+
|
| 524 |
+
# random_state=0 ensure that the underlying bootstrap will use the outlier
|
| 525 |
+
regr_no_outlier = AdaBoostRegressor(
|
| 526 |
+
estimator=LinearRegression(), n_estimators=1, random_state=0
|
| 527 |
+
)
|
| 528 |
+
regr_with_weight = clone(regr_no_outlier)
|
| 529 |
+
regr_with_outlier = clone(regr_no_outlier)
|
| 530 |
+
|
| 531 |
+
# fit 3 models:
|
| 532 |
+
# - a model containing the outlier
|
| 533 |
+
# - a model without the outlier
|
| 534 |
+
# - a model containing the outlier but with a null sample-weight
|
| 535 |
+
regr_with_outlier.fit(X, y)
|
| 536 |
+
regr_no_outlier.fit(X[:-1], y[:-1])
|
| 537 |
+
sample_weight = np.ones_like(y)
|
| 538 |
+
sample_weight[-1] = 0
|
| 539 |
+
regr_with_weight.fit(X, y, sample_weight=sample_weight)
|
| 540 |
+
|
| 541 |
+
score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1])
|
| 542 |
+
score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1])
|
| 543 |
+
score_with_weight = regr_with_weight.score(X[:-1], y[:-1])
|
| 544 |
+
|
| 545 |
+
assert score_with_outlier < score_no_outlier
|
| 546 |
+
assert score_with_outlier < score_with_weight
|
| 547 |
+
assert score_no_outlier == pytest.approx(score_with_weight)
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def test_adaboost_consistent_predict():
|
| 551 |
+
# check that predict_proba and predict give consistent results
|
| 552 |
+
# regression test for:
|
| 553 |
+
# https://github.com/scikit-learn/scikit-learn/issues/14084
|
| 554 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 555 |
+
*datasets.load_digits(return_X_y=True), random_state=42
|
| 556 |
+
)
|
| 557 |
+
model = AdaBoostClassifier(random_state=42)
|
| 558 |
+
model.fit(X_train, y_train)
|
| 559 |
+
|
| 560 |
+
assert_array_equal(
|
| 561 |
+
np.argmax(model.predict_proba(X_test), axis=1), model.predict(X_test)
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
@pytest.mark.parametrize(
|
| 566 |
+
"model, X, y",
|
| 567 |
+
[
|
| 568 |
+
(AdaBoostClassifier(), iris.data, iris.target),
|
| 569 |
+
(AdaBoostRegressor(), diabetes.data, diabetes.target),
|
| 570 |
+
],
|
| 571 |
+
)
|
| 572 |
+
def test_adaboost_negative_weight_error(model, X, y):
|
| 573 |
+
sample_weight = np.ones_like(y)
|
| 574 |
+
sample_weight[-1] = -10
|
| 575 |
+
|
| 576 |
+
err_msg = "Negative values in data passed to `sample_weight`"
|
| 577 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 578 |
+
model.fit(X, y, sample_weight=sample_weight)
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
def test_adaboost_numerically_stable_feature_importance_with_small_weights():
|
| 582 |
+
"""Check that we don't create NaN feature importance with numerically
|
| 583 |
+
instable inputs.
|
| 584 |
+
|
| 585 |
+
Non-regression test for:
|
| 586 |
+
https://github.com/scikit-learn/scikit-learn/issues/20320
|
| 587 |
+
"""
|
| 588 |
+
rng = np.random.RandomState(42)
|
| 589 |
+
X = rng.normal(size=(1000, 10))
|
| 590 |
+
y = rng.choice([0, 1], size=1000)
|
| 591 |
+
sample_weight = np.ones_like(y) * 1e-263
|
| 592 |
+
tree = DecisionTreeClassifier(max_depth=10, random_state=12)
|
| 593 |
+
ada_model = AdaBoostClassifier(estimator=tree, n_estimators=20, random_state=12)
|
| 594 |
+
ada_model.fit(X, y, sample_weight=sample_weight)
|
| 595 |
+
assert np.isnan(ada_model.feature_importances_).sum() == 0
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
def test_adaboost_decision_function(global_random_seed):
|
| 599 |
+
"""Check that the decision function respects the symmetric constraint for weak
|
| 600 |
+
learners.
|
| 601 |
+
|
| 602 |
+
Non-regression test for:
|
| 603 |
+
https://github.com/scikit-learn/scikit-learn/issues/26520
|
| 604 |
+
"""
|
| 605 |
+
n_classes = 3
|
| 606 |
+
X, y = datasets.make_classification(
|
| 607 |
+
n_classes=n_classes, n_clusters_per_class=1, random_state=global_random_seed
|
| 608 |
+
)
|
| 609 |
+
clf = AdaBoostClassifier(n_estimators=1, random_state=global_random_seed).fit(X, y)
|
| 610 |
+
|
| 611 |
+
y_score = clf.decision_function(X)
|
| 612 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 613 |
+
|
| 614 |
+
# With a single learner, we expect to have a decision function in
|
| 615 |
+
# {1, - 1 / (n_classes - 1)}.
|
| 616 |
+
assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}
|
| 617 |
+
|
| 618 |
+
# We can assert the same for staged_decision_function since we have a single learner
|
| 619 |
+
for y_score in clf.staged_decision_function(X):
|
| 620 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 621 |
+
|
| 622 |
+
# With a single learner, we expect to have a decision function in
|
| 623 |
+
# {1, - 1 / (n_classes - 1)}.
|
| 624 |
+
assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}
|
| 625 |
+
|
| 626 |
+
clf.set_params(n_estimators=5).fit(X, y)
|
| 627 |
+
|
| 628 |
+
y_score = clf.decision_function(X)
|
| 629 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 630 |
+
|
| 631 |
+
for y_score in clf.staged_decision_function(X):
|
| 632 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
# TODO(1.8): remove
|
| 636 |
+
def test_deprecated_algorithm():
|
| 637 |
+
adaboost_clf = AdaBoostClassifier(n_estimators=1, algorithm="SAMME")
|
| 638 |
+
with pytest.warns(FutureWarning, match="The parameter 'algorithm' is deprecated"):
|
| 639 |
+
adaboost_clf.fit(X, y_class)
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper);
|
| 21 |
+
|
| 22 |
+
} // namespace cpu
|
| 23 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false);
|
| 21 |
+
|
| 22 |
+
} // namespace cpu
|
| 23 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward.h
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_jagged_to_padded_dense_forward_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor
|
| 26 |
+
inline at::Tensor _jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, at::IntArrayRef max_lengths, double padding_value=0.0) {
|
| 27 |
+
return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, c10::fromIntArrayRefSlow(max_lengths), padding_value);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
|
| 31 |
+
at::Tensor _jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, at::IntArrayRef max_lengths, double padding_value=0.0) {
|
| 32 |
+
return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, c10::fromIntArrayRefSlow(max_lengths), padding_value);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor
|
| 37 |
+
inline at::Tensor _jagged_to_padded_dense_forward_symint(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value=0.0) {
|
| 38 |
+
return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, max_lengths, padding_value);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
|
| 42 |
+
at::Tensor _jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value=0.0) {
|
| 43 |
+
return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, max_lengths, padding_value);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
}
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _linalg_eigvals(const at::Tensor & self);
|
| 21 |
+
|
| 22 |
+
} // namespace cuda
|
| 23 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_cuda(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false, ::std::optional<double> scale=::std::nullopt);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|