def _fit_single_estimator(
    estimator, X, y, fit_params, message_clsname=None, message=None
):
    """Fit ``estimator`` on ``(X, y)``; helper executed within a single job."""
    # TODO(SLEP6): drop the legacy branch below once metadata routing can no
    # longer be disabled and sample_weight is always routed explicitly.
    legacy_sample_weight = (
        not _routing_enabled() and "sample_weight" in fit_params
    )
    if not legacy_sample_weight:
        with _print_elapsed_time(message_clsname, message):
            estimator.fit(X, y, **fit_params)
        return estimator

    # Legacy path: forward sample_weight explicitly and translate the
    # resulting TypeError into a clearer error message.
    try:
        with _print_elapsed_time(message_clsname, message):
            estimator.fit(X, y, sample_weight=fit_params["sample_weight"])
    except TypeError as exc:
        if "unexpected keyword argument 'sample_weight'" not in str(exc):
            raise
        raise TypeError(
            "Underlying estimator {} does not support sample weights.".format(
                estimator.__class__.__name__
            )
        ) from exc
    return estimator
class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
    """Abstract base class shared by all ensemble estimators.

    Warning: not meant to be used directly; instantiate one of the derived
    ensemble classes instead.

    Parameters
    ----------
    estimator : object
        The base estimator from which the ensemble is built.

    n_estimators : int, default=10
        The number of estimators in the ensemble.

    estimator_params : list of str, default=tuple()
        Names of attributes of ``self`` that are copied onto each newly
        instantiated base estimator. If empty, default parameters are kept.

    Attributes
    ----------
    estimator_ : estimator
        The base estimator from which the ensemble is grown.

    estimators_ : list of estimators
        The collection of fitted base estimators.
    """

    @abstractmethod
    def __init__(
        self,
        estimator=None,
        *,
        n_estimators=10,
        estimator_params=tuple(),
    ):
        self.estimator = estimator
        self.n_estimators = n_estimators
        self.estimator_params = estimator_params

        # Sub-estimators are deliberately NOT created here: the estimator's
        # parameters may still change afterwards (e.g. grid search via the
        # nested-object syntax), so ``estimators_`` is populated during
        # ``fit`` by the concrete subclasses.

    def _validate_estimator(self, default=None):
        """Set the ``estimator_`` attribute, falling back to ``default``."""
        self.estimator_ = self.estimator if self.estimator is not None else default

    def _make_estimator(self, append=True, random_state=None):
        """Clone ``estimator_``, configure it, and optionally store it.

        Warning: always instantiate new sub-estimators through this method so
        that ``estimator_params`` and random states are applied consistently.
        """
        sub_estimator = clone(self.estimator_)
        params = {name: getattr(self, name) for name in self.estimator_params}
        sub_estimator.set_params(**params)

        if random_state is not None:
            _set_random_states(sub_estimator, random_state)

        if append:
            self.estimators_.append(sub_estimator)

        return sub_estimator

    def __len__(self):
        """Return the number of fitted estimators in the ensemble."""
        return len(self.estimators_)

    def __getitem__(self, index):
        """Return the estimator stored at position ``index``."""
        return self.estimators_[index]

    def __iter__(self):
        """Iterate over the fitted estimators."""
        return iter(self.estimators_)
class _BaseHeterogeneousEnsemble(
    MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta
):
    """Base class for ensembles built from several heterogeneous learners.

    Parameters
    ----------
    estimators : list of (str, estimator) tuples
        The ensemble of estimators to use in the ensemble. Each element is a
        (name, estimator) pair. An estimator can be disabled by setting it to
        `'drop'` using `set_params`.

    Attributes
    ----------
    estimators_ : list of estimators
        The fitted counterparts of the `estimators` parameter. Estimators set
        to `'drop'` do not appear in `estimators_`.
    """

    @property
    def named_estimators(self):
        """Dictionary to access any fitted sub-estimators by name.

        Returns
        -------
        :class:`~sklearn.utils.Bunch`
        """
        return Bunch(**dict(self.estimators))

    @abstractmethod
    def __init__(self, estimators):
        self.estimators = estimators

    def _validate_estimators(self):
        """Validate `estimators` and return the (names, estimators) tuples.

        Raises
        ------
        ValueError
            If `estimators` is empty, if every estimator is `'drop'`, or if
            an estimator does not match the ensemble's task type.
        """
        if len(self.estimators) == 0:
            raise ValueError(
                "Invalid 'estimators' attribute, 'estimators' should be a "
                "non-empty list of (string, estimator) tuples."
            )
        names, estimators = zip(*self.estimators)
        # Name validation is provided by MetaEstimatorMixin.
        self._validate_names(names)

        if all(est == "drop" for est in estimators):
            raise ValueError(
                "All estimators are dropped. At least one is required "
                "to be an estimator."
            )

        # Every sub-estimator must match the ensemble's own task type.
        is_estimator_type = is_classifier if is_classifier(self) else is_regressor

        for est in estimators:
            if est == "drop":
                continue
            if not is_estimator_type(est):
                raise ValueError(
                    "The estimator {} should be a {}.".format(
                        est.__class__.__name__, is_estimator_type.__name__[3:]
                    )
                )

        return names, estimators

    def set_params(self, **params):
        """
        Set the parameters of an estimator from the ensemble.

        Valid parameter keys can be listed with `get_params()`. Note that you
        can directly set the parameters of the estimators contained in
        `estimators`.

        Parameters
        ----------
        **params : keyword arguments
            Specific parameters using e.g.
            `set_params(parameter_name=new_value)`. In addition, to setting the
            parameters of the estimator, the individual estimator of the
            estimators can also be set, or can be removed by setting them to
            'drop'.

        Returns
        -------
        self : object
            Estimator instance.
        """
        super()._set_params("estimators", **params)
        return self

    def get_params(self, deep=True):
        """
        Get the parameters of an estimator from the ensemble.

        Returns the constructor parameters together with the estimators held
        in the `estimators` parameter.

        Parameters
        ----------
        deep : bool, default=True
            Setting it to True gets the various estimators and the parameters
            of the estimators as well.

        Returns
        -------
        params : dict
            Parameter and estimator names mapped to their values or parameter
            names mapped to their values.
        """
        return super()._get_params("estimators", deep=deep)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        try:
            # allow_nan is computed (and assigned) before sparse, matching the
            # original partial-assignment behavior if the second one raises.
            tags.input_tags.allow_nan = all(
                get_tags(est).input_tags.allow_nan
                for _, est in self.estimators
                if est != "drop"
            )
            tags.input_tags.sparse = all(
                get_tags(est).input_tags.sparse
                for _, est in self.estimators
                if est != "drop"
            )
        except Exception:
            # `estimators` may not comply with our API (list of tuples); keep
            # the conservative defaults and let the parameter validation raise
            # a proper error during `fit`.
            pass  # pragma: no cover
        return tags
def _predict_regression_tree_stages_sparse(
    object[:, :] estimators,
    object X,
    double scale,
    float64_t[:, :] out
):
    """Predicts output for regression tree inplace and adds scaled value to ``out[i, k]``.

    The function assumes that the ndarray that wraps ``X`` is csr_matrix.

    Parameters
    ----------
    estimators : 2d object array of shape (n_stages, n_outputs)
        Fitted tree estimators; ``estimators[i, k].tree_`` is traversed for
        stage ``i`` and output ``k``.
    X : csr_matrix of shape (n_samples, n_features)
        The input samples, in CSR sparse format.
    scale : double
        Constant by which each tree's leaf value is multiplied before being
        accumulated into ``out``.
    out : 2d memory view of float64_t, shape (n_samples, n_outputs)
        Accumulator for the predictions; updated in place.
    """
    cdef const float32_t[::1] X_data = X.data
    cdef const int32_t[::1] X_indices = X.indices
    cdef const int32_t[::1] X_indptr = X.indptr

    cdef intp_t n_samples = X.shape[0]
    cdef intp_t n_features = X.shape[1]
    cdef intp_t n_stages = estimators.shape[0]
    cdef intp_t n_outputs = estimators.shape[1]

    # Indices and temporary variables
    cdef intp_t sample_i
    cdef intp_t feature_i
    cdef intp_t stage_i
    cdef intp_t output_i
    cdef Node *root_node = NULL
    cdef Node *node = NULL
    cdef double *value = NULL

    # Cache raw node/value pointers for every (stage, output) tree up front,
    # so the inner loops below avoid repeated Python attribute access.
    cdef Tree tree
    cdef Node** nodes = NULL
    cdef double** values = NULL
    safe_realloc(&nodes, n_stages * n_outputs)
    safe_realloc(&values, n_stages * n_outputs)
    for stage_i in range(n_stages):
        for output_i in range(n_outputs):
            tree = estimators[stage_i, output_i].tree_
            nodes[stage_i * n_outputs + output_i] = tree.nodes
            values[stage_i * n_outputs + output_i] = tree.value

    # Initialize auxiliary data-structure
    cdef float32_t feature_value = 0.
    cdef float32_t* X_sample = NULL

    # feature_to_sample as a data structure records the last seen sample
    # for each feature; functionally, it is an efficient way to identify
    # which features are nonzero in the present sample.
    cdef intp_t* feature_to_sample = NULL

    safe_realloc(&X_sample, n_features)
    safe_realloc(&feature_to_sample, n_features)

    # Filling every byte with 0xFF sets each intp_t entry to -1, i.e.
    # "feature not seen in any sample yet".
    memset(feature_to_sample, -1, n_features * sizeof(intp_t))

    # Cycle through all samples
    for sample_i in range(n_samples):
        # Scatter the CSR row of sample_i into the dense X_sample buffer and
        # stamp feature_to_sample so stale values from previous rows are
        # recognized (and treated as implicit zeros) below.
        for feature_i in range(X_indptr[sample_i], X_indptr[sample_i + 1]):
            feature_to_sample[X_indices[feature_i]] = sample_i
            X_sample[X_indices[feature_i]] = X_data[feature_i]

        # Cycle through all stages
        for stage_i in range(n_stages):
            # Cycle through all trees
            for output_i in range(n_outputs):
                root_node = nodes[stage_i * n_outputs + output_i]
                value = values[stage_i * n_outputs + output_i]
                node = root_node

                # While node not a leaf
                while node.left_child != TREE_LEAF:
                    # ... and node.right_child != TREE_LEAF:
                    if feature_to_sample[node.feature] == sample_i:
                        feature_value = X_sample[node.feature]
                    else:
                        # Feature absent from this CSR row: implicit zero.
                        feature_value = 0.

                    if feature_value <= node.threshold:
                        node = root_node + node.left_child
                    else:
                        node = root_node + node.right_child
                # node - root_node is the index of the reached leaf.
                out[sample_i, output_i] += scale * value[node - root_node]

    # Free auxiliary arrays (allocated above with safe_realloc/malloc).
    free(X_sample)
    free(feature_to_sample)
    free(nodes)
    free(values)
+ """ + return predict_stages( + estimators=estimators[stage:stage + 1], X=X, scale=scale, out=out + ) + + +def _random_sample_mask( + intp_t n_total_samples, + intp_t n_total_in_bag, + random_state +): + """Create a random sample mask where ``n_total_in_bag`` elements are set. + + Parameters + ---------- + n_total_samples : int + The length of the resulting mask. + + n_total_in_bag : int + The number of elements in the sample mask which are set to 1. + + random_state : RandomState + A numpy ``RandomState`` object. + + Returns + ------- + sample_mask : np.ndarray, shape=[n_total_samples] + An ndarray where ``n_total_in_bag`` elements are set to ``True`` + the others are ``False``. + """ + cdef float64_t[::1] rand = random_state.uniform(size=n_total_samples) + cdef uint8_t[::1] sample_mask = np_zeros((n_total_samples,), dtype=bool) + + cdef intp_t n_bagged = 0 + cdef intp_t i = 0 + + for i in range(n_total_samples): + if rand[i] * (n_total_samples - i) < (n_total_in_bag - n_bagged): + sample_mask[i] = 1 + n_bagged += 1 + + return sample_mask.base diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5939d83c8483812187c39d373e425630a9e44fe5 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py @@ -0,0 +1,8 @@ +"""This module implements histogram-based gradient boosting estimators. + +The implementation is a port from pygbm which is itself strongly inspired +from LightGBM. 
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/utils.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c93d565456aa45b8982af98c7dc2c306c711c905 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/utils.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.pyx b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.pyx new file mode 100644 index 0000000000000000000000000000000000000000..12dad3ffabd8cedfc9f53216b8b3761d3af6b2ca --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.pyx @@ -0,0 +1,84 @@ +# Author: Nicolas Hug + +from cython.parallel import prange +from libc.math cimport isnan + +from .common cimport X_DTYPE_C, X_BINNED_DTYPE_C +from ...utils._typedefs cimport uint8_t + + +def _map_to_bins(const X_DTYPE_C [:, :] data, + list binning_thresholds, + const uint8_t[::1] is_categorical, + const uint8_t missing_values_bin_idx, + int n_threads, + X_BINNED_DTYPE_C [::1, :] binned): + """Bin continuous and categorical values to discrete integer-coded levels. + + A given value x is mapped into bin value i iff + thresholds[i - 1] < x <= thresholds[i] + + Parameters + ---------- + data : ndarray, shape (n_samples, n_features) + The data to bin. + binning_thresholds : list of arrays + For each feature, stores the increasing numeric values that are + used to separate the bins. + is_categorical : ndarray of uint8_t of shape (n_features,) + Indicates categorical features. + n_threads : int + Number of OpenMP threads to use. 
+ binned : ndarray, shape (n_samples, n_features) + Output array, must be fortran aligned. + """ + cdef: + int feature_idx + + for feature_idx in range(data.shape[1]): + _map_col_to_bins( + data[:, feature_idx], + binning_thresholds[feature_idx], + is_categorical[feature_idx], + missing_values_bin_idx, + n_threads, + binned[:, feature_idx] + ) + + +cdef void _map_col_to_bins( + const X_DTYPE_C [:] data, + const X_DTYPE_C [:] binning_thresholds, + const uint8_t is_categorical, + const uint8_t missing_values_bin_idx, + int n_threads, + X_BINNED_DTYPE_C [:] binned +): + """Binary search to find the bin index for each value in the data.""" + cdef: + int i + int left + int right + int middle + + for i in prange(data.shape[0], schedule='static', nogil=True, + num_threads=n_threads): + if ( + isnan(data[i]) or + # To follow LightGBM's conventions, negative values for + # categorical features are considered as missing values. + (is_categorical and data[i] < 0) + ): + binned[i] = missing_values_bin_idx + else: + # for known values, use binary search + left, right = 0, binning_thresholds.shape[0] + while left < right: + # equal to (right + left - 1) // 2 but avoids overflow + middle = left + (right - left - 1) // 2 + if data[i] <= binning_thresholds[middle]: + right = middle + else: + left = middle + 1 + + binned[i] = left diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c44477cfa2300620c457152d86f8053ef44cf720 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd @@ -0,0 +1,20 @@ +from .common cimport X_BINNED_DTYPE_C +from .common cimport BITSET_DTYPE_C +from .common cimport BITSET_INNER_DTYPE_C +from .common cimport X_DTYPE_C +from ...utils._typedefs cimport uint8_t + + +cdef void 
def _predict_from_raw_data( # raw data = non-binned data
        const node_struct [:] nodes,
        const X_DTYPE_C [:, :] numeric_data,
        const BITSET_INNER_DTYPE_C [:, ::1] raw_left_cat_bitsets,
        const BITSET_INNER_DTYPE_C [:, ::1] known_cat_bitsets,
        const unsigned int [::1] f_idx_map,
        int n_threads,
        Y_DTYPE_C [:] out):
    """Predict a raw value for every row of ``numeric_data``, writing into ``out``.

    Parameters
    ----------
    nodes : memory view of node_struct
        The array of nodes of a single predictor tree; ``nodes[0]`` is the
        root.
    numeric_data : 2d memory view of X_DTYPE_C
        Raw (non-binned) input samples.
    raw_left_cat_bitsets : 2d memory view of BITSET_INNER_DTYPE_C
        Per-split bitsets of raw category values that are routed to the left
        child; indexed by ``node.bitset_idx``.
    known_cat_bitsets : 2d memory view of BITSET_INNER_DTYPE_C
        Per-feature bitsets of known (accepted) category values; unknown
        categories are treated like missing values during traversal.
    f_idx_map : memory view of unsigned int
        Maps a feature index to its row in ``known_cat_bitsets``.
    n_threads : int
        Number of OpenMP threads used by ``prange``.
    out : memory view of Y_DTYPE_C, shape (n_samples,)
        The predictions; written in place.
    """
    cdef:
        int i

    # Rows are independent, so they are predicted in parallel.
    for i in prange(numeric_data.shape[0], schedule='static', nogil=True,
                    num_threads=n_threads):
        out[i] = _predict_one_from_raw_data(
            nodes, numeric_data, raw_left_cat_bitsets,
            known_cat_bitsets,
            f_idx_map, i)
nodes, + const X_DTYPE_C [:, :] numeric_data, + const BITSET_INNER_DTYPE_C [:, ::1] raw_left_cat_bitsets, + const BITSET_INNER_DTYPE_C [:, ::1] known_cat_bitsets, + const unsigned int [::1] f_idx_map, + const int row) noexcept nogil: + # Need to pass the whole array and the row index, else prange won't work. + # See issue Cython #2798 + + cdef: + node_struct node = nodes[0] + unsigned int node_idx = 0 + X_DTYPE_C data_val + + while True: + if node.is_leaf: + return node.value + + data_val = numeric_data[row, node.feature_idx] + + if isnan(data_val): + if node.missing_go_to_left: + node_idx = node.left + else: + node_idx = node.right + elif node.is_categorical: + if data_val < 0: + # data_val is not in the accepted range, so it is treated as missing value + node_idx = node.left if node.missing_go_to_left else node.right + elif in_bitset_2d_memoryview( + raw_left_cat_bitsets, + data_val, + node.bitset_idx): + node_idx = node.left + elif in_bitset_2d_memoryview( + known_cat_bitsets, + data_val, + f_idx_map[node.feature_idx]): + node_idx = node.right + else: + # Treat unknown categories as missing. 
def _compute_partial_dependence(
    node_struct [:] nodes,
    const X_DTYPE_C [:, ::1] X,
    const intp_t [:] target_features,
    Y_DTYPE_C [:] out
):
    """Partial dependence of the response on the ``target_features`` set.

    For each sample in ``X`` a tree traversal is performed.
    Each traversal starts from the root with weight 1.0.

    At each non-leaf node that splits on a target feature, either
    the left child or the right child is visited based on the feature
    value of the current sample, and the weight is not modified.
    At each non-leaf node that splits on a complementary feature,
    both children are visited and the weight is multiplied by the fraction
    of training samples which went to each child.

    At each leaf, the value of the node is multiplied by the current
    weight (weights sum to 1 for all visited terminal nodes).

    Parameters
    ----------
    nodes : view on array of PREDICTOR_RECORD_DTYPE, shape (n_nodes)
        The array representing the predictor tree.
    X : view on 2d ndarray, shape (n_samples, n_target_features)
        The grid points on which the partial dependence should be
        evaluated.
    target_features : view on 1d ndarray of intp_t, shape (n_target_features)
        The set of target features for which the partial dependence
        should be evaluated.
    out : view on 1d ndarray, shape (n_samples)
        The value of the partial dependence function on each grid
        point.
    """

    cdef:
        unsigned int current_node_idx
        # Explicit DFS stacks; sized n_nodes, which upper-bounds the number
        # of simultaneously pending nodes since each node is pushed at most
        # once per traversal.
        unsigned int [:] node_idx_stack = np.zeros(shape=nodes.shape[0],
                                                   dtype=np.uint32)
        Y_DTYPE_C [::1] weight_stack = np.zeros(shape=nodes.shape[0],
                                                dtype=Y_DTYPE)
        node_struct * current_node  # pointer to avoid copying attributes

        unsigned int sample_idx
        intp_t feature_idx
        unsigned stack_size
        Y_DTYPE_C left_sample_frac
        Y_DTYPE_C current_weight
        Y_DTYPE_C total_weight  # used for sanity check only
        bint is_target_feature

    for sample_idx in range(X.shape[0]):
        # init stacks for current sample
        stack_size = 1
        node_idx_stack[0] = 0  # root node
        weight_stack[0] = 1  # all the samples are in the root node
        total_weight = 0

        while stack_size > 0:

            # pop the stack
            stack_size -= 1
            current_node_idx = node_idx_stack[stack_size]
            current_node = &nodes[current_node_idx]

            if current_node.is_leaf:
                out[sample_idx] += (weight_stack[stack_size] *
                                    current_node.value)
                total_weight += weight_stack[stack_size]
            else:
                # determine if the split feature is a target feature
                is_target_feature = False
                for feature_idx in range(target_features.shape[0]):
                    if target_features[feature_idx] == current_node.feature_idx:
                        is_target_feature = True
                        break

                if is_target_feature:
                    # In this case, we push left or right child on stack.
                    # Note: after the break above, feature_idx is the
                    # position within target_features, which is also the
                    # column of X holding this feature's grid value.
                    if X[sample_idx, feature_idx] <= current_node.num_threshold:
                        node_idx_stack[stack_size] = current_node.left
                    else:
                        node_idx_stack[stack_size] = current_node.right
                    stack_size += 1
                else:
                    # In this case, we push both children onto the stack,
                    # and give a weight proportional to the number of
                    # samples going through each branch.

                    # push left child (reuses the popped slot's weight as
                    # current_weight before overwriting it)
                    node_idx_stack[stack_size] = current_node.left
                    left_sample_frac = (
                        nodes[current_node.left].count /
                        current_node.count)
                    current_weight = weight_stack[stack_size]
                    weight_stack[stack_size] = current_weight * left_sample_frac
                    stack_size += 1

                    # push right child (gets the complementary weight)
                    node_idx_stack[stack_size] = current_node.right
                    weight_stack[stack_size] = (
                        current_weight * (1 - left_sample_frac))
                    stack_size += 1

        # Sanity check. Should never happen.
        if not (0.999 < total_weight < 1.001):
            raise ValueError("Total weight should be 1.0 but was %.9f" %total_weight)
import numpy as np

# Targets y are converted to Y_DTYPE, which is also the dtype used for leaf
# values, gains, and the sums of gradients / hessians.
Y_DTYPE = np.float64
X_DTYPE = np.float64
X_BINNED_DTYPE = np.uint8  # hence max_bins == 256
# Per-sample gradients and hessians are stored as float32 to limit memory use.
G_H_DTYPE = np.float32
X_BITSET_INNER_DTYPE = np.uint32

# Histograms accumulate float32 gradients/hessians into float64 fields: the
# number of additions per bin is not known in advance, so compensated
# (Kahan-/Neumaier-style, see https://github.com/python/cpython/issues/100425)
# or pairwise (see https://github.com/numpy/numpy/pull/3685) summation schemes
# are hard to apply here; float64 accumulation keeps precision loss small.
# See also the comment in _subtract_histograms.
HISTOGRAM_DTYPE = np.dtype([
    ('sum_gradients', Y_DTYPE),  # sum of sample gradients in bin
    ('sum_hessians', Y_DTYPE),  # sum of sample hessians in bin
    ('count', np.uint32),  # number of samples in bin
])

# One record per tree node; mirrored by the packed `node_struct` in common.pxd
# (field order matters and must stay in sync).
PREDICTOR_RECORD_DTYPE = np.dtype([
    ('value', Y_DTYPE),
    ('count', np.uint32),
    ('feature_idx', np.intp),
    ('num_threshold', X_DTYPE),
    ('missing_go_to_left', np.uint8),
    ('left', np.uint32),
    ('right', np.uint32),
    ('gain', Y_DTYPE),
    ('depth', np.uint32),
    ('is_leaf', np.uint8),
    ('bin_threshold', X_BINNED_DTYPE),
    ('is_categorical', np.uint8),
    # The index of the corresponding bitsets in the Predictor's bitset arrays.
    # Only used if is_categorical is True.
    ('bitset_idx', np.uint32),
])

# Large-but-finite stand-in for infinity (mirrors LightGBM's AvoidInf()).
ALMOST_INF = 1e300
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numbers +from heapq import heappop, heappush +from timeit import default_timer as time + +import numpy as np + +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads + +from ...utils.arrayfuncs import sum_parallel +from ._bitset import set_raw_bitset_from_binned_bitset +from .common import ( + PREDICTOR_RECORD_DTYPE, + X_BITSET_INNER_DTYPE, + MonotonicConstraint, +) +from .histogram import HistogramBuilder +from .predictor import TreePredictor +from .splitting import Splitter + + +class TreeNode: + """Tree Node class used in TreeGrower. + + This isn't used for prediction purposes, only for training (see + TreePredictor). + + Parameters + ---------- + depth : int + The depth of the node, i.e. its distance from the root. + sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 + The indices of the samples at the node. + partition_start : int + start position of the node's sample_indices in splitter.partition. + partition_stop : int + stop position of the node's sample_indices in splitter.partition. + sum_gradients : float + The sum of the gradients of the samples at the node. + sum_hessians : float + The sum of the hessians of the samples at the node. + + Attributes + ---------- + depth : int + The depth of the node, i.e. its distance from the root. + sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 + The indices of the samples at the node. + sum_gradients : float + The sum of the gradients of the samples at the node. + sum_hessians : float + The sum of the hessians of the samples at the node. + split_info : SplitInfo or None + The result of the split evaluation. + is_leaf : bool + True if node is a leaf + left_child : TreeNode or None + The left child of the node. None for leaves. + right_child : TreeNode or None + The right child of the node. None for leaves. 
+ value : float or None + The value of the leaf, as computed in finalize_leaf(). None for + non-leaf nodes. + partition_start : int + start position of the node's sample_indices in splitter.partition. + partition_stop : int + stop position of the node's sample_indices in splitter.partition. + allowed_features : None or ndarray, dtype=int + Indices of features allowed to split for children. + interaction_cst_indices : None or list of ints + Indices of the interaction sets that have to be applied on splits of + child nodes. The fewer sets the stronger the constraint as fewer sets + contain fewer features. + children_lower_bound : float + children_upper_bound : float + """ + + def __init__( + self, + *, + depth, + sample_indices, + partition_start, + partition_stop, + sum_gradients, + sum_hessians, + value=None, + ): + self.depth = depth + self.sample_indices = sample_indices + self.n_samples = sample_indices.shape[0] + self.sum_gradients = sum_gradients + self.sum_hessians = sum_hessians + self.value = value + self.is_leaf = False + self.allowed_features = None + self.interaction_cst_indices = None + self.set_children_bounds(float("-inf"), float("+inf")) + self.split_info = None + self.left_child = None + self.right_child = None + self.histograms = None + # start and stop indices of the node in the splitter.partition + # array. Concretely, + # self.sample_indices = view(self.splitter.partition[start:stop]) + # Please see the comments about splitter.partition and + # splitter.split_indices for more info about this design. + # These 2 attributes are only used in _update_raw_prediction, because we + # need to iterate over the leaves and I don't know how to efficiently + # store the sample_indices views because they're all of different sizes. 
+ self.partition_start = partition_start + self.partition_stop = partition_stop + + def set_children_bounds(self, lower, upper): + """Set children values bounds to respect monotonic constraints.""" + + # These are bounds for the node's *children* values, not the node's + # value. The bounds are used in the splitter when considering potential + # left and right child. + self.children_lower_bound = lower + self.children_upper_bound = upper + + def __lt__(self, other_node): + """Comparison for priority queue. + + Nodes with high gain are higher priority than nodes with low gain. + + heapq.heappush only needs the '<' operator. + heapq.heappop takes the smallest item first (smaller is higher + priority). + + Parameters + ---------- + other_node : TreeNode + The node to compare with. + """ + return self.split_info.gain > other_node.split_info.gain + + +class TreeGrower: + """Tree grower class used to build a tree. + + The tree is fitted to predict the values of a Newton-Raphson step. The + splits are considered in a best-first fashion, and the quality of a + split is defined in splitting._split_gain. + + Parameters + ---------- + X_binned : ndarray of shape (n_samples, n_features), dtype=np.uint8 + The binned input samples. Must be Fortran-aligned. + gradients : ndarray of shape (n_samples,) + The gradients of each training sample. Those are the gradients of the + loss w.r.t the predictions, evaluated at iteration ``i - 1``. + hessians : ndarray of shape (n_samples,) + The hessians of each training sample. Those are the hessians of the + loss w.r.t the predictions, evaluated at iteration ``i - 1``. + max_leaf_nodes : int, default=None + The maximum number of leaves for each tree. If None, there is no + maximum limit. + max_depth : int, default=None + The maximum depth of each tree. The depth of a tree is the number of + edges to go from the root to the deepest leaf. + Depth isn't constrained by default. 
+ min_samples_leaf : int, default=20 + The minimum number of samples per leaf. + min_gain_to_split : float, default=0. + The minimum gain needed to split a node. Splits with lower gain will + be ignored. + min_hessian_to_split : float, default=1e-3 + The minimum sum of hessians needed in each node. Splits that result in + at least one child having a sum of hessians less than + ``min_hessian_to_split`` are discarded. + n_bins : int, default=256 + The total number of bins, including the bin for missing values. Used + to define the shape of the histograms. + n_bins_non_missing : ndarray, dtype=np.uint32, default=None + For each feature, gives the number of bins actually used for + non-missing values. For features with a lot of unique values, this + is equal to ``n_bins - 1``. If it's an int, all features are + considered to have the same number of bins. If None, all features + are considered to have ``n_bins - 1`` bins. + has_missing_values : bool or ndarray, dtype=bool, default=False + Whether each feature contains missing values (in the training data). + If it's a bool, the same value is used for all features. + is_categorical : ndarray of bool of shape (n_features,), default=None + Indicates categorical features. + monotonic_cst : array-like of int of shape (n_features,), dtype=int, default=None + Indicates the monotonic constraint to enforce on each feature. + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + Read more in the :ref:`User Guide `. + interaction_cst : list of sets of integers, default=None + List of interaction constraints. + l2_regularization : float, default=0. + The L2 regularization parameter penalizing leaves with small hessians. + Use ``0`` for no regularization (default). + feature_fraction_per_split : float, default=1 + Proportion of randomly chosen features in each and every node split. + This is a form of regularization, smaller values make the trees weaker + learners and might prevent overfitting. 
+ rng : Generator + Numpy random Generator used for feature subsampling. + shrinkage : float, default=1. + The shrinkage parameter to apply to the leaves values, also known as + learning rate. + n_threads : int, default=None + Number of OpenMP threads to use. `_openmp_effective_n_threads` is called + to determine the effective number of threads use, which takes cgroups CPU + quotes into account. See the docstring of `_openmp_effective_n_threads` + for details. + + Attributes + ---------- + histogram_builder : HistogramBuilder + splitter : Splitter + root : TreeNode + finalized_leaves : list of TreeNode + splittable_nodes : list of TreeNode + missing_values_bin_idx : int + Equals n_bins - 1 + n_categorical_splits : int + n_features : int + n_nodes : int + total_find_split_time : float + Time spent finding the best splits + total_compute_hist_time : float + Time spent computing histograms + total_apply_split_time : float + Time spent splitting nodes + with_monotonic_cst : bool + Whether there are monotonic constraints that apply. False iff monotonic_cst is + None. 
+ """ + + def __init__( + self, + X_binned, + gradients, + hessians, + max_leaf_nodes=None, + max_depth=None, + min_samples_leaf=20, + min_gain_to_split=0.0, + min_hessian_to_split=1e-3, + n_bins=256, + n_bins_non_missing=None, + has_missing_values=False, + is_categorical=None, + monotonic_cst=None, + interaction_cst=None, + l2_regularization=0.0, + feature_fraction_per_split=1.0, + rng=np.random.default_rng(), + shrinkage=1.0, + n_threads=None, + ): + self._validate_parameters( + X_binned, + min_gain_to_split, + min_hessian_to_split, + ) + n_threads = _openmp_effective_n_threads(n_threads) + + if n_bins_non_missing is None: + n_bins_non_missing = n_bins - 1 + + if isinstance(n_bins_non_missing, numbers.Integral): + n_bins_non_missing = np.array( + [n_bins_non_missing] * X_binned.shape[1], dtype=np.uint32 + ) + else: + n_bins_non_missing = np.asarray(n_bins_non_missing, dtype=np.uint32) + + if isinstance(has_missing_values, bool): + has_missing_values = [has_missing_values] * X_binned.shape[1] + has_missing_values = np.asarray(has_missing_values, dtype=np.uint8) + + # `monotonic_cst` validation is done in _validate_monotonic_cst + # at the estimator level and therefore the following should not be + # needed when using the public API. 
+ if monotonic_cst is None: + monotonic_cst = np.full( + shape=X_binned.shape[1], + fill_value=MonotonicConstraint.NO_CST, + dtype=np.int8, + ) + else: + monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8) + self.with_monotonic_cst = np.any(monotonic_cst != MonotonicConstraint.NO_CST) + + if is_categorical is None: + is_categorical = np.zeros(shape=X_binned.shape[1], dtype=np.uint8) + else: + is_categorical = np.asarray(is_categorical, dtype=np.uint8) + + if np.any( + np.logical_and( + is_categorical == 1, monotonic_cst != MonotonicConstraint.NO_CST + ) + ): + raise ValueError("Categorical features cannot have monotonic constraints.") + + hessians_are_constant = hessians.shape[0] == 1 + self.histogram_builder = HistogramBuilder( + X_binned, n_bins, gradients, hessians, hessians_are_constant, n_threads + ) + missing_values_bin_idx = n_bins - 1 + self.splitter = Splitter( + X_binned=X_binned, + n_bins_non_missing=n_bins_non_missing, + missing_values_bin_idx=missing_values_bin_idx, + has_missing_values=has_missing_values, + is_categorical=is_categorical, + monotonic_cst=monotonic_cst, + l2_regularization=l2_regularization, + min_hessian_to_split=min_hessian_to_split, + min_samples_leaf=min_samples_leaf, + min_gain_to_split=min_gain_to_split, + hessians_are_constant=hessians_are_constant, + feature_fraction_per_split=feature_fraction_per_split, + rng=rng, + n_threads=n_threads, + ) + self.X_binned = X_binned + self.max_leaf_nodes = max_leaf_nodes + self.max_depth = max_depth + self.min_samples_leaf = min_samples_leaf + self.min_gain_to_split = min_gain_to_split + self.n_bins_non_missing = n_bins_non_missing + self.missing_values_bin_idx = missing_values_bin_idx + self.has_missing_values = has_missing_values + self.is_categorical = is_categorical + self.monotonic_cst = monotonic_cst + self.interaction_cst = interaction_cst + self.l2_regularization = l2_regularization + self.shrinkage = shrinkage + self.n_features = X_binned.shape[1] + self.n_threads = n_threads + 
self.splittable_nodes = [] + self.finalized_leaves = [] + self.total_find_split_time = 0.0 # time spent finding the best splits + self.total_compute_hist_time = 0.0 # time spent computing histograms + self.total_apply_split_time = 0.0 # time spent splitting nodes + self.n_categorical_splits = 0 + self._initialize_root(gradients, hessians) + self.n_nodes = 1 + + def _validate_parameters( + self, + X_binned, + min_gain_to_split, + min_hessian_to_split, + ): + """Validate parameters passed to __init__. + + Also validate parameters passed to splitter. + """ + if X_binned.dtype != np.uint8: + raise NotImplementedError("X_binned must be of type uint8.") + if not X_binned.flags.f_contiguous: + raise ValueError( + "X_binned should be passed as Fortran contiguous " + "array for maximum efficiency." + ) + if min_gain_to_split < 0: + raise ValueError( + "min_gain_to_split={} must be positive.".format(min_gain_to_split) + ) + if min_hessian_to_split < 0: + raise ValueError( + "min_hessian_to_split={} must be positive.".format(min_hessian_to_split) + ) + + def grow(self): + """Grow the tree, from root to leaves.""" + while self.splittable_nodes: + self.split_next() + + self._apply_shrinkage() + + def _apply_shrinkage(self): + """Multiply leaves values by shrinkage parameter. + + This must be done at the very end of the growing process. If this were + done during the growing process e.g. in finalize_leaf(), then a leaf + would be shrunk but its sibling would potentially not be (if it's a + non-leaf), which would lead to a wrong computation of the 'middle' + value needed to enforce the monotonic constraints. 
+ """ + for leaf in self.finalized_leaves: + leaf.value *= self.shrinkage + + def _initialize_root(self, gradients, hessians): + """Initialize root node and finalize it if needed.""" + n_samples = self.X_binned.shape[0] + depth = 0 + sum_gradients = sum_parallel(gradients, self.n_threads) + if self.histogram_builder.hessians_are_constant: + sum_hessians = hessians[0] * n_samples + else: + sum_hessians = sum_parallel(hessians, self.n_threads) + self.root = TreeNode( + depth=depth, + sample_indices=self.splitter.partition, + partition_start=0, + partition_stop=n_samples, + sum_gradients=sum_gradients, + sum_hessians=sum_hessians, + value=0, + ) + + if self.root.n_samples < 2 * self.min_samples_leaf: + # Do not even bother computing any splitting statistics. + self._finalize_leaf(self.root) + return + if sum_hessians < self.splitter.min_hessian_to_split: + self._finalize_leaf(self.root) + return + + if self.interaction_cst is not None: + self.root.interaction_cst_indices = range(len(self.interaction_cst)) + allowed_features = set().union(*self.interaction_cst) + self.root.allowed_features = np.fromiter( + allowed_features, dtype=np.uint32, count=len(allowed_features) + ) + + tic = time() + self.root.histograms = self.histogram_builder.compute_histograms_brute( + self.root.sample_indices, self.root.allowed_features + ) + self.total_compute_hist_time += time() - tic + + tic = time() + self._compute_best_split_and_push(self.root) + self.total_find_split_time += time() - tic + + def _compute_best_split_and_push(self, node): + """Compute the best possible split (SplitInfo) of a given node. + + Also push it in the heap of splittable nodes if gain isn't zero. 
+ The gain of a node is 0 if either all the leaves are pure + (best gain = 0), or if no split would satisfy the constraints, + (min_hessian_to_split, min_gain_to_split, min_samples_leaf) + """ + + node.split_info = self.splitter.find_node_split( + n_samples=node.n_samples, + histograms=node.histograms, + sum_gradients=node.sum_gradients, + sum_hessians=node.sum_hessians, + value=node.value, + lower_bound=node.children_lower_bound, + upper_bound=node.children_upper_bound, + allowed_features=node.allowed_features, + ) + + if node.split_info.gain <= 0: # no valid split + self._finalize_leaf(node) + else: + heappush(self.splittable_nodes, node) + + def split_next(self): + """Split the node with highest potential gain. + + Returns + ------- + left : TreeNode + The resulting left child. + right : TreeNode + The resulting right child. + """ + # Consider the node with the highest loss reduction (a.k.a. gain) + node = heappop(self.splittable_nodes) + + tic = time() + ( + sample_indices_left, + sample_indices_right, + right_child_pos, + ) = self.splitter.split_indices(node.split_info, node.sample_indices) + self.total_apply_split_time += time() - tic + + depth = node.depth + 1 + n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes) + n_leaf_nodes += 2 + + left_child_node = TreeNode( + depth=depth, + sample_indices=sample_indices_left, + partition_start=node.partition_start, + partition_stop=node.partition_start + right_child_pos, + sum_gradients=node.split_info.sum_gradient_left, + sum_hessians=node.split_info.sum_hessian_left, + value=node.split_info.value_left, + ) + right_child_node = TreeNode( + depth=depth, + sample_indices=sample_indices_right, + partition_start=left_child_node.partition_stop, + partition_stop=node.partition_stop, + sum_gradients=node.split_info.sum_gradient_right, + sum_hessians=node.split_info.sum_hessian_right, + value=node.split_info.value_right, + ) + + node.right_child = right_child_node + node.left_child = left_child_node + + # 
set interaction constraints (the indices of the constraints sets) + if self.interaction_cst is not None: + # Calculate allowed_features and interaction_cst_indices only once. Child + # nodes inherit them before they get split. + ( + left_child_node.allowed_features, + left_child_node.interaction_cst_indices, + ) = self._compute_interactions(node) + right_child_node.interaction_cst_indices = ( + left_child_node.interaction_cst_indices + ) + right_child_node.allowed_features = left_child_node.allowed_features + + if not self.has_missing_values[node.split_info.feature_idx]: + # If no missing values are encountered at fit time, then samples + # with missing values during predict() will go to whichever child + # has the most samples. + node.split_info.missing_go_to_left = ( + left_child_node.n_samples > right_child_node.n_samples + ) + + self.n_nodes += 2 + self.n_categorical_splits += node.split_info.is_categorical + + if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes: + self._finalize_leaf(left_child_node) + self._finalize_leaf(right_child_node) + self._finalize_splittable_nodes() + return left_child_node, right_child_node + + if self.max_depth is not None and depth == self.max_depth: + self._finalize_leaf(left_child_node) + self._finalize_leaf(right_child_node) + return left_child_node, right_child_node + + if left_child_node.n_samples < self.min_samples_leaf * 2: + self._finalize_leaf(left_child_node) + if right_child_node.n_samples < self.min_samples_leaf * 2: + self._finalize_leaf(right_child_node) + + if self.with_monotonic_cst: + # Set value bounds for respecting monotonic constraints + # See test_nodes_values() for details + if ( + self.monotonic_cst[node.split_info.feature_idx] + == MonotonicConstraint.NO_CST + ): + lower_left = lower_right = node.children_lower_bound + upper_left = upper_right = node.children_upper_bound + else: + mid = (left_child_node.value + right_child_node.value) / 2 + if ( + 
self.monotonic_cst[node.split_info.feature_idx] + == MonotonicConstraint.POS + ): + lower_left, upper_left = node.children_lower_bound, mid + lower_right, upper_right = mid, node.children_upper_bound + else: # NEG + lower_left, upper_left = mid, node.children_upper_bound + lower_right, upper_right = node.children_lower_bound, mid + left_child_node.set_children_bounds(lower_left, upper_left) + right_child_node.set_children_bounds(lower_right, upper_right) + + # Compute histograms of children, and compute their best possible split + # (if needed) + should_split_left = not left_child_node.is_leaf + should_split_right = not right_child_node.is_leaf + if should_split_left or should_split_right: + # We will compute the histograms of both nodes even if one of them + # is a leaf, since computing the second histogram is very cheap + # (using histogram subtraction). + n_samples_left = left_child_node.sample_indices.shape[0] + n_samples_right = right_child_node.sample_indices.shape[0] + if n_samples_left < n_samples_right: + smallest_child = left_child_node + largest_child = right_child_node + else: + smallest_child = right_child_node + largest_child = left_child_node + + # We use the brute O(n_samples) method on the child that has the + # smallest number of samples, and the subtraction trick O(n_bins) + # on the other one. + # Note that both left and right child have the same allowed_features. + tic = time() + smallest_child.histograms = self.histogram_builder.compute_histograms_brute( + smallest_child.sample_indices, smallest_child.allowed_features + ) + largest_child.histograms = ( + self.histogram_builder.compute_histograms_subtraction( + node.histograms, + smallest_child.histograms, + smallest_child.allowed_features, + ) + ) + # node.histograms is reused in largest_child.histograms. To break cyclic + # memory references and help garbage collection, we set it to None. 
+ node.histograms = None + self.total_compute_hist_time += time() - tic + + tic = time() + if should_split_left: + self._compute_best_split_and_push(left_child_node) + if should_split_right: + self._compute_best_split_and_push(right_child_node) + self.total_find_split_time += time() - tic + + # Release memory used by histograms as they are no longer needed + # for leaf nodes since they won't be split. + for child in (left_child_node, right_child_node): + if child.is_leaf: + del child.histograms + + # Release memory used by histograms as they are no longer needed for + # internal nodes once children histograms have been computed. + del node.histograms + + return left_child_node, right_child_node + + def _compute_interactions(self, node): + r"""Compute features allowed by interactions to be inherited by child nodes. + + Example: Assume constraints [{0, 1}, {1, 2}]. + 1 <- Both constraint groups could be applied from now on + / \ + 1 2 <- Left split still fulfills both constraint groups. + / \ / \ Right split at feature 2 has only group {1, 2} from now on. + + LightGBM uses the same logic for overlapping groups. See + https://github.com/microsoft/LightGBM/issues/4481 for details. + + Parameters: + ---------- + node : TreeNode + A node that might have children. Based on its feature_idx, the interaction + constraints for possible child nodes are computed. + + Returns + ------- + allowed_features : ndarray, dtype=uint32 + Indices of features allowed to split for children. + interaction_cst_indices : list of ints + Indices of the interaction sets that have to be applied on splits of + child nodes. The fewer sets the stronger the constraint as fewer sets + contain fewer features. + """ + # Note: + # - Case of no interactions is already captured before function call. + # - This is for nodes that are already split and have a + # node.split_info.feature_idx. 
+ allowed_features = set() + interaction_cst_indices = [] + for i in node.interaction_cst_indices: + if node.split_info.feature_idx in self.interaction_cst[i]: + interaction_cst_indices.append(i) + allowed_features.update(self.interaction_cst[i]) + return ( + np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)), + interaction_cst_indices, + ) + + def _finalize_leaf(self, node): + """Make node a leaf of the tree being grown.""" + + node.is_leaf = True + self.finalized_leaves.append(node) + + def _finalize_splittable_nodes(self): + """Transform all splittable nodes into leaves. + + Used when some constraint is met e.g. maximum number of leaves or + maximum depth.""" + while len(self.splittable_nodes) > 0: + node = self.splittable_nodes.pop() + self._finalize_leaf(node) + + def make_predictor(self, binning_thresholds): + """Make a TreePredictor object out of the current tree. + + Parameters + ---------- + binning_thresholds : array-like of floats + Corresponds to the bin_thresholds_ attribute of the BinMapper. + For each feature, this stores: + + - the bin frontiers for continuous features + - the unique raw category values for categorical features + + Returns + ------- + A TreePredictor object. 
+ """ + predictor_nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE) + binned_left_cat_bitsets = np.zeros( + (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE + ) + raw_left_cat_bitsets = np.zeros( + (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE + ) + _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + self.root, + binning_thresholds, + self.n_bins_non_missing, + ) + return TreePredictor( + predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets + ) + + +def _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + grower_node, + binning_thresholds, + n_bins_non_missing, + next_free_node_idx=0, + next_free_bitset_idx=0, +): + """Helper used in make_predictor to set the TreePredictor fields.""" + node = predictor_nodes[next_free_node_idx] + node["count"] = grower_node.n_samples + node["depth"] = grower_node.depth + if grower_node.split_info is not None: + node["gain"] = grower_node.split_info.gain + else: + node["gain"] = -1 + + node["value"] = grower_node.value + + if grower_node.is_leaf: + # Leaf node + node["is_leaf"] = True + return next_free_node_idx + 1, next_free_bitset_idx + + split_info = grower_node.split_info + feature_idx, bin_idx = split_info.feature_idx, split_info.bin_idx + node["feature_idx"] = feature_idx + node["bin_threshold"] = bin_idx + node["missing_go_to_left"] = split_info.missing_go_to_left + node["is_categorical"] = split_info.is_categorical + + if split_info.bin_idx == n_bins_non_missing[feature_idx] - 1: + # Split is on the last non-missing bin: it's a "split on nans". + # All nans go to the right, the rest go to the left. 
+ # Note: for categorical splits, bin_idx is 0 and we rely on the bitset + node["num_threshold"] = np.inf + elif split_info.is_categorical: + categories = binning_thresholds[feature_idx] + node["bitset_idx"] = next_free_bitset_idx + binned_left_cat_bitsets[next_free_bitset_idx] = split_info.left_cat_bitset + set_raw_bitset_from_binned_bitset( + raw_left_cat_bitsets[next_free_bitset_idx], + split_info.left_cat_bitset, + categories, + ) + next_free_bitset_idx += 1 + else: + node["num_threshold"] = binning_thresholds[feature_idx][bin_idx] + + next_free_node_idx += 1 + + node["left"] = next_free_node_idx + next_free_node_idx, next_free_bitset_idx = _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + grower_node.left_child, + binning_thresholds=binning_thresholds, + n_bins_non_missing=n_bins_non_missing, + next_free_node_idx=next_free_node_idx, + next_free_bitset_idx=next_free_bitset_idx, + ) + + node["right"] = next_free_node_idx + return _fill_predictor_arrays( + predictor_nodes, + binned_left_cat_bitsets, + raw_left_cat_bitsets, + grower_node.right_child, + binning_thresholds=binning_thresholds, + n_bins_non_missing=n_bins_non_missing, + next_free_node_idx=next_free_node_idx, + next_free_bitset_idx=next_free_bitset_idx, + ) diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..59bb6499c450114db3171342d7bb97111db64b81 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py @@ -0,0 +1,146 @@ +""" +This module contains the TreePredictor class which is used for prediction. 
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numpy as np + +from ._predictor import ( + _compute_partial_dependence, + _predict_from_binned_data, + _predict_from_raw_data, +) +from .common import PREDICTOR_RECORD_DTYPE, Y_DTYPE + + +class TreePredictor: + """Tree class used for predictions. + + Parameters + ---------- + nodes : ndarray of PREDICTOR_RECORD_DTYPE + The nodes of the tree. + binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32 + Array of bitsets for binned categories used in predict_binned when a + split is categorical. + raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32 + Array of bitsets for raw categories used in predict when a split is + categorical. + """ + + def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets): + self.nodes = nodes + self.binned_left_cat_bitsets = binned_left_cat_bitsets + self.raw_left_cat_bitsets = raw_left_cat_bitsets + + def get_n_leaf_nodes(self): + """Return number of leaves.""" + return int(self.nodes["is_leaf"].sum()) + + def get_max_depth(self): + """Return maximum depth among all leaves.""" + return int(self.nodes["depth"].max()) + + def predict(self, X, known_cat_bitsets, f_idx_map, n_threads): + """Predict raw values for non-binned data. + + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + The input samples. + + known_cat_bitsets : ndarray of shape (n_categorical_features, 8) + Array of bitsets of known categories, for each categorical feature. + + f_idx_map : ndarray of shape (n_features,) + Map from original feature index to the corresponding index in the + known_cat_bitsets array. + + n_threads : int + Number of OpenMP threads to use. + + Returns + ------- + y : ndarray, shape (n_samples,) + The raw predicted values. 
+ """ + out = np.empty(X.shape[0], dtype=Y_DTYPE) + + _predict_from_raw_data( + self.nodes, + X, + self.raw_left_cat_bitsets, + known_cat_bitsets, + f_idx_map, + n_threads, + out, + ) + return out + + def predict_binned(self, X, missing_values_bin_idx, n_threads): + """Predict raw values for binned data. + + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + The input samples. + missing_values_bin_idx : uint8 + Index of the bin that is used for missing values. This is the + index of the last bin and is always equal to max_bins (as passed + to the GBDT classes), or equivalently to n_bins - 1. + n_threads : int + Number of OpenMP threads to use. + + Returns + ------- + y : ndarray, shape (n_samples,) + The raw predicted values. + """ + out = np.empty(X.shape[0], dtype=Y_DTYPE) + _predict_from_binned_data( + self.nodes, + X, + self.binned_left_cat_bitsets, + missing_values_bin_idx, + n_threads, + out, + ) + return out + + def compute_partial_dependence(self, grid, target_features, out): + """Fast partial dependence computation. + + Parameters + ---------- + grid : ndarray, shape (n_samples, n_target_features) + The grid points on which the partial dependence should be + evaluated. + target_features : ndarray, shape (n_target_features) + The set of target features for which the partial dependence + should be evaluated. + out : ndarray, shape (n_samples) + The value of the partial dependence function on each grid + point. + """ + _compute_partial_dependence(self.nodes, grid, target_features, out) + + def __setstate__(self, state): + try: + super().__setstate__(state) + except AttributeError: + self.__dict__.update(state) + + # The dtype of feature_idx is np.intp which is platform dependent. Here, we + # make sure that saving and loading on different bitness systems works without + # errors. For instance, on a 64 bit Python runtime, np.intp = np.int64, + # while on 32 bit np.intp = np.int32. 
+ # + # TODO: consider always using platform agnostic dtypes for fitted + # estimator attributes. For this particular estimator, this would + # mean replacing the intp field of PREDICTOR_RECORD_DTYPE by an int32 + # field. Ideally this should be done consistently throughout + # scikit-learn along with a common test. + if self.nodes.dtype != PREDICTOR_RECORD_DTYPE: + self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind") diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.pyx b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.pyx new file mode 100644 index 0000000000000000000000000000000000000000..bb0c34876a3d0a5ce89ae122b2b66a454965010a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.pyx @@ -0,0 +1,1191 @@ +"""This module contains routines and data structures to: + +- Find the best possible split of a node. For a given node, a split is + characterized by a feature and a bin. +- Apply a split to a node, i.e. split the indices of the samples at the node + into the newly created left and right children. +""" +# Author: Nicolas Hug + +cimport cython +from cython.parallel import prange +import numpy as np +from libc.math cimport INFINITY, ceil +from libc.stdlib cimport malloc, free, qsort +from libc.string cimport memcpy + +from ...utils._typedefs cimport uint8_t +from .common cimport X_BINNED_DTYPE_C +from .common cimport Y_DTYPE_C +from .common cimport hist_struct +from .common cimport BITSET_INNER_DTYPE_C +from .common cimport BITSET_DTYPE_C +from .common cimport MonotonicConstraint +from ._bitset cimport init_bitset +from ._bitset cimport set_bitset +from ._bitset cimport in_bitset + + +cdef struct split_info_struct: + # Same as the SplitInfo class, but we need a C struct to use it in the + # nogil sections and to use in arrays. 
+    # gain of the best split found; -1 means no valid split for this feature
+    Y_DTYPE_C gain
+    int feature_idx
+    unsigned int bin_idx
+    uint8_t missing_go_to_left
+    Y_DTYPE_C sum_gradient_left
+    Y_DTYPE_C sum_gradient_right
+    Y_DTYPE_C sum_hessian_left
+    Y_DTYPE_C sum_hessian_right
+    unsigned int n_samples_left
+    unsigned int n_samples_right
+    Y_DTYPE_C value_left
+    Y_DTYPE_C value_right
+    uint8_t is_categorical
+    BITSET_DTYPE_C left_cat_bitset
+
+
+# used in categorical splits for sorting categories by increasing values of
+# sum_gradients / sum_hessians
+cdef struct categorical_info:
+    X_BINNED_DTYPE_C bin_idx
+    Y_DTYPE_C value
+
+
+class SplitInfo:
+    """Pure data class to store information about a potential split.
+
+    Parameters
+    ----------
+    gain : float
+        The gain of the split.
+    feature_idx : int
+        The index of the feature to be split.
+    bin_idx : int
+        The index of the bin on which the split is made. Should be ignored if
+        `is_categorical` is True: `left_cat_bitset` will be used to determine
+        the split.
+    missing_go_to_left : bool
+        Whether missing values should go to the left child. This is used
+        whether the split is categorical or not.
+    sum_gradient_left : float
+        The sum of the gradients of all the samples in the left child.
+    sum_hessian_left : float
+        The sum of the hessians of all the samples in the left child.
+    sum_gradient_right : float
+        The sum of the gradients of all the samples in the right child.
+    sum_hessian_right : float
+        The sum of the hessians of all the samples in the right child.
+    n_samples_left : int, default=0
+        The number of samples in the left child.
+    n_samples_right : int
+        The number of samples in the right child.
+    value_left : float
+        The bounded value of the left child.
+    value_right : float
+        The bounded value of the right child.
+    is_categorical : bool
+        Whether the split is done on a categorical feature.
+    left_cat_bitset : ndarray of shape=(8,), dtype=uint32 or None
+        Bitset representing the categories that go to the left. This is used
+        only when `is_categorical` is True.
+        Note that missing values are part of that bitset if there are missing
+        values in the training data. For missing values, we rely on that
+        bitset for splitting, but at prediction time, we rely on
+        missing_go_to_left.
+    """
+    def __init__(self, gain, feature_idx, bin_idx,
+                 missing_go_to_left, sum_gradient_left, sum_hessian_left,
+                 sum_gradient_right, sum_hessian_right, n_samples_left,
+                 n_samples_right, value_left, value_right,
+                 is_categorical, left_cat_bitset):
+        self.gain = gain
+        self.feature_idx = feature_idx
+        self.bin_idx = bin_idx
+        self.missing_go_to_left = missing_go_to_left
+        self.sum_gradient_left = sum_gradient_left
+        self.sum_hessian_left = sum_hessian_left
+        self.sum_gradient_right = sum_gradient_right
+        self.sum_hessian_right = sum_hessian_right
+        self.n_samples_left = n_samples_left
+        self.n_samples_right = n_samples_right
+        self.value_left = value_left
+        self.value_right = value_right
+        self.is_categorical = is_categorical
+        self.left_cat_bitset = left_cat_bitset
+
+
+@cython.final
+cdef class Splitter:
+    """Splitter used to find the best possible split at each node.
+
+    A split (see SplitInfo) is characterized by a feature and a bin.
+
+    The Splitter is also responsible for partitioning the samples among the
+    leaves of the tree (see split_indices() and the partition attribute).
+
+    Parameters
+    ----------
+    X_binned : ndarray of int, shape (n_samples, n_features)
+        The binned input samples. Must be Fortran-aligned.
+    n_bins_non_missing : ndarray, shape (n_features,)
+        For each feature, gives the number of bins actually used for
+        non-missing values.
+    missing_values_bin_idx : uint8
+        Index of the bin that is used for missing values. This is the index of
+        the last bin and is always equal to max_bins (as passed to the GBDT
+        classes), or equivalently to n_bins - 1.
+    has_missing_values : ndarray, shape (n_features,)
+        Whether missing values were observed in the training data, for each
+        feature.
+    is_categorical : ndarray of bool of shape (n_features,)
+        Indicates categorical features.
+    monotonic_cst : ndarray of int of shape (n_features,), dtype=int
+        Indicates the monotonic constraint to enforce on each feature.
+          - 1: monotonic increase
+          - 0: no constraint
+          - -1: monotonic decrease
+
+        Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
+    l2_regularization : float
+        The L2 regularization parameter.
+    min_hessian_to_split : float, default=1e-3
+        The minimum sum of hessians needed in each node. Splits that result in
+        at least one child having a sum of hessians less than
+        min_hessian_to_split are discarded.
+    min_samples_leaf : int, default=20
+        The minimum number of samples per leaf.
+    min_gain_to_split : float, default=0.0
+        The minimum gain needed to split a node. Splits with lower gain will
+        be ignored.
+    hessians_are_constant: bool, default is False
+        Whether hessians are constant.
+    feature_fraction_per_split : float, default=1
+        Proportion of randomly chosen features in each and every node split.
+        This is a form of regularization, smaller values make the trees weaker
+        learners and might prevent overfitting.
+    rng : Generator
+    n_threads : int, default=1
+        Number of OpenMP threads to use.
+ """ + cdef public: + const X_BINNED_DTYPE_C [::1, :] X_binned + unsigned int n_features + const unsigned int [::1] n_bins_non_missing + uint8_t missing_values_bin_idx + const uint8_t [::1] has_missing_values + const uint8_t [::1] is_categorical + const signed char [::1] monotonic_cst + uint8_t hessians_are_constant + Y_DTYPE_C l2_regularization + Y_DTYPE_C min_hessian_to_split + unsigned int min_samples_leaf + Y_DTYPE_C min_gain_to_split + Y_DTYPE_C feature_fraction_per_split + rng + + unsigned int [::1] partition + unsigned int [::1] left_indices_buffer + unsigned int [::1] right_indices_buffer + int n_threads + + def __init__(self, + const X_BINNED_DTYPE_C [::1, :] X_binned, + const unsigned int [::1] n_bins_non_missing, + const uint8_t missing_values_bin_idx, + const uint8_t [::1] has_missing_values, + const uint8_t [::1] is_categorical, + const signed char [::1] monotonic_cst, + Y_DTYPE_C l2_regularization, + Y_DTYPE_C min_hessian_to_split=1e-3, + unsigned int min_samples_leaf=20, + Y_DTYPE_C min_gain_to_split=0., + uint8_t hessians_are_constant=False, + Y_DTYPE_C feature_fraction_per_split=1.0, + rng=np.random.RandomState(), + unsigned int n_threads=1): + + self.X_binned = X_binned + self.n_features = X_binned.shape[1] + self.n_bins_non_missing = n_bins_non_missing + self.missing_values_bin_idx = missing_values_bin_idx + self.has_missing_values = has_missing_values + self.is_categorical = is_categorical + self.monotonic_cst = monotonic_cst + self.l2_regularization = l2_regularization + self.min_hessian_to_split = min_hessian_to_split + self.min_samples_leaf = min_samples_leaf + self.min_gain_to_split = min_gain_to_split + self.hessians_are_constant = hessians_are_constant + self.feature_fraction_per_split = feature_fraction_per_split + self.rng = rng + self.n_threads = n_threads + + # The partition array maps each sample index into the leaves of the + # tree (a leaf in this context is a node that isn't split yet, not + # necessarily a 'finalized' leaf). 
Initially, the root contains all + # the indices, e.g.: + # partition = [abcdefghijkl] + # After a call to split_indices, it may look e.g. like this: + # partition = [cef|abdghijkl] + # we have 2 leaves, the left one is at position 0 and the second one at + # position 3. The order of the samples is irrelevant. + self.partition = np.arange(X_binned.shape[0], dtype=np.uint32) + # buffers used in split_indices to support parallel splitting. + self.left_indices_buffer = np.empty_like(self.partition) + self.right_indices_buffer = np.empty_like(self.partition) + + def split_indices(Splitter self, split_info, unsigned int [::1] + sample_indices): + """Split samples into left and right arrays. + + The split is performed according to the best possible split + (split_info). + + Ultimately, this is nothing but a partition of the sample_indices + array with a given pivot, exactly like a quicksort subroutine. + + Parameters + ---------- + split_info : SplitInfo + The SplitInfo of the node to split. + sample_indices : ndarray of unsigned int, shape (n_samples_at_node,) + The indices of the samples at the node to split. This is a view + on self.partition, and it is modified inplace by placing the + indices of the left child at the beginning, and the indices of + the right child at the end. + + Returns + ------- + left_indices : ndarray of int, shape (n_left_samples,) + The indices of the samples in the left child. This is a view on + self.partition. + right_indices : ndarray of int, shape (n_right_samples,) + The indices of the samples in the right child. This is a view on + self.partition. + right_child_position : int + The position of the right child in ``sample_indices``. + """ + # This is a multi-threaded implementation inspired by lightgbm. Here + # is a quick break down. Let's suppose we want to split a node with 24 + # samples named from a to x. 
self.partition looks like this (the * are + # indices in other leaves that we don't care about): + # partition = [*************abcdefghijklmnopqrstuvwx****************] + # ^ ^ + # node_position node_position + node.n_samples + + # Ultimately, we want to reorder the samples inside the boundaries of + # the leaf (which becomes a node) to now represent the samples in its + # left and right child. For example: + # partition = [*************abefilmnopqrtuxcdghjksvw*****************] + # ^ ^ + # left_child_pos right_child_pos + # Note that left_child_pos always takes the value of node_position, + # and right_child_pos = left_child_pos + left_child.n_samples. The + # order of the samples inside a leaf is irrelevant. + + # 1. sample_indices is a view on this region a..x. We conceptually + # divide it into n_threads regions. Each thread will be responsible + # for its own region. Here is an example with 4 threads: + # sample_indices = [abcdef|ghijkl|mnopqr|stuvwx] + # 2. Each thread processes 6 = 24 // 4 entries and maps them into + # left_indices_buffer or right_indices_buffer. For example, we could + # have the following mapping ('.' denotes an undefined entry): + # - left_indices_buffer = [abef..|il....|mnopqr|tux...] + # - right_indices_buffer = [cd....|ghjk..|......|svw...] + # 3. We keep track of the start positions of the regions (the '|') in + # ``offset_in_buffers`` as well as the size of each region. We also + # keep track of the number of samples put into the left/right child + # by each thread. Concretely: + # - left_counts = [4, 2, 6, 3] + # - right_counts = [2, 4, 0, 3] + # 4. Finally, we put left/right_indices_buffer back into the + # sample_indices, without any undefined entries and the partition + # looks as expected + # partition = [*************abefilmnopqrtuxcdghjksvw***************] + + # Note: We here show left/right_indices_buffer as being the same size + # as sample_indices for simplicity, but in reality they are of the + # same size as partition. 
+ + cdef: + int n_samples = sample_indices.shape[0] + X_BINNED_DTYPE_C bin_idx = split_info.bin_idx + uint8_t missing_go_to_left = split_info.missing_go_to_left + uint8_t missing_values_bin_idx = self.missing_values_bin_idx + int feature_idx = split_info.feature_idx + const X_BINNED_DTYPE_C [::1] X_binned = \ + self.X_binned[:, feature_idx] + unsigned int [::1] left_indices_buffer = self.left_indices_buffer + unsigned int [::1] right_indices_buffer = self.right_indices_buffer + uint8_t is_categorical = split_info.is_categorical + # Cython is unhappy if we set left_cat_bitset to + # split_info.left_cat_bitset directly, so we need a tmp var + BITSET_INNER_DTYPE_C [:] cat_bitset_tmp = split_info.left_cat_bitset + BITSET_DTYPE_C left_cat_bitset + int n_threads = self.n_threads + + int [:] sizes = np.full(n_threads, n_samples // n_threads, + dtype=np.int32) + int [:] offset_in_buffers = np.zeros(n_threads, dtype=np.int32) + int [:] left_counts = np.empty(n_threads, dtype=np.int32) + int [:] right_counts = np.empty(n_threads, dtype=np.int32) + int left_count + int right_count + int start + int stop + int i + int thread_idx + int sample_idx + int right_child_position + uint8_t turn_left + int [:] left_offset = np.zeros(n_threads, dtype=np.int32) + int [:] right_offset = np.zeros(n_threads, dtype=np.int32) + + # only set left_cat_bitset when is_categorical is True + if is_categorical: + left_cat_bitset = &cat_bitset_tmp[0] + + with nogil: + for thread_idx in range(n_samples % n_threads): + sizes[thread_idx] += 1 + + for thread_idx in range(1, n_threads): + offset_in_buffers[thread_idx] = \ + offset_in_buffers[thread_idx - 1] + sizes[thread_idx - 1] + + # map indices from sample_indices to left/right_indices_buffer + for thread_idx in prange(n_threads, schedule='static', + chunksize=1, num_threads=n_threads): + left_count = 0 + right_count = 0 + + start = offset_in_buffers[thread_idx] + stop = start + sizes[thread_idx] + for i in range(start, stop): + sample_idx = 
sample_indices[i] + turn_left = sample_goes_left( + missing_go_to_left, + missing_values_bin_idx, bin_idx, + X_binned[sample_idx], is_categorical, + left_cat_bitset) + + if turn_left: + left_indices_buffer[start + left_count] = sample_idx + left_count = left_count + 1 + else: + right_indices_buffer[start + right_count] = sample_idx + right_count = right_count + 1 + + left_counts[thread_idx] = left_count + right_counts[thread_idx] = right_count + + # position of right child = just after the left child + right_child_position = 0 + for thread_idx in range(n_threads): + right_child_position += left_counts[thread_idx] + + # offset of each thread in sample_indices for left and right + # child, i.e. where each thread will start to write. + right_offset[0] = right_child_position + for thread_idx in range(1, n_threads): + left_offset[thread_idx] = \ + left_offset[thread_idx - 1] + left_counts[thread_idx - 1] + right_offset[thread_idx] = \ + right_offset[thread_idx - 1] + right_counts[thread_idx - 1] + + # map indices in left/right_indices_buffer back into + # sample_indices. This also updates self.partition since + # sample_indices is a view. + for thread_idx in prange(n_threads, schedule='static', + chunksize=1, num_threads=n_threads): + memcpy( + &sample_indices[left_offset[thread_idx]], + &left_indices_buffer[offset_in_buffers[thread_idx]], + sizeof(unsigned int) * left_counts[thread_idx] + ) + if right_counts[thread_idx] > 0: + # If we're splitting the rightmost node of the tree, i.e. the + # rightmost node in the partition array, and if n_threads >= 2, one + # might have right_counts[-1] = 0 and right_offset[-1] = len(sample_indices) + # leading to evaluating + # + # &sample_indices[right_offset[-1]] = &samples_indices[n_samples_at_node] + # = &partition[n_samples_in_tree] + # + # which is an out-of-bounds read access that can cause a segmentation fault. 
+ # When boundscheck=True, removing this check produces this exception: + # + # IndexError: Out of bounds on buffer access + # + memcpy( + &sample_indices[right_offset[thread_idx]], + &right_indices_buffer[offset_in_buffers[thread_idx]], + sizeof(unsigned int) * right_counts[thread_idx] + ) + + return (sample_indices[:right_child_position], + sample_indices[right_child_position:], + right_child_position) + + def find_node_split( + Splitter self, + unsigned int n_samples, + hist_struct [:, ::1] histograms, # IN + const Y_DTYPE_C sum_gradients, + const Y_DTYPE_C sum_hessians, + const Y_DTYPE_C value, + const Y_DTYPE_C lower_bound=-INFINITY, + const Y_DTYPE_C upper_bound=INFINITY, + const unsigned int [:] allowed_features=None, + ): + """For each feature, find the best bin to split on at a given node. + + Return the best split info among all features. + + Parameters + ---------- + n_samples : int + The number of samples at the node. + histograms : ndarray of HISTOGRAM_DTYPE of \ + shape (n_features, max_bins) + The histograms of the current node. + sum_gradients : float + The sum of the gradients for each sample at the node. + sum_hessians : float + The sum of the hessians for each sample at the node. + value : float + The bounded value of the current node. We directly pass the value + instead of re-computing it from sum_gradients and sum_hessians, + because we need to compute the loss and the gain based on the + *bounded* value: computing the value from + sum_gradients / sum_hessians would give the unbounded value, and + the interaction with min_gain_to_split would not be correct + anymore. Side note: we can't use the lower_bound / upper_bound + parameters either because these refer to the bounds of the + children, not the bounds of the current node. + lower_bound : float + Lower bound for the children values for respecting the monotonic + constraints. + upper_bound : float + Upper bound for the children values for respecting the monotonic + constraints. 
+ allowed_features : None or ndarray, dtype=np.uint32 + Indices of the features that are allowed by interaction constraints to be + split. + + Returns + ------- + best_split_info : SplitInfo + The info about the best possible split among all features. + """ + cdef: + int feature_idx + int split_info_idx + int best_split_info_idx + int n_allowed_features + split_info_struct split_info + split_info_struct * split_infos + const uint8_t [::1] has_missing_values = self.has_missing_values + const uint8_t [::1] is_categorical = self.is_categorical + const signed char [::1] monotonic_cst = self.monotonic_cst + int n_threads = self.n_threads + bint has_interaction_cst = False + Y_DTYPE_C feature_fraction_per_split = self.feature_fraction_per_split + uint8_t [:] subsample_mask # same as npy_bool + int n_subsampled_features + + has_interaction_cst = allowed_features is not None + if has_interaction_cst: + n_allowed_features = allowed_features.shape[0] + else: + n_allowed_features = self.n_features + + if feature_fraction_per_split < 1.0: + # We do all random sampling before the nogil and make sure that we sample + # exactly n_subsampled_features >= 1 features. + n_subsampled_features = max( + 1, + int(ceil(feature_fraction_per_split * n_allowed_features)), + ) + subsample_mask_arr = np.full(n_allowed_features, False) + subsample_mask_arr[:n_subsampled_features] = True + self.rng.shuffle(subsample_mask_arr) + # https://github.com/numpy/numpy/issues/18273 + subsample_mask = subsample_mask_arr + + with nogil: + + split_infos = malloc( + n_allowed_features * sizeof(split_info_struct)) + + # split_info_idx is index of split_infos of size n_allowed_features. + # features_idx is the index of the feature column in X. 
+ for split_info_idx in prange(n_allowed_features, schedule='static', + num_threads=n_threads): + if has_interaction_cst: + feature_idx = allowed_features[split_info_idx] + else: + feature_idx = split_info_idx + + split_infos[split_info_idx].feature_idx = feature_idx + + # For each feature, find best bin to split on + # Start with a gain of -1 if no better split is found, that + # means one of the constraints isn't respected + # (min_samples_leaf, etc.) and the grower will later turn the + # node into a leaf. + split_infos[split_info_idx].gain = -1 + split_infos[split_info_idx].is_categorical = is_categorical[feature_idx] + + # Note that subsample_mask is indexed by split_info_idx and not by + # feature_idx because we only need to exclude the same features again + # and again. We do NOT need to access the features directly by using + # allowed_features. + if feature_fraction_per_split < 1.0 and not subsample_mask[split_info_idx]: + continue + + if is_categorical[feature_idx]: + self._find_best_bin_to_split_category( + feature_idx, has_missing_values[feature_idx], + histograms, n_samples, sum_gradients, sum_hessians, + value, monotonic_cst[feature_idx], lower_bound, + upper_bound, &split_infos[split_info_idx]) + else: + # We will scan bins from left to right (in all cases), and + # if there are any missing values, we will also scan bins + # from right to left. This way, we can consider whichever + # case yields the best gain: either missing values go to + # the right (left to right scan) or to the left (right to + # left case). See algo 3 from the XGBoost paper + # https://arxiv.org/abs/1603.02754 + # Note: for the categorical features above, this isn't + # needed since missing values are considered a native + # category. 
+ self._find_best_bin_to_split_left_to_right( + feature_idx, has_missing_values[feature_idx], + histograms, n_samples, sum_gradients, sum_hessians, + value, monotonic_cst[feature_idx], + lower_bound, upper_bound, &split_infos[split_info_idx]) + + if has_missing_values[feature_idx]: + # We need to explore both directions to check whether + # sending the nans to the left child would lead to a higher + # gain + self._find_best_bin_to_split_right_to_left( + feature_idx, histograms, n_samples, + sum_gradients, sum_hessians, + value, monotonic_cst[feature_idx], + lower_bound, upper_bound, &split_infos[split_info_idx]) + + # then compute best possible split among all features + # split_info is set to the best of split_infos + best_split_info_idx = self._find_best_feature_to_split_helper( + split_infos, n_allowed_features + ) + split_info = split_infos[best_split_info_idx] + + out = SplitInfo( + split_info.gain, + split_info.feature_idx, + split_info.bin_idx, + split_info.missing_go_to_left, + split_info.sum_gradient_left, + split_info.sum_hessian_left, + split_info.sum_gradient_right, + split_info.sum_hessian_right, + split_info.n_samples_left, + split_info.n_samples_right, + split_info.value_left, + split_info.value_right, + split_info.is_categorical, + None, # left_cat_bitset will only be set if the split is categorical + ) + # Only set bitset if the split is categorical + if split_info.is_categorical: + out.left_cat_bitset = np.asarray(split_info.left_cat_bitset, dtype=np.uint32) + + free(split_infos) + return out + + cdef int _find_best_feature_to_split_helper( + self, + split_info_struct * split_infos, # IN + int n_allowed_features, + ) noexcept nogil: + """Return the index of split_infos with the best feature split.""" + cdef: + int split_info_idx + int best_split_info_idx = 0 + + for split_info_idx in range(1, n_allowed_features): + if (split_infos[split_info_idx].gain > split_infos[best_split_info_idx].gain): + best_split_info_idx = split_info_idx + return 
best_split_info_idx + + cdef void _find_best_bin_to_split_left_to_right( + Splitter self, + unsigned int feature_idx, + uint8_t has_missing_values, + const hist_struct [:, ::1] histograms, # IN + unsigned int n_samples, + Y_DTYPE_C sum_gradients, + Y_DTYPE_C sum_hessians, + Y_DTYPE_C value, + signed char monotonic_cst, + Y_DTYPE_C lower_bound, + Y_DTYPE_C upper_bound, + split_info_struct * split_info) noexcept nogil: # OUT + """Find best bin to split on for a given feature. + + Splits that do not satisfy the splitting constraints + (min_gain_to_split, etc.) are discarded here. + + We scan node from left to right. This version is called whether there + are missing values or not. If any, missing values are assigned to the + right node. + """ + cdef: + unsigned int bin_idx + unsigned int n_samples_left + unsigned int n_samples_right + unsigned int n_samples_ = n_samples + # We set the 'end' variable such that the last non-missing-values + # bin never goes to the left child (which would result in and + # empty right child), unless there are missing values, since these + # would go to the right child. + unsigned int end = \ + self.n_bins_non_missing[feature_idx] - 1 + has_missing_values + Y_DTYPE_C sum_hessian_left + Y_DTYPE_C sum_hessian_right + Y_DTYPE_C sum_gradient_left + Y_DTYPE_C sum_gradient_right + Y_DTYPE_C loss_current_node + Y_DTYPE_C gain + uint8_t found_better_split = False + + Y_DTYPE_C best_sum_hessian_left + Y_DTYPE_C best_sum_gradient_left + unsigned int best_bin_idx + unsigned int best_n_samples_left + Y_DTYPE_C best_gain = -1 + + sum_gradient_left, sum_hessian_left = 0., 0. 
+ n_samples_left = 0 + + loss_current_node = _loss_from_value(value, sum_gradients) + + for bin_idx in range(end): + n_samples_left += histograms[feature_idx, bin_idx].count + n_samples_right = n_samples_ - n_samples_left + + if self.hessians_are_constant: + sum_hessian_left += histograms[feature_idx, bin_idx].count + else: + sum_hessian_left += \ + histograms[feature_idx, bin_idx].sum_hessians + sum_hessian_right = sum_hessians - sum_hessian_left + + sum_gradient_left += histograms[feature_idx, bin_idx].sum_gradients + sum_gradient_right = sum_gradients - sum_gradient_left + + if n_samples_left < self.min_samples_leaf: + continue + if n_samples_right < self.min_samples_leaf: + # won't get any better + break + + if sum_hessian_left < self.min_hessian_to_split: + continue + if sum_hessian_right < self.min_hessian_to_split: + # won't get any better (hessians are > 0 since loss is convex) + break + + gain = _split_gain(sum_gradient_left, sum_hessian_left, + sum_gradient_right, sum_hessian_right, + loss_current_node, + monotonic_cst, + lower_bound, + upper_bound, + self.l2_regularization) + + if gain > best_gain and gain > self.min_gain_to_split: + found_better_split = True + best_gain = gain + best_bin_idx = bin_idx + best_sum_gradient_left = sum_gradient_left + best_sum_hessian_left = sum_hessian_left + best_n_samples_left = n_samples_left + + if found_better_split: + split_info.gain = best_gain + split_info.bin_idx = best_bin_idx + # we scan from left to right so missing values go to the right + split_info.missing_go_to_left = False + split_info.sum_gradient_left = best_sum_gradient_left + split_info.sum_gradient_right = sum_gradients - best_sum_gradient_left + split_info.sum_hessian_left = best_sum_hessian_left + split_info.sum_hessian_right = sum_hessians - best_sum_hessian_left + split_info.n_samples_left = best_n_samples_left + split_info.n_samples_right = n_samples - best_n_samples_left + + # We recompute best values here but it's cheap + split_info.value_left 
= compute_node_value( + split_info.sum_gradient_left, split_info.sum_hessian_left, + lower_bound, upper_bound, self.l2_regularization) + + split_info.value_right = compute_node_value( + split_info.sum_gradient_right, split_info.sum_hessian_right, + lower_bound, upper_bound, self.l2_regularization) + + cdef void _find_best_bin_to_split_right_to_left( + self, + unsigned int feature_idx, + const hist_struct [:, ::1] histograms, # IN + unsigned int n_samples, + Y_DTYPE_C sum_gradients, + Y_DTYPE_C sum_hessians, + Y_DTYPE_C value, + signed char monotonic_cst, + Y_DTYPE_C lower_bound, + Y_DTYPE_C upper_bound, + split_info_struct * split_info) noexcept nogil: # OUT + """Find best bin to split on for a given feature. + + Splits that do not satisfy the splitting constraints + (min_gain_to_split, etc.) are discarded here. + + We scan node from right to left. This version is only called when + there are missing values. Missing values are assigned to the left + child. + + If no missing value are present in the data this method isn't called + since only calling _find_best_bin_to_split_left_to_right is enough. + """ + + cdef: + unsigned int bin_idx + unsigned int n_samples_left + unsigned int n_samples_right + unsigned int n_samples_ = n_samples + Y_DTYPE_C sum_hessian_left + Y_DTYPE_C sum_hessian_right + Y_DTYPE_C sum_gradient_left + Y_DTYPE_C sum_gradient_right + Y_DTYPE_C loss_current_node + Y_DTYPE_C gain + unsigned int start = self.n_bins_non_missing[feature_idx] - 2 + uint8_t found_better_split = False + + Y_DTYPE_C best_sum_hessian_left + Y_DTYPE_C best_sum_gradient_left + unsigned int best_bin_idx + unsigned int best_n_samples_left + Y_DTYPE_C best_gain = split_info.gain # computed during previous scan + + sum_gradient_right, sum_hessian_right = 0., 0. 
            # (tail of the right-to-left bin scan of the previous split-finder
            # method; its signature lies above this chunk — left byte-identical)
            n_samples_right = 0

            loss_current_node = _loss_from_value(value, sum_gradients)

            for bin_idx in range(start, -1, -1):
                n_samples_right += histograms[feature_idx, bin_idx + 1].count
                n_samples_left = n_samples_ - n_samples_right

                if self.hessians_are_constant:
                    sum_hessian_right += histograms[feature_idx, bin_idx + 1].count
                else:
                    sum_hessian_right += \
                        histograms[feature_idx, bin_idx + 1].sum_hessians
                sum_hessian_left = sum_hessians - sum_hessian_right

                sum_gradient_right += \
                    histograms[feature_idx, bin_idx + 1].sum_gradients
                sum_gradient_left = sum_gradients - sum_gradient_right

                if n_samples_right < self.min_samples_leaf:
                    continue
                if n_samples_left < self.min_samples_leaf:
                    # won't get any better
                    break

                if sum_hessian_right < self.min_hessian_to_split:
                    continue
                if sum_hessian_left < self.min_hessian_to_split:
                    # won't get any better (hessians are > 0 since loss is convex)
                    break

                gain = _split_gain(sum_gradient_left, sum_hessian_left,
                                   sum_gradient_right, sum_hessian_right,
                                   loss_current_node,
                                   monotonic_cst,
                                   lower_bound,
                                   upper_bound,
                                   self.l2_regularization)

                if gain > best_gain and gain > self.min_gain_to_split:
                    found_better_split = True
                    best_gain = gain
                    best_bin_idx = bin_idx
                    best_sum_gradient_left = sum_gradient_left
                    best_sum_hessian_left = sum_hessian_left
                    best_n_samples_left = n_samples_left

        if found_better_split:
            split_info.gain = best_gain
            split_info.bin_idx = best_bin_idx
            # we scan from right to left so missing values go to the left
            split_info.missing_go_to_left = True
            split_info.sum_gradient_left = best_sum_gradient_left
            split_info.sum_gradient_right = sum_gradients - best_sum_gradient_left
            split_info.sum_hessian_left = best_sum_hessian_left
            split_info.sum_hessian_right = sum_hessians - best_sum_hessian_left
            split_info.n_samples_left = best_n_samples_left
            split_info.n_samples_right = n_samples - best_n_samples_left

            # We recompute best values here but it's cheap
            split_info.value_left = compute_node_value(
                split_info.sum_gradient_left, split_info.sum_hessian_left,
                lower_bound, upper_bound, self.l2_regularization)

            split_info.value_right = compute_node_value(
                split_info.sum_gradient_right, split_info.sum_hessian_right,
                lower_bound, upper_bound, self.l2_regularization)

    cdef void _find_best_bin_to_split_category(
            self,
            unsigned int feature_idx,
            uint8_t has_missing_values,
            const hist_struct [:, ::1] histograms,  # IN
            unsigned int n_samples,
            Y_DTYPE_C sum_gradients,
            Y_DTYPE_C sum_hessians,
            Y_DTYPE_C value,
            char monotonic_cst,
            Y_DTYPE_C lower_bound,
            Y_DTYPE_C upper_bound,
            split_info_struct * split_info) noexcept nogil:  # OUT
        """Find best split for categorical features.

        Categories are first sorted according to their variance, and then
        a scan is performed as if categories were ordered quantities.

        Ref: "On Grouping for Maximum Homogeneity", Walter D. Fisher
        """

        cdef:
            unsigned int bin_idx
            unsigned int n_bins_non_missing = self.n_bins_non_missing[feature_idx]
            unsigned int missing_values_bin_idx = self.missing_values_bin_idx
            categorical_info * cat_infos
            unsigned int sorted_cat_idx
            unsigned int n_used_bins = 0
            int [2] scan_direction
            int direction = 0
            int best_direction = 0
            unsigned int middle
            unsigned int i
            const hist_struct[::1] feature_hist = histograms[feature_idx, :]
            Y_DTYPE_C sum_gradients_bin
            Y_DTYPE_C sum_hessians_bin
            Y_DTYPE_C loss_current_node
            Y_DTYPE_C sum_gradient_left, sum_hessian_left
            Y_DTYPE_C sum_gradient_right, sum_hessian_right
            unsigned int n_samples_left, n_samples_right
            Y_DTYPE_C gain
            Y_DTYPE_C best_gain = -1.0
            uint8_t found_better_split = False
            Y_DTYPE_C best_sum_hessian_left
            Y_DTYPE_C best_sum_gradient_left
            unsigned int best_n_samples_left
            unsigned int best_cat_infos_thresh
            # Reduces the effect of noises in categorical features,
            # especially for categories with few data. Called cat_smooth in
            # LightGBM. TODO: Make this user adjustable?
            Y_DTYPE_C MIN_CAT_SUPPORT = 10.
            # this is equal to 1 for losses where hessians are constant
            Y_DTYPE_C support_factor = n_samples / sum_hessians

        # Details on the split finding:
        # We first order categories by their sum_gradients / sum_hessians
        # values, and we exclude categories that don't respect MIN_CAT_SUPPORT
        # from this sorted array. Missing values are treated just like any
        # other category. The low-support categories will always be mapped to
        # the right child. We scan the sorted categories array from left to
        # right and from right to left, and we stop at the middle.

        # Considering ordered categories A B C D, with E being a low-support
        # category: A B C D
        #               ^
        #            midpoint
        # The scans will consider the following split-points:
        # * left to right:
        #   A - B C D E
        #   A B - C D E
        # * right to left:
        #   D - A B C E
        #   C D - A B E

        # Note that since we stop at the middle and since low-support
        # categories (E) are always mapped to the right, the following splits
        # aren't considered:
        # A E - B C D
        # D E - A B C
        # Basically, we're forcing E to always be mapped to the child that has
        # *at least half of the categories* (and this child is always the right
        # child, by convention).

        # Also note that if we scanned in only one direction (e.g. left to
        # right), we would only consider the following splits:
        # A - B C D E
        # A B - C D E
        # A B C - D E
        # and thus we would be missing on D - A B C E and on C D - A B E

        # NOTE(review): the `<categorical_info *>` casts below were restored —
        # the diff transport stripped angle-bracketed text.
        cat_infos = <categorical_info *> malloc(
            (n_bins_non_missing + has_missing_values) * sizeof(categorical_info))

        # fill cat_infos while filtering out categories based on MIN_CAT_SUPPORT
        for bin_idx in range(n_bins_non_missing):
            if self.hessians_are_constant:
                sum_hessians_bin = feature_hist[bin_idx].count
            else:
                sum_hessians_bin = feature_hist[bin_idx].sum_hessians
            if sum_hessians_bin * support_factor >= MIN_CAT_SUPPORT:
                cat_infos[n_used_bins].bin_idx = bin_idx
                sum_gradients_bin = feature_hist[bin_idx].sum_gradients

                cat_infos[n_used_bins].value = (
                    sum_gradients_bin / (sum_hessians_bin + MIN_CAT_SUPPORT)
                )
                n_used_bins += 1

        # Also add missing values bin so that nans are considered as a category
        if has_missing_values:
            if self.hessians_are_constant:
                sum_hessians_bin = feature_hist[missing_values_bin_idx].count
            else:
                sum_hessians_bin = feature_hist[missing_values_bin_idx].sum_hessians
            if sum_hessians_bin * support_factor >= MIN_CAT_SUPPORT:
                cat_infos[n_used_bins].bin_idx = missing_values_bin_idx
                sum_gradients_bin = (
                    feature_hist[missing_values_bin_idx].sum_gradients
                )

                cat_infos[n_used_bins].value = (
                    sum_gradients_bin / (sum_hessians_bin + MIN_CAT_SUPPORT)
                )
                n_used_bins += 1

        # not enough categories to form a split
        if n_used_bins <= 1:
            free(cat_infos)
            return

        qsort(cat_infos, n_used_bins, sizeof(categorical_info),
              compare_cat_infos)

        loss_current_node = _loss_from_value(value, sum_gradients)

        scan_direction[0], scan_direction[1] = 1, -1
        for direction in scan_direction:
            if direction == 1:
                middle = (n_used_bins + 1) // 2
            else:
                middle = (n_used_bins + 1) // 2 - 1

            # The categories we'll consider will go to the left child
            sum_gradient_left, sum_hessian_left = 0., 0.
            n_samples_left = 0

            for i in range(middle):
                sorted_cat_idx = i if direction == 1 else n_used_bins - 1 - i
                bin_idx = cat_infos[sorted_cat_idx].bin_idx

                n_samples_left += feature_hist[bin_idx].count
                n_samples_right = n_samples - n_samples_left

                if self.hessians_are_constant:
                    sum_hessian_left += feature_hist[bin_idx].count
                else:
                    sum_hessian_left += feature_hist[bin_idx].sum_hessians
                sum_hessian_right = sum_hessians - sum_hessian_left

                sum_gradient_left += feature_hist[bin_idx].sum_gradients
                sum_gradient_right = sum_gradients - sum_gradient_left

                if (
                    n_samples_left < self.min_samples_leaf or
                    sum_hessian_left < self.min_hessian_to_split
                ):
                    continue
                if (
                    n_samples_right < self.min_samples_leaf or
                    sum_hessian_right < self.min_hessian_to_split
                ):
                    break

                gain = _split_gain(sum_gradient_left, sum_hessian_left,
                                   sum_gradient_right, sum_hessian_right,
                                   loss_current_node, monotonic_cst,
                                   lower_bound, upper_bound,
                                   self.l2_regularization)
                if gain > best_gain and gain > self.min_gain_to_split:
                    found_better_split = True
                    best_gain = gain
                    best_cat_infos_thresh = sorted_cat_idx
                    best_sum_gradient_left = sum_gradient_left
                    best_sum_hessian_left = sum_hessian_left
                    best_n_samples_left = n_samples_left
                    best_direction = direction

        if found_better_split:
            split_info.gain = best_gain

            # split_info.bin_idx is unused for categorical splits: left_cat_bitset
            # is used instead and set below
            split_info.bin_idx = 0

            split_info.sum_gradient_left = best_sum_gradient_left
            split_info.sum_gradient_right = sum_gradients - best_sum_gradient_left
            split_info.sum_hessian_left = best_sum_hessian_left
            split_info.sum_hessian_right = sum_hessians - best_sum_hessian_left
            split_info.n_samples_left = best_n_samples_left
            split_info.n_samples_right = n_samples - best_n_samples_left

            # We recompute best values here but it's cheap
            split_info.value_left = compute_node_value(
                split_info.sum_gradient_left, split_info.sum_hessian_left,
                lower_bound, upper_bound, self.l2_regularization)

            split_info.value_right = compute_node_value(
                split_info.sum_gradient_right, split_info.sum_hessian_right,
                lower_bound, upper_bound, self.l2_regularization)

            # create bitset with values from best_cat_infos_thresh
            init_bitset(split_info.left_cat_bitset)
            if best_direction == 1:
                for sorted_cat_idx in range(best_cat_infos_thresh + 1):
                    bin_idx = cat_infos[sorted_cat_idx].bin_idx
                    set_bitset(split_info.left_cat_bitset, bin_idx)
            else:
                for sorted_cat_idx in range(n_used_bins - 1, best_cat_infos_thresh - 1, -1):
                    bin_idx = cat_infos[sorted_cat_idx].bin_idx
                    set_bitset(split_info.left_cat_bitset, bin_idx)

            if has_missing_values:
                split_info.missing_go_to_left = in_bitset(
                    split_info.left_cat_bitset, missing_values_bin_idx)

        free(cat_infos)


# qsort comparator: orders categorical_info entries by ascending .value
# (smoothed mean gradient of the category).
cdef int compare_cat_infos(const void * a, const void * b) noexcept nogil:
    return -1 if (<categorical_info *> a).value < (<categorical_info *> b).value else 1

cdef inline Y_DTYPE_C _split_gain(
        Y_DTYPE_C sum_gradient_left,
        Y_DTYPE_C sum_hessian_left,
        Y_DTYPE_C sum_gradient_right,
        Y_DTYPE_C sum_hessian_right,
        Y_DTYPE_C loss_current_node,
        signed char monotonic_cst,
        Y_DTYPE_C lower_bound,
        Y_DTYPE_C upper_bound,
        Y_DTYPE_C l2_regularization) noexcept nogil:
    """Loss reduction

    Compute the reduction in loss after taking a split, compared to keeping
    the node a leaf of the tree.

    See Equation 7 of:
    :arxiv:`T. Chen, C. Guestrin, (2016) XGBoost: A Scalable Tree Boosting System,
    <1603.02754>.`
    """
    cdef:
        Y_DTYPE_C gain
        Y_DTYPE_C value_left
        Y_DTYPE_C value_right

    # Compute values of potential left and right children
    value_left = compute_node_value(sum_gradient_left, sum_hessian_left,
                                    lower_bound, upper_bound,
                                    l2_regularization)
    value_right = compute_node_value(sum_gradient_right, sum_hessian_right,
                                     lower_bound, upper_bound,
                                     l2_regularization)

    if ((monotonic_cst == MonotonicConstraint.POS and value_left > value_right) or
            (monotonic_cst == MonotonicConstraint.NEG and value_left < value_right)):
        # don't consider this split since it does not respect the monotonic
        # constraints. Note that these comparisons need to be done on values
        # that have already been clipped to take the monotonic constraints into
        # account (if any).
        return -1

    gain = loss_current_node
    gain -= _loss_from_value(value_left, sum_gradient_left)
    gain -= _loss_from_value(value_right, sum_gradient_right)
    # Note that for the gain to be correct (and for min_gain_to_split to work
    # as expected), we need all values to be bounded (current node, left child
    # and right child).

    return gain

cdef inline Y_DTYPE_C _loss_from_value(
        Y_DTYPE_C value,
        Y_DTYPE_C sum_gradient) noexcept nogil:
    """Return loss of a node from its (bounded) value

    See Equation 6 of:
    :arxiv:`T. Chen, C. Guestrin, (2016) XGBoost: A Scalable Tree Boosting System,
    <1603.02754>.`
    """
    return sum_gradient * value

cdef inline uint8_t sample_goes_left(
        uint8_t missing_go_to_left,
        uint8_t missing_values_bin_idx,
        X_BINNED_DTYPE_C split_bin_idx,
        X_BINNED_DTYPE_C bin_value,
        uint8_t is_categorical,
        BITSET_DTYPE_C left_cat_bitset) noexcept nogil:
    """Helper to decide whether sample should go to left or right child."""

    if is_categorical:
        # note: if any, missing values are encoded in left_cat_bitset
        return in_bitset(left_cat_bitset, bin_value)
    else:
        return (
            (
                missing_go_to_left and
                bin_value == missing_values_bin_idx
            )
            or (
                bin_value <= split_bin_idx
            ))


cpdef inline Y_DTYPE_C compute_node_value(
        Y_DTYPE_C sum_gradient,
        Y_DTYPE_C sum_hessian,
        Y_DTYPE_C lower_bound,
        Y_DTYPE_C upper_bound,
        Y_DTYPE_C l2_regularization) noexcept nogil:
    """Compute a node's value.

    The value is capped in the [lower_bound, upper_bound] interval to respect
    monotonic constraints. Shrinkage is ignored.

    See Equation 5 of:
    :arxiv:`T. Chen, C. Guestrin, (2016) XGBoost: A Scalable Tree Boosting System,
    <1603.02754>.`
    """

    cdef:
        Y_DTYPE_C value

    # 1e-15 guards against division by zero when both the hessian sum and the
    # l2 regularization are 0.
    value = -sum_gradient / (sum_hessian + l2_regularization + 1e-15)

    if value < lower_bound:
        value = lower_bound
    elif value > upper_bound:
        value = upper_bound

    return value
diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c3e3e10f9f89c570de45234e3c407762e5f449c
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..691b8105c910327a2ba66b1c59408542af7bad78
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcc4ced074adf7f7257360bca118158810e893d5
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc differ
diff --git
a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a6ce946d5c6d695647c1ddfdb73b17b9b1eebe2 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74aa70bd9baf831e7b4fc0bd9db8e068d61ee5ee Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_constraints.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_constraints.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16f39c219a4c0f1e2df86498531c6b350912fe93 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_constraints.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0472daefba041927f7e78babcebe8a42eb6d4268 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61a96b8b6a06a71e0aa52e6fae6d6cd910e6845f Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fb40e389dd93e373fa52a2f3348688ec59f4d6c Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..56b6068d794e8c96c24ee0ef18dbad3f66ad64b0 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py @@ -0,0 +1,446 @@ +import re + +import numpy as np +import pytest + +from sklearn.ensemble import ( + 
HistGradientBoostingClassifier, + HistGradientBoostingRegressor, +) +from sklearn.ensemble._hist_gradient_boosting.common import ( + G_H_DTYPE, + X_BINNED_DTYPE, + MonotonicConstraint, +) +from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower +from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder +from sklearn.ensemble._hist_gradient_boosting.splitting import ( + Splitter, + compute_node_value, +) +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads +from sklearn.utils._testing import _convert_container + +n_threads = _openmp_effective_n_threads() + + +def is_increasing(a): + return (np.diff(a) >= 0.0).all() + + +def is_decreasing(a): + return (np.diff(a) <= 0.0).all() + + +def assert_leaves_values_monotonic(predictor, monotonic_cst): + # make sure leaves values (from left to right) are either all increasing + # or all decreasing (or neither) depending on the monotonic constraint. + nodes = predictor.nodes + + def get_leaves_values(): + """get leaves values from left to right""" + values = [] + + def depth_first_collect_leaf_values(node_idx): + node = nodes[node_idx] + if node["is_leaf"]: + values.append(node["value"]) + return + depth_first_collect_leaf_values(node["left"]) + depth_first_collect_leaf_values(node["right"]) + + depth_first_collect_leaf_values(0) # start at root (0) + return values + + values = get_leaves_values() + + if monotonic_cst == MonotonicConstraint.NO_CST: + # some increasing, some decreasing + assert not is_increasing(values) and not is_decreasing(values) + elif monotonic_cst == MonotonicConstraint.POS: + # all increasing + assert is_increasing(values) + else: # NEG + # all decreasing + assert is_decreasing(values) + + +def assert_children_values_monotonic(predictor, monotonic_cst): + # Make sure siblings values respect the monotonic constraints. Left should + # be lower (resp greater) than right child if constraint is POS (resp. + # NEG). 
+ # Note that this property alone isn't enough to ensure full monotonicity, + # since we also need to guanrantee that all the descendents of the left + # child won't be greater (resp. lower) than the right child, or its + # descendents. That's why we need to bound the predicted values (this is + # tested in assert_children_values_bounded) + nodes = predictor.nodes + left_lower = [] + left_greater = [] + for node in nodes: + if node["is_leaf"]: + continue + + left_idx = node["left"] + right_idx = node["right"] + + if nodes[left_idx]["value"] < nodes[right_idx]["value"]: + left_lower.append(node) + elif nodes[left_idx]["value"] > nodes[right_idx]["value"]: + left_greater.append(node) + + if monotonic_cst == MonotonicConstraint.NO_CST: + assert left_lower and left_greater + elif monotonic_cst == MonotonicConstraint.POS: + assert left_lower and not left_greater + else: # NEG + assert not left_lower and left_greater + + +def assert_children_values_bounded(grower, monotonic_cst): + # Make sure that the values of the children of a node are bounded by the + # middle value between that node and its sibling (if there is a monotonic + # constraint). 
+ # As a bonus, we also check that the siblings values are properly ordered + # which is slightly redundant with assert_children_values_monotonic (but + # this check is done on the grower nodes whereas + # assert_children_values_monotonic is done on the predictor nodes) + + if monotonic_cst == MonotonicConstraint.NO_CST: + return + + def recursively_check_children_node_values(node, right_sibling=None): + if node.is_leaf: + return + if right_sibling is not None: + middle = (node.value + right_sibling.value) / 2 + if monotonic_cst == MonotonicConstraint.POS: + assert node.left_child.value <= node.right_child.value <= middle + if not right_sibling.is_leaf: + assert ( + middle + <= right_sibling.left_child.value + <= right_sibling.right_child.value + ) + else: # NEG + assert node.left_child.value >= node.right_child.value >= middle + if not right_sibling.is_leaf: + assert ( + middle + >= right_sibling.left_child.value + >= right_sibling.right_child.value + ) + + recursively_check_children_node_values( + node.left_child, right_sibling=node.right_child + ) + recursively_check_children_node_values(node.right_child) + + recursively_check_children_node_values(grower.root) + + +@pytest.mark.parametrize("seed", range(3)) +@pytest.mark.parametrize( + "monotonic_cst", + ( + MonotonicConstraint.NO_CST, + MonotonicConstraint.POS, + MonotonicConstraint.NEG, + ), +) +def test_nodes_values(monotonic_cst, seed): + # Build a single tree with only one feature, and make sure the nodes + # values respect the monotonic constraints. 
+ + # Considering the following tree with a monotonic POS constraint, we + # should have: + # + # root + # / \ + # 5 10 # middle = 7.5 + # / \ / \ + # a b c d + # + # a <= b and c <= d (assert_children_values_monotonic) + # a, b <= middle <= c, d (assert_children_values_bounded) + # a <= b <= c <= d (assert_leaves_values_monotonic) + # + # The last one is a consequence of the others, but can't hurt to check + + rng = np.random.RandomState(seed) + n_samples = 1000 + n_features = 1 + X_binned = rng.randint(0, 255, size=(n_samples, n_features), dtype=np.uint8) + X_binned = np.asfortranarray(X_binned) + + gradients = rng.normal(size=n_samples).astype(G_H_DTYPE) + hessians = np.ones(shape=1, dtype=G_H_DTYPE) + + grower = TreeGrower( + X_binned, gradients, hessians, monotonic_cst=[monotonic_cst], shrinkage=0.1 + ) + grower.grow() + + # grow() will shrink the leaves values at the very end. For our comparison + # tests, we need to revert the shrinkage of the leaves, else we would + # compare the value of a leaf (shrunk) with a node (not shrunk) and the + # test would not be correct. + for leave in grower.finalized_leaves: + leave.value /= grower.shrinkage + + # We pass undefined binning_thresholds because we won't use predict anyway + predictor = grower.make_predictor( + binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1)) + ) + + # The consistency of the bounds can only be checked on the tree grower + # as the node bounds are not copied into the predictor tree. The + # consistency checks on the values of node children and leaves can be + # done either on the grower tree or on the predictor tree. We only + # do those checks on the predictor tree as the latter is derived from + # the former. 
+ assert_children_values_monotonic(predictor, monotonic_cst) + assert_children_values_bounded(grower, monotonic_cst) + assert_leaves_values_monotonic(predictor, monotonic_cst) + + +@pytest.mark.parametrize("use_feature_names", (True, False)) +def test_predictions(global_random_seed, use_feature_names): + # Train a model with a POS constraint on the first non-categorical feature + # and a NEG constraint on the second non-categorical feature, and make sure + # the constraints are respected by checking the predictions. + # test adapted from lightgbm's test_monotone_constraint(), itself inspired + # by https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html + + rng = np.random.RandomState(global_random_seed) + + n_samples = 1000 + f_0 = rng.rand(n_samples) # positive correlation with y + f_1 = rng.rand(n_samples) # negative correlation with y + + # extra categorical features, no correlation with y, + # to check the correctness of monotonicity constraint remapping, see issue #28898 + f_a = rng.randint(low=0, high=9, size=n_samples) + f_b = rng.randint(low=0, high=9, size=n_samples) + f_c = rng.randint(low=0, high=9, size=n_samples) + + X = np.c_[f_a, f_0, f_b, f_1, f_c] + columns_name = ["f_a", "f_0", "f_b", "f_1", "f_c"] + constructor_name = "dataframe" if use_feature_names else "array" + X = _convert_container(X, constructor_name, columns_name=columns_name) + + noise = rng.normal(loc=0.0, scale=0.01, size=n_samples) + y = 5 * f_0 + np.sin(10 * np.pi * f_0) - 5 * f_1 - np.cos(10 * np.pi * f_1) + noise + + if use_feature_names: + monotonic_cst = {"f_0": +1, "f_1": -1} + categorical_features = ["f_a", "f_b", "f_c"] + else: + monotonic_cst = [0, +1, 0, -1, 0] + categorical_features = [0, 2, 4] + + gbdt = HistGradientBoostingRegressor( + monotonic_cst=monotonic_cst, categorical_features=categorical_features + ) + gbdt.fit(X, y) + + linspace = np.linspace(0, 1, 100) + sin = np.sin(linspace) + constant = np.full_like(linspace, fill_value=0.5) + + # We now assert 
the predictions properly respect the constraints, on each + # feature. When testing for a feature we need to set the other one to a + # constant, because the monotonic constraints are only a "all else being + # equal" type of constraints: + # a constraint on the first feature only means that + # x0 < x0' => f(x0, x1) < f(x0', x1) + # while x1 stays constant. + # The constraint does not guanrantee that + # x0 < x0' => f(x0, x1) < f(x0', x1') + + # First non-categorical feature (POS) + # assert pred is all increasing when f_0 is all increasing + X = np.c_[constant, linspace, constant, constant, constant] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert is_increasing(pred) + # assert pred actually follows the variations of f_0 + X = np.c_[constant, sin, constant, constant, constant] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert np.all((np.diff(pred) >= 0) == (np.diff(sin) >= 0)) + + # Second non-categorical feature (NEG) + # assert pred is all decreasing when f_1 is all increasing + X = np.c_[constant, constant, constant, linspace, constant] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert is_decreasing(pred) + # assert pred actually follows the inverse variations of f_1 + X = np.c_[constant, constant, constant, sin, constant] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert ((np.diff(pred) <= 0) == (np.diff(sin) >= 0)).all() + + +def test_input_error(): + X = [[1, 2], [2, 3], [3, 4]] + y = [0, 1, 2] + + gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, 0, -1]) + with pytest.raises( + ValueError, match=re.escape("monotonic_cst has shape (3,) but the input data") + ): + gbdt.fit(X, y) + + for monotonic_cst in ([1, 3], [1, -3], [0.3, -0.7]): + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = 
re.escape( + "must be an array-like of -1, 0 or 1. Observed values:" + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + gbdt = HistGradientBoostingClassifier(monotonic_cst=[0, 1]) + with pytest.raises( + ValueError, + match="monotonic constraints are not supported for multiclass classification", + ): + gbdt.fit(X, y) + + +def test_input_error_related_to_feature_names(): + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]}) + y = np.array([0, 1, 0]) + + monotonic_cst = {"d": 1, "a": 1, "c": -1} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape( + "monotonic_cst contains 2 unexpected feature names: ['c', 'd']." + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + monotonic_cst = {k: 1 for k in "abcdefghijklmnopqrstuvwxyz"} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape( + "monotonic_cst contains 24 unexpected feature names: " + "['c', 'd', 'e', 'f', 'g', '...']." + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + monotonic_cst = {"a": 1} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape( + "HistGradientBoostingRegressor was not fitted on data with feature " + "names. Pass monotonic_cst as an integer array instead." + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X.values, y) + + monotonic_cst = {"b": -1, "a": "+"} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape("monotonic_cst['a'] must be either -1, 0 or 1. 
Got '+'.") + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + +def test_bounded_value_min_gain_to_split(): + # The purpose of this test is to show that when computing the gain at a + # given split, the value of the current node should be properly bounded to + # respect the monotonic constraints, because it strongly interacts with + # min_gain_to_split. We build a simple example where gradients are [1, 1, + # 100, 1, 1] (hessians are all ones). The best split happens on the 3rd + # bin, and depending on whether the value of the node is bounded or not, + # the min_gain_to_split constraint is or isn't satisfied. + l2_regularization = 0 + min_hessian_to_split = 0 + min_samples_leaf = 1 + n_bins = n_samples = 5 + X_binned = np.arange(n_samples).reshape(-1, 1).astype(X_BINNED_DTYPE) + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_hessians = np.ones(n_samples, dtype=G_H_DTYPE) + all_gradients = np.array([1, 1, 100, 1, 1], dtype=G_H_DTYPE) + sum_gradients = all_gradients.sum() + sum_hessians = all_hessians.sum() + hessians_are_constant = False + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads + ) + n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + children_lower_bound, children_upper_bound = -np.inf, np.inf + + min_gain_to_split = 2000 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + histograms = builder.compute_histograms_brute(sample_indices) + + # Since the 
gradient array is [1, 1, 100, 1, 1] + # the max possible gain happens on the 3rd bin (or equivalently in the 2nd) + # and is equal to about 1307, which less than min_gain_to_split = 2000, so + # the node is considered unsplittable (gain = -1) + current_lower_bound, current_upper_bound = -np.inf, np.inf + value = compute_node_value( + sum_gradients, + sum_hessians, + current_lower_bound, + current_upper_bound, + l2_regularization, + ) + # the unbounded value is equal to -sum_gradients / sum_hessians + assert value == pytest.approx(-104 / 5) + split_info = splitter.find_node_split( + n_samples, + histograms, + sum_gradients, + sum_hessians, + value, + lower_bound=children_lower_bound, + upper_bound=children_upper_bound, + ) + assert split_info.gain == -1 # min_gain_to_split not respected + + # here again the max possible gain is on the 3rd bin but we now cap the + # value of the node into [-10, inf]. + # This means the gain is now about 2430 which is more than the + # min_gain_to_split constraint. 
@pytest.mark.parametrize("n_bins", [200, 256])
def test_regression_dataset(n_bins):
    """Grow a single tree on a binned regression set; check train/test R^2."""
    X, y = make_regression(
        n_samples=500, n_features=10, n_informative=5, random_state=42
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

    bin_mapper = _BinMapper(n_bins=n_bins, random_state=42)
    X_train_binned = bin_mapper.fit_transform(X_train)

    # Gradients and hessians of the least-squares loss at the zero prediction.
    gradients = -y_train.astype(G_H_DTYPE)
    hessians = np.ones(1, dtype=G_H_DTYPE)

    grower = TreeGrower(
        X_train_binned,
        gradients,
        hessians,
        min_samples_leaf=10,
        max_leaf_nodes=30,
        n_bins=n_bins,
        n_bins_non_missing=bin_mapper.n_bins_non_missing_,
    )
    grower.grow()
    predictor = grower.make_predictor(binning_thresholds=bin_mapper.bin_thresholds_)

    # This dataset has no categorical features, so the bitsets are empty.
    empty_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
    empty_f_idx_map = np.zeros(0, dtype=np.uint32)

    y_pred_train = predictor.predict(
        X_train, empty_bitsets, empty_f_idx_map, n_threads
    )
    assert r2_score(y_train, y_pred_train) > 0.82

    y_pred_test = predictor.predict(X_test, empty_bitsets, empty_f_idx_map, n_threads)
    assert r2_score(y_test, y_pred_test) > 0.67
@pytest.mark.parametrize(
    "bins_go_left, expected_predictions",
    [
        ([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]),
        ([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]),
        ([3, 5, 6], [0, 0, 0, 1, 0, 1]),
    ],
)
def test_categorical_predictor(bins_go_left, expected_predictions):
    """Predictor output must honor a categorical split on binned and raw data."""
    X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T
    categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE)
    bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE)

    # Minimal tree: one categorical split at the root with two leaf children.
    nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
    nodes[0]["left"], nodes[0]["right"] = 1, 2
    nodes[0]["feature_idx"] = 0
    nodes[0]["is_categorical"] = True
    nodes[0]["missing_go_to_left"] = True
    nodes[1]["is_leaf"] = True
    nodes[1]["value"] = 1  # left leaf
    nodes[2]["is_leaf"] = True
    nodes[2]["value"] = 0  # right leaf

    binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
    raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
    for left_bin in bins_go_left:
        set_bitset_memoryview(binned_cat_bitsets[0], left_bin)
    set_raw_bitset_from_binned_bitset(
        raw_categorical_bitsets[0], binned_cat_bitsets[0], categories
    )

    predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)

    # Binned data must route samples according to the binned bitset.
    prediction_binned = predictor.predict_binned(
        X_binned, missing_values_bin_idx=6, n_threads=n_threads
    )
    assert_allclose(prediction_binned, expected_predictions)

    # Raw (un-binned) data must follow the raw bitset; the known-categories
    # bitset is constructed by hand.
    known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)
    known_cat_bitsets[0, 0] = np.sum(2**categories, dtype=np.uint32)
    f_idx_map = np.array([0], dtype=np.uint32)
    predictions = predictor.predict(
        categories.reshape(-1, 1), known_cat_bitsets, f_idx_map, n_threads
    )
    assert_allclose(predictions, expected_predictions)

    # A sample in the missing-values bin (index 6) goes left since
    # missing_go_to_left was set on the root.
    X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T
    assert_allclose(
        predictor.predict_binned(
            X_binned_missing, missing_values_bin_idx=6, n_threads=n_threads
        ),
        [1],
    )

    # On raw data both NaN (missing) and unknown categories go left.
    assert_allclose(
        predictor.predict(
            np.array([[np.nan, 17]], dtype=X_DTYPE).T,
            known_cat_bitsets,
            f_idx_map,
            n_threads,
        ),
        [1, 1],
    )
issparse + +from ..base import OutlierMixin, _fit_context +from ..tree import ExtraTreeRegressor +from ..tree._tree import DTYPE as tree_dtype +from ..utils import ( + check_array, + check_random_state, + gen_batches, +) +from ..utils._chunking import get_chunk_n_rows +from ..utils._param_validation import Interval, RealNotInt, StrOptions +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _num_samples, check_is_fitted, validate_data +from ._bagging import BaseBagging + +__all__ = ["IsolationForest"] + + +def _parallel_compute_tree_depths( + tree, + X, + features, + tree_decision_path_lengths, + tree_avg_path_lengths, + depths, + lock, +): + """Parallel computation of isolation tree depth.""" + if features is None: + X_subset = X + else: + X_subset = X[:, features] + + leaves_index = tree.apply(X_subset, check_input=False) + + with lock: + depths += ( + tree_decision_path_lengths[leaves_index] + + tree_avg_path_lengths[leaves_index] + - 1.0 + ) + + +class IsolationForest(OutlierMixin, BaseBagging): + """ + Isolation Forest Algorithm. + + Return the anomaly score of each sample using the IsolationForest algorithm + + The IsolationForest 'isolates' observations by randomly selecting a feature + and then randomly selecting a split value between the maximum and minimum + values of the selected feature. + + Since recursive partitioning can be represented by a tree structure, the + number of splittings required to isolate a sample is equivalent to the path + length from the root node to the terminating node. + + This path length, averaged over a forest of such random trees, is a + measure of normality and our decision function. + + Random partitioning produces noticeably shorter paths for anomalies. + Hence, when a forest of random trees collectively produce shorter path + lengths for particular samples, they are highly likely to be anomalies. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.18 + + Parameters + ---------- + n_estimators : int, default=100 + The number of base estimators in the ensemble. + + max_samples : "auto", int or float, default="auto" + The number of samples to draw from X to train each base estimator. + + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. + - If "auto", then `max_samples=min(256, n_samples)`. + + If max_samples is larger than the number of samples provided, + all samples will be used for all trees (no sampling). + + contamination : 'auto' or float, default='auto' + The amount of contamination of the data set, i.e. the proportion + of outliers in the data set. Used when fitting to define the threshold + on the scores of the samples. + + - If 'auto', the threshold is determined as in the + original paper. + - If float, the contamination should be in the range (0, 0.5]. + + .. versionchanged:: 0.22 + The default value of ``contamination`` changed from 0.1 + to ``'auto'``. + + max_features : int or float, default=1.0 + The number of features to draw from X to train each base estimator. + + - If int, then draw `max_features` features. + - If float, then draw `max(1, int(max_features * n_features_in_))` features. + + Note: using a float number less than 1.0 or integer less than number of + features will enable feature subsampling and leads to a longer runtime. + + bootstrap : bool, default=False + If True, individual trees are fit on random subsets of the training + data sampled with replacement. If False, sampling without replacement + is performed. + + n_jobs : int, default=None + The number of jobs to run in parallel for :meth:`fit`. ``None`` means 1 + unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using + all processors. See :term:`Glossary ` for more details. 
+ + random_state : int, RandomState instance or None, default=None + Controls the pseudo-randomness of the selection of the feature + and split values for each branching step and each tree in the forest. + + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + verbose : int, default=0 + Controls the verbosity of the tree building process. + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble, otherwise, just fit a whole + new forest. See :term:`the Glossary `. + + .. versionadded:: 0.21 + + Attributes + ---------- + estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance + The child estimator template used to create the collection of + fitted sub-estimators. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of ExtraTreeRegressor instances + The collection of fitted sub-estimators. + + estimators_features_ : list of ndarray + The subset of drawn features for each base estimator. + + estimators_samples_ : list of ndarray + The subset of drawn samples (i.e., the in-bag samples) for each base + estimator. + + max_samples_ : int + The actual number of samples. + + offset_ : float + Offset used to define the decision function from the raw scores. We + have the relation: ``decision_function = score_samples - offset_``. + ``offset_`` is defined as follows. When the contamination parameter is + set to "auto", the offset is equal to -0.5 as the scores of inliers are + close to 0 and the scores of outliers are close to -1. When a + contamination parameter different than "auto" is provided, the offset + is defined in such a way we obtain the expected number of outliers + (samples with decision function < 0) in training. + + .. versionadded:: 0.20 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a + Gaussian distributed dataset. + sklearn.svm.OneClassSVM : Unsupervised Outlier Detection. + Estimate the support of a high-dimensional distribution. + The implementation is based on libsvm. + sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection + using Local Outlier Factor (LOF). + + Notes + ----- + The implementation is based on an ensemble of ExtraTreeRegressor. The + maximum depth of each tree is set to ``ceil(log_2(n))`` where + :math:`n` is the number of samples used to build the tree + (see (Liu et al., 2008) for more details). + + References + ---------- + .. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest." + Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on. + .. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based + anomaly detection." ACM Transactions on Knowledge Discovery from + Data (TKDD) 6.1 (2012): 3. + + Examples + -------- + >>> from sklearn.ensemble import IsolationForest + >>> X = [[-1.1], [0.3], [0.5], [100]] + >>> clf = IsolationForest(random_state=0).fit(X) + >>> clf.predict([[0.1], [0], [90]]) + array([ 1, 1, -1]) + + For an example of using isolation forest for anomaly detection see + :ref:`sphx_glr_auto_examples_ensemble_plot_isolation_forest.py`. 
+ """ + + _parameter_constraints: dict = { + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "max_samples": [ + StrOptions({"auto"}), + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + ], + "contamination": [ + StrOptions({"auto"}), + Interval(Real, 0, 0.5, closed="right"), + ], + "max_features": [ + Integral, + Interval(Real, 0, 1, closed="right"), + ], + "bootstrap": ["boolean"], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "verbose": ["verbose"], + "warm_start": ["boolean"], + } + + def __init__( + self, + *, + n_estimators=100, + max_samples="auto", + contamination="auto", + max_features=1.0, + bootstrap=False, + n_jobs=None, + random_state=None, + verbose=0, + warm_start=False, + ): + super().__init__( + estimator=None, + # here above max_features has no links with self.max_features + bootstrap=bootstrap, + bootstrap_features=False, + n_estimators=n_estimators, + max_samples=max_samples, + max_features=max_features, + warm_start=warm_start, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + ) + + self.contamination = contamination + + def _get_estimator(self): + return ExtraTreeRegressor( + # here max_features has no links with self.max_features + max_features=1, + splitter="random", + random_state=self.random_state, + ) + + def _set_oob_score(self, X, y): + raise NotImplementedError("OOB score not supported by iforest") + + def _parallel_args(self): + # ExtraTreeRegressor releases the GIL, so it's more efficient to use + # a thread-based backend rather than a process-based backend so as + # to avoid suffering from communication overhead and extra memory + # copies. This is only used in the fit method. + return {"prefer": "threads"} + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, sample_weight=None): + """ + Fit estimator. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Use ``dtype=np.float32`` for maximum + efficiency. Sparse matrices are also supported, use sparse + ``csc_matrix`` for maximum efficiency. + + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + Returns + ------- + self : object + Fitted estimator. + """ + X = validate_data( + self, X, accept_sparse=["csc"], dtype=tree_dtype, ensure_all_finite=False + ) + if issparse(X): + # Pre-sort indices to avoid that each individual tree of the + # ensemble sorts the indices. + X.sort_indices() + + rnd = check_random_state(self.random_state) + y = rnd.uniform(size=X.shape[0]) + + # ensure that max_sample is in [1, n_samples]: + n_samples = X.shape[0] + + if isinstance(self.max_samples, str) and self.max_samples == "auto": + max_samples = min(256, n_samples) + + elif isinstance(self.max_samples, numbers.Integral): + if self.max_samples > n_samples: + warn( + "max_samples (%s) is greater than the " + "total number of samples (%s). max_samples " + "will be set to n_samples for estimation." + % (self.max_samples, n_samples) + ) + max_samples = n_samples + else: + max_samples = self.max_samples + else: # max_samples is float + max_samples = int(self.max_samples * X.shape[0]) + + self.max_samples_ = max_samples + max_depth = int(np.ceil(np.log2(max(max_samples, 2)))) + super()._fit( + X, + y, + max_samples, + max_depth=max_depth, + sample_weight=sample_weight, + check_input=False, + ) + + self._average_path_length_per_tree, self._decision_path_lengths = zip( + *[ + ( + _average_path_length(tree.tree_.n_node_samples), + tree.tree_.compute_node_depths(), + ) + for tree in self.estimators_ + ] + ) + + if self.contamination == "auto": + # 0.5 plays a special role as described in the original paper. 
+ # we take the opposite as we consider the opposite of their score. + self.offset_ = -0.5 + return self + + # Else, define offset_ wrt contamination parameter + # To avoid performing input validation a second time we call + # _score_samples rather than score_samples. + # _score_samples expects a CSR matrix, so we convert if necessary. + if issparse(X): + X = X.tocsr() + self.offset_ = np.percentile(self._score_samples(X), 100.0 * self.contamination) + + return self + + def predict(self, X): + """ + Predict if a particular sample is an outlier or not. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + For each observation, tells whether or not (+1 or -1) it should + be considered as an inlier according to the fitted model. + + Notes + ----- + The predict method can be parallelized by setting a joblib context. This + inherently does NOT use the ``n_jobs`` parameter initialized in the class, + which is used during ``fit``. This is because, predict may actually be faster + without parallelization for a small number of samples, + such as for 1000 samples or less. The user can set the + number of jobs in the joblib context to control the number of parallel jobs. + + .. code-block:: python + + from joblib import parallel_backend + + # Note, we use threading here as the predict method is not CPU bound. + with parallel_backend("threading", n_jobs=4): + model.predict(X) + """ + check_is_fitted(self) + decision_func = self.decision_function(X) + is_inlier = np.ones_like(decision_func, dtype=int) + is_inlier[decision_func < 0] = -1 + return is_inlier + + def decision_function(self, X): + """ + Average anomaly score of X of the base classifiers. 
+ + The anomaly score of an input sample is computed as + the mean anomaly score of the trees in the forest. + + The measure of normality of an observation given a tree is the depth + of the leaf containing this observation, which is equivalent to + the number of splittings required to isolate this point. In case of + several observations n_left in the leaf, the average path length of + a n_left samples isolation tree is added. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, it will be converted to + ``dtype=np.float32`` and if a sparse matrix is provided + to a sparse ``csr_matrix``. + + Returns + ------- + scores : ndarray of shape (n_samples,) + The anomaly score of the input samples. + The lower, the more abnormal. Negative scores represent outliers, + positive scores represent inliers. + + Notes + ----- + The decision_function method can be parallelized by setting a joblib context. + This inherently does NOT use the ``n_jobs`` parameter initialized in the class, + which is used during ``fit``. This is because, calculating the score may + actually be faster without parallelization for a small number of samples, + such as for 1000 samples or less. + The user can set the number of jobs in the joblib context to control the + number of parallel jobs. + + .. code-block:: python + + from joblib import parallel_backend + + # Note, we use threading here as the decision_function method is + # not CPU bound. + with parallel_backend("threading", n_jobs=4): + model.decision_function(X) + """ + # We subtract self.offset_ to make 0 be the threshold value for being + # an outlier: + + return self.score_samples(X) - self.offset_ + + def score_samples(self, X): + """ + Opposite of the anomaly score defined in the original paper. + + The anomaly score of an input sample is computed as + the mean anomaly score of the trees in the forest. 
+ + The measure of normality of an observation given a tree is the depth + of the leaf containing this observation, which is equivalent to + the number of splittings required to isolate this point. In case of + several observations n_left in the leaf, the average path length of + a n_left samples isolation tree is added. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. + + Returns + ------- + scores : ndarray of shape (n_samples,) + The anomaly score of the input samples. + The lower, the more abnormal. + + Notes + ----- + The score function method can be parallelized by setting a joblib context. This + inherently does NOT use the ``n_jobs`` parameter initialized in the class, + which is used during ``fit``. This is because, calculating the score may + actually be faster without parallelization for a small number of samples, + such as for 1000 samples or less. + The user can set the number of jobs in the joblib context to control the + number of parallel jobs. + + .. code-block:: python + + from joblib import parallel_backend + + # Note, we use threading here as the score_samples method is not CPU bound. + with parallel_backend("threading", n_jobs=4): + model.score(X) + """ + # Check data + X = validate_data( + self, + X, + accept_sparse="csr", + dtype=tree_dtype, + reset=False, + ensure_all_finite=False, + ) + + return self._score_samples(X) + + def _score_samples(self, X): + """Private version of score_samples without input validation. + + Input validation would remove feature names, so we disable it. 
+ """ + # Code structure from ForestClassifier/predict_proba + + check_is_fitted(self) + + # Take the opposite of the scores as bigger is better (here less abnormal) + return -self._compute_chunked_score_samples(X) + + def _compute_chunked_score_samples(self, X): + n_samples = _num_samples(X) + + if self._max_features == X.shape[1]: + subsample_features = False + else: + subsample_features = True + + # We get as many rows as possible within our working_memory budget + # (defined by sklearn.get_config()['working_memory']) to store + # self._max_features in each row during computation. + # + # Note: + # - this will get at least 1 row, even if 1 row of score will + # exceed working_memory. + # - this does only account for temporary memory usage while loading + # the data needed to compute the scores -- the returned scores + # themselves are 1D. + + chunk_n_rows = get_chunk_n_rows( + row_bytes=16 * self._max_features, max_n_rows=n_samples + ) + slices = gen_batches(n_samples, chunk_n_rows) + + scores = np.zeros(n_samples, order="f") + + for sl in slices: + # compute score on the slices of test samples: + scores[sl] = self._compute_score_samples(X[sl], subsample_features) + + return scores + + def _compute_score_samples(self, X, subsample_features): + """ + Compute the score of each samples in X going through the extra trees. + + Parameters + ---------- + X : array-like or sparse matrix + Data matrix. + + subsample_features : bool + Whether features should be subsampled. + + Returns + ------- + scores : ndarray of shape (n_samples,) + The score of each sample in X. + """ + n_samples = X.shape[0] + + depths = np.zeros(n_samples, order="f") + + average_path_length_max_samples = _average_path_length([self._max_samples]) + + # Note: we use default n_jobs value, i.e. sequential computation, which + # we expect to be more performant that parallelizing for small number + # of samples, e.g. < 1k samples. 
def _average_path_length(n_samples_leaf):
    """
    The average path length in a n_samples iTree, which is equal to
    the average path length of an unsuccessful BST search since the
    latter has the same structure as an isolation tree.

    Parameters
    ----------
    n_samples_leaf : array-like of shape (n_samples,)
        The number of training samples in each test sample leaf, for
        each estimators.

    Returns
    -------
    average_path_length : ndarray of shape (n_samples,)
    """
    n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)

    # Work on a flat view, restore the caller's shape on return.
    original_shape = n_samples_leaf.shape
    flat = n_samples_leaf.reshape((1, -1))

    # Piecewise definition: c(n) = 0 for n <= 1, c(2) = 1, and otherwise
    # c(n) = 2 * H(n - 1) - 2 * (n - 1) / n, with the harmonic number
    # approximated as log(n - 1) + Euler-Mascheroni constant.
    result = np.zeros(flat.shape)

    trivial = flat <= 1
    pair = flat == 2
    rest = ~(trivial | pair)

    result[trivial] = 0.0
    result[pair] = 1.0
    n = flat[rest]
    result[rest] = 2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n

    return result.reshape(original_shape)
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + ClassifierMixin, + RegressorMixin, + _fit_context, + is_classifier, + is_regressor, +) +from ..metrics import accuracy_score, r2_score +from ..tree import DecisionTreeClassifier, DecisionTreeRegressor +from ..utils import _safe_indexing, check_random_state +from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions +from ..utils.extmath import softmax, stable_cumsum +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.validation import ( + _check_sample_weight, + _num_samples, + check_is_fitted, + has_fit_parameter, + validate_data, +) +from ._base import BaseEnsemble + +__all__ = [ + "AdaBoostClassifier", + "AdaBoostRegressor", +] + + +class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta): + """Base class for AdaBoost estimators. + + Warning: This class should not be used directly. Use derived classes + instead. 
+ """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "predict"]), None], + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "learning_rate": [Interval(Real, 0, None, closed="neither")], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + estimator=None, + *, + n_estimators=50, + estimator_params=tuple(), + learning_rate=1.0, + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + estimator_params=estimator_params, + ) + + self.learning_rate = learning_rate + self.random_state = random_state + + def _check_X(self, X): + # Only called to validate X in non-fit methods, therefore reset=False + return validate_data( + self, + X, + accept_sparse=["csr", "csc"], + ensure_2d=True, + allow_nd=True, + dtype=None, + reset=False, + ) + + @_fit_context( + # AdaBoost*.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Build a boosted classifier/regressor from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + The target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, the sample weights are initialized to + 1 / n_samples. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + X, y = validate_data( + self, + X, + y, + accept_sparse=["csr", "csc"], + ensure_2d=True, + allow_nd=True, + dtype=None, + y_numeric=is_regressor(self), + ) + + sample_weight = _check_sample_weight( + sample_weight, X, np.float64, copy=True, ensure_non_negative=True + ) + sample_weight /= sample_weight.sum() + + # Check parameters + self._validate_estimator() + + # Clear any previous fit results + self.estimators_ = [] + self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64) + self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64) + + # Initialization of the random number instance that will be used to + # generate a seed at each iteration + random_state = check_random_state(self.random_state) + epsilon = np.finfo(sample_weight.dtype).eps + + zero_weight_mask = sample_weight == 0.0 + for iboost in range(self.n_estimators): + # avoid extremely small sample weight, for details see issue #20320 + sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None) + # do not clip sample weights that were exactly zero originally + sample_weight[zero_weight_mask] = 0.0 + + # Boosting step + sample_weight, estimator_weight, estimator_error = self._boost( + iboost, X, y, sample_weight, random_state + ) + + # Early termination + if sample_weight is None: + break + self.estimator_weights_[iboost] = estimator_weight + self.estimator_errors_[iboost] = estimator_error + + # Stop if error is zero + if estimator_error == 0: + break + + sample_weight_sum = np.sum(sample_weight) + + if not np.isfinite(sample_weight_sum): + warnings.warn( + ( + "Sample weights have reached infinite values," + f" at iteration {iboost}, causing overflow. " + "Iterations stopped. Try lowering the learning rate." 
+ ), + stacklevel=2, + ) + break + + # Stop if the sum of sample weights has become non-positive + if sample_weight_sum <= 0: + break + + if iboost < self.n_estimators - 1: + # Normalize + sample_weight /= sample_weight_sum + + return self + + @abstractmethod + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost. + + Warning: This method needs to be overridden by subclasses. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + The target values (class labels). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState + The current random number generator + + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. + + error : float + The classification error for the current boost. + If None then boosting has terminated early. + """ + pass + + def staged_score(self, X, y, sample_weight=None): + """Return staged scores for X, y. + + This generator method yields the ensemble score after each iteration of + boosting and therefore allows monitoring, such as to determine the + score on a test set after each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + Labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. 
+ + Yields + ------ + z : float + """ + X = self._check_X(X) + + for y_pred in self.staged_predict(X): + if is_classifier(self): + yield accuracy_score(y, y_pred, sample_weight=sample_weight) + else: + yield r2_score(y, y_pred, sample_weight=sample_weight) + + @property + def feature_importances_(self): + """The impurity-based feature importances. + + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + Returns + ------- + feature_importances_ : ndarray of shape (n_features,) + The feature importances. + """ + if self.estimators_ is None or len(self.estimators_) == 0: + raise ValueError( + "Estimator not fitted, call `fit` before `feature_importances_`." + ) + + try: + norm = self.estimator_weights_.sum() + return ( + sum( + weight * clf.feature_importances_ + for weight, clf in zip(self.estimator_weights_, self.estimators_) + ) + / norm + ) + + except AttributeError as e: + raise AttributeError( + "Unable to compute feature importances " + "since estimator does not have a " + "feature_importances_ attribute" + ) from e + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags + + +def _samme_proba(estimator, n_classes, X): + """Calculate algorithm 4, step 2, equation c) of Zhu et al [1]. + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. + + """ + proba = estimator.predict_proba(X) + + # Displace zero probabilities so the log is defined. + # Also fix negative elements which may occur with + # negative sample weights. 
+ np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba) + log_proba = np.log(proba) + + return (n_classes - 1) * ( + log_proba - (1.0 / n_classes) * log_proba.sum(axis=1)[:, np.newaxis] + ) + + +class AdaBoostClassifier( + _RoutingNotSupportedMixin, ClassifierMixin, BaseWeightBoosting +): + """An AdaBoost classifier. + + An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a + classifier on the original dataset and then fits additional copies of the + classifier on the same dataset but where the weights of incorrectly + classified instances are adjusted such that subsequent classifiers focus + more on difficult cases. + + This class implements the algorithm based on [2]_. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : object, default=None + The base estimator from which the boosted ensemble is built. + Support for sample weighting is required, as well as proper + ``classes_`` and ``n_classes_`` attributes. If ``None``, then + the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier` + initialized with `max_depth=1`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=50 + The maximum number of estimators at which boosting is terminated. + In case of perfect fit, the learning procedure is stopped early. + Values must be in the range `[1, inf)`. + + learning_rate : float, default=1.0 + Weight applied to each classifier at each boosting iteration. A higher + learning rate increases the contribution of each classifier. There is + a trade-off between the `learning_rate` and `n_estimators` parameters. + Values must be in the range `(0.0, inf)`. + + algorithm : {'SAMME'}, default='SAMME' + Use the SAMME discrete boosting algorithm. + + .. deprecated:: 1.6 + `algorithm` is deprecated and will be removed in version 1.8. This + estimator only implements the 'SAMME' algorithm. 
+ + random_state : int, RandomState instance or None, default=None + Controls the random seed given at each `estimator` at each + boosting iteration. + Thus, it is only used when `estimator` exposes a `random_state`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of classifiers + The collection of fitted sub-estimators. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_classes_ : int + The number of classes. + + estimator_weights_ : ndarray of floats + Weights for each estimator in the boosted ensemble. + + estimator_errors_ : ndarray of floats + Classification error for each estimator in the boosted + ensemble. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances if supported by the + ``estimator`` (when based on decision trees). + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdaBoostRegressor : An AdaBoost regressor that begins by fitting a + regressor on the original dataset and then fits additional copies of + the regressor on the same dataset but where the weights of instances + are adjusted according to the error of the current prediction. + + GradientBoostingClassifier : GB builds an additive model in a forward + stage-wise fashion. 
Regression trees are fit on the negative gradient + of the binomial or multinomial deviance loss function. Binary + classification is a special case where only a single regression tree is + induced. + + sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning + method used for classification. + Creates a model that predicts the value of a target variable by + learning simple decision rules inferred from the data features. + + References + ---------- + .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of + on-Line Learning and an Application to Boosting", 1995. + + .. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost." + Statistics and its Interface 2.3 (2009): 349-360. + <10.4310/SII.2009.v2.n3.a8>` + + Examples + -------- + >>> from sklearn.ensemble import AdaBoostClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=1000, n_features=4, + ... n_informative=2, n_redundant=0, + ... random_state=0, shuffle=False) + >>> clf = AdaBoostClassifier(n_estimators=100, random_state=0) + >>> clf.fit(X, y) + AdaBoostClassifier(n_estimators=100, random_state=0) + >>> clf.predict([[0, 0, 0, 0]]) + array([1]) + >>> clf.score(X, y) + 0.96... + + For a detailed example of using AdaBoost to fit a sequence of DecisionTrees + as weaklearners, please refer to + :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_multiclass.py`. + + For a detailed example of using AdaBoost to fit a non-linearly seperable + classification dataset composed of two Gaussian quantiles clusters, please + refer to :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_twoclass.py`. 
+ """ + + # TODO(1.8): remove "algorithm" entry + _parameter_constraints: dict = { + **BaseWeightBoosting._parameter_constraints, + "algorithm": [StrOptions({"SAMME"}), Hidden(StrOptions({"deprecated"}))], + } + + def __init__( + self, + estimator=None, + *, + n_estimators=50, + learning_rate=1.0, + algorithm="deprecated", + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + learning_rate=learning_rate, + random_state=random_state, + ) + + self.algorithm = algorithm + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1)) + + if self.algorithm != "deprecated": + warnings.warn( + "The parameter 'algorithm' is deprecated in 1.6 and has no effect. " + "It will be removed in version 1.8.", + FutureWarning, + ) + + if not has_fit_parameter(self.estimator_, "sample_weight"): + raise ValueError( + f"{self.estimator.__class__.__name__} doesn't support sample_weight." + ) + + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost. + + Perform a single boost according to the discrete SAMME algorithm and return the + updated sample weights. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) + The target values (class labels). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState instance + The RandomState instance used if the base estimator accepts a + `random_state` attribute. + + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. 
+ + estimator_error : float + The classification error for the current boost. + If None then boosting has terminated early. + """ + estimator = self._make_estimator(random_state=random_state) + + estimator.fit(X, y, sample_weight=sample_weight) + + y_predict = estimator.predict(X) + + if iboost == 0: + self.classes_ = getattr(estimator, "classes_", None) + self.n_classes_ = len(self.classes_) + + # Instances incorrectly classified + incorrect = y_predict != y + + # Error fraction + estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0)) + + # Stop if classification is perfect + if estimator_error <= 0: + return sample_weight, 1.0, 0.0 + + n_classes = self.n_classes_ + + # Stop if the error is at least as bad as random guessing + if estimator_error >= 1.0 - (1.0 / n_classes): + self.estimators_.pop(-1) + if len(self.estimators_) == 0: + raise ValueError( + "BaseClassifier in AdaBoostClassifier " + "ensemble is worse than random, ensemble " + "can not be fit." + ) + return None, None, None + + # Boost weight using multi-class AdaBoost SAMME alg + estimator_weight = self.learning_rate * ( + np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0) + ) + + # Only boost the weights if it will fit again + if not iboost == self.n_estimators - 1: + # Only boost positive weights + sample_weight = np.exp( + np.log(sample_weight) + + estimator_weight * incorrect * (sample_weight > 0) + ) + + return sample_weight, estimator_weight, estimator_error + + def predict(self, X): + """Predict classes for X. + + The predicted class of an input sample is computed as the weighted mean + prediction of the classifiers in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted classes. 
+ """ + pred = self.decision_function(X) + + if self.n_classes_ == 2: + return self.classes_.take(pred > 0, axis=0) + + return self.classes_.take(np.argmax(pred, axis=1), axis=0) + + def staged_predict(self, X): + """Return staged predictions for X. + + The predicted class of an input sample is computed as the weighted mean + prediction of the classifiers in the ensemble. + + This generator method yields the ensemble prediction after each + iteration of boosting and therefore allows monitoring, such as to + determine the prediction on a test set after each boost. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted classes. + """ + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_ + + if n_classes == 2: + for pred in self.staged_decision_function(X): + yield np.array(classes.take(pred > 0, axis=0)) + + else: + for pred in self.staged_decision_function(X): + yield np.array(classes.take(np.argmax(pred, axis=1), axis=0)) + + def decision_function(self, X): + """Compute the decision function of ``X``. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + score : ndarray of shape of (n_samples, k) + The decision function of the input samples. The order of + outputs is the same as that of the :term:`classes_` attribute. + Binary classification is a special cases with ``k == 1``, + otherwise ``k==n_classes``. For binary classification, + values closer to -1 or 1 mean more like the first or second + class in ``classes_``, respectively. 
+ """ + check_is_fitted(self) + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_[:, np.newaxis] + + if n_classes == 1: + return np.zeros_like(X, shape=(X.shape[0], 1)) + + pred = sum( + np.where( + (estimator.predict(X) == classes).T, + w, + -1 / (n_classes - 1) * w, + ) + for estimator, w in zip(self.estimators_, self.estimator_weights_) + ) + + pred /= self.estimator_weights_.sum() + if n_classes == 2: + pred[:, 0] *= -1 + return pred.sum(axis=1) + return pred + + def staged_decision_function(self, X): + """Compute decision function of ``X`` for each boosting iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each boosting iteration. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + score : generator of ndarray of shape (n_samples, k) + The decision function of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. + Binary classification is a special cases with ``k == 1``, + otherwise ``k==n_classes``. For binary classification, + values closer to -1 or 1 mean more like the first or second + class in ``classes_``, respectively. 
+ """ + check_is_fitted(self) + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_[:, np.newaxis] + pred = None + norm = 0.0 + + for weight, estimator in zip(self.estimator_weights_, self.estimators_): + norm += weight + + current_pred = np.where( + (estimator.predict(X) == classes).T, + weight, + -1 / (n_classes - 1) * weight, + ) + + if pred is None: + pred = current_pred + else: + pred += current_pred + + if n_classes == 2: + tmp_pred = np.copy(pred) + tmp_pred[:, 0] *= -1 + yield (tmp_pred / norm).sum(axis=1) + else: + yield pred / norm + + @staticmethod + def _compute_proba_from_decision(decision, n_classes): + """Compute probabilities from the decision function. + + This is based eq. (15) of [1] where: + p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X))) + = softmax((1 / K-1) * f(X)) + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", + 2009. + """ + if n_classes == 2: + decision = np.vstack([-decision, decision]).T / 2 + else: + decision /= n_classes - 1 + return softmax(decision, copy=False) + + def predict_proba(self, X): + """Predict class probabilities for X. + + The predicted class probabilities of an input sample is computed as + the weighted mean predicted class probabilities of the classifiers + in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. 
+ """ + check_is_fitted(self) + n_classes = self.n_classes_ + + if n_classes == 1: + return np.ones((_num_samples(X), 1)) + + decision = self.decision_function(X) + return self._compute_proba_from_decision(decision, n_classes) + + def staged_predict_proba(self, X): + """Predict class probabilities for X. + + The predicted class probabilities of an input sample is computed as + the weighted mean predicted class probabilities of the classifiers + in the ensemble. + + This generator method yields the ensemble predicted class probabilities + after each iteration of boosting and therefore allows monitoring, such + as to determine the predicted class probabilities on a test set after + each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + p : generator of ndarray of shape (n_samples,) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. + """ + + n_classes = self.n_classes_ + + for decision in self.staged_decision_function(X): + yield self._compute_proba_from_decision(decision, n_classes) + + def predict_log_proba(self, X): + """Predict class log-probabilities for X. + + The predicted class log-probabilities of an input sample is computed as + the weighted mean predicted class log-probabilities of the classifiers + in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. 
+ """ + return np.log(self.predict_proba(X)) + + +class AdaBoostRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseWeightBoosting): + """An AdaBoost regressor. + + An AdaBoost [1] regressor is a meta-estimator that begins by fitting a + regressor on the original dataset and then fits additional copies of the + regressor on the same dataset but where the weights of instances are + adjusted according to the error of the current prediction. As such, + subsequent regressors focus more on difficult cases. + + This class implements the algorithm known as AdaBoost.R2 [2]. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : object, default=None + The base estimator from which the boosted ensemble is built. + If ``None``, then the base estimator is + :class:`~sklearn.tree.DecisionTreeRegressor` initialized with + `max_depth=3`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=50 + The maximum number of estimators at which boosting is terminated. + In case of perfect fit, the learning procedure is stopped early. + Values must be in the range `[1, inf)`. + + learning_rate : float, default=1.0 + Weight applied to each regressor at each boosting iteration. A higher + learning rate increases the contribution of each regressor. There is + a trade-off between the `learning_rate` and `n_estimators` parameters. + Values must be in the range `(0.0, inf)`. + + loss : {'linear', 'square', 'exponential'}, default='linear' + The loss function to use when updating the weights after each + boosting iteration. + + random_state : int, RandomState instance or None, default=None + Controls the random seed given at each `estimator` at each + boosting iteration. + Thus, it is only used when `estimator` exposes a `random_state`. + In addition, it controls the bootstrap of the weights used to train the + `estimator` at each boosting iteration. 
+ Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of regressors + The collection of fitted sub-estimators. + + estimator_weights_ : ndarray of floats + Weights for each estimator in the boosted ensemble. + + estimator_errors_ : ndarray of floats + Regression error for each estimator in the boosted ensemble. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances if supported by the + ``estimator`` (when based on decision trees). + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdaBoostClassifier : An AdaBoost classifier. + GradientBoostingRegressor : Gradient Boosting Classification Tree. + sklearn.tree.DecisionTreeRegressor : A decision tree regressor. + + References + ---------- + .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of + on-Line Learning and an Application to Boosting", 1995. + + .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. + + Examples + -------- + >>> from sklearn.ensemble import AdaBoostRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_features=4, n_informative=2, + ... 
random_state=0, shuffle=False) + >>> regr = AdaBoostRegressor(random_state=0, n_estimators=100) + >>> regr.fit(X, y) + AdaBoostRegressor(n_estimators=100, random_state=0) + >>> regr.predict([[0, 0, 0, 0]]) + array([4.7972...]) + >>> regr.score(X, y) + 0.9771... + + For a detailed example of utilizing :class:`~sklearn.ensemble.AdaBoostRegressor` + to fit a sequence of decision trees as weak learners, please refer to + :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_regression.py`. + """ + + _parameter_constraints: dict = { + **BaseWeightBoosting._parameter_constraints, + "loss": [StrOptions({"linear", "square", "exponential"})], + } + + def __init__( + self, + estimator=None, + *, + n_estimators=50, + learning_rate=1.0, + loss="linear", + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + learning_rate=learning_rate, + random_state=random_state, + ) + + self.loss = loss + self.random_state = random_state + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3)) + + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost for regression + + Perform a single boost according to the AdaBoost.R2 algorithm and + return the updated sample weights. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) + The target values (class labels in classification, real numbers in + regression). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState + The RandomState instance used if the base estimator accepts a + `random_state` attribute. + Controls also the bootstrap of the weights used to train the weak + learner. 
+ + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. + + estimator_error : float + The regression error for the current boost. + If None then boosting has terminated early. + """ + estimator = self._make_estimator(random_state=random_state) + + # Weighted sampling of the training set with replacement + bootstrap_idx = random_state.choice( + np.arange(_num_samples(X)), + size=_num_samples(X), + replace=True, + p=sample_weight, + ) + + # Fit on the bootstrapped sample and obtain a prediction + # for all samples in the training set + X_ = _safe_indexing(X, bootstrap_idx) + y_ = _safe_indexing(y, bootstrap_idx) + estimator.fit(X_, y_) + y_predict = estimator.predict(X) + + error_vect = np.abs(y_predict - y) + sample_mask = sample_weight > 0 + masked_sample_weight = sample_weight[sample_mask] + masked_error_vector = error_vect[sample_mask] + + error_max = masked_error_vector.max() + if error_max != 0: + masked_error_vector /= error_max + + if self.loss == "square": + masked_error_vector **= 2 + elif self.loss == "exponential": + masked_error_vector = 1.0 - np.exp(-masked_error_vector) + + # Calculate the average loss + estimator_error = (masked_sample_weight * masked_error_vector).sum() + + if estimator_error <= 0: + # Stop if fit is perfect + return sample_weight, 1.0, 0.0 + + elif estimator_error >= 0.5: + # Discard current estimator only if it isn't the only one + if len(self.estimators_) > 1: + self.estimators_.pop(-1) + return None, None, None + + beta = estimator_error / (1.0 - estimator_error) + + # Boost weight using AdaBoost.R2 alg + estimator_weight = self.learning_rate * np.log(1.0 / beta) + + if not iboost == self.n_estimators - 1: + sample_weight[sample_mask] *= np.power( + beta, (1.0 - masked_error_vector) * self.learning_rate + ) + + return 
sample_weight, estimator_weight, estimator_error + + def _get_median_predict(self, X, limit): + # Evaluate predictions of all estimators + predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T + + # Sort the predictions + sorted_idx = np.argsort(predictions, axis=1) + + # Find index of median prediction for each sample + weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1) + median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis] + median_idx = median_or_above.argmax(axis=1) + + median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx] + + # Return median predictions + return predictions[np.arange(_num_samples(X)), median_estimators] + + def predict(self, X): + """Predict regression value for X. + + The predicted regression value of an input sample is computed + as the weighted median prediction of the regressors in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted regression values. + """ + check_is_fitted(self) + X = self._check_X(X) + + return self._get_median_predict(X, len(self.estimators_)) + + def staged_predict(self, X): + """Return staged predictions for X. + + The predicted regression value of an input sample is computed + as the weighted median prediction of the regressors in the ensemble. + + This generator method yields the ensemble prediction after each + iteration of boosting and therefore allows monitoring, such as to + determine the prediction on a test set after each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted regression values. 
+ """ + check_is_fitted(self) + X = self._check_X(X) + + for i, _ in enumerate(self.estimators_, 1): + yield self._get_median_predict(X, limit=i) diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/meson.build b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..bc5868b3a01042ae21234d62c62da77e409627df --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/meson.build @@ -0,0 +1,10 @@ +py.extension_module( + '_gradient_boosting', + ['_gradient_boosting.pyx'] + utils_cython_tree, + dependencies: [np_dep], + cython_args: cython_args, + subdir: 'sklearn/ensemble', + install: true +) + +subdir('_hist_gradient_boosting') diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__init__.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d88313ecd09aae1aecca4a1e121108bf9fec231 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26fbbcc4a00cdae9bcce9775cdfdd722c943d3a0 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc differ diff --git 
a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b660e83d1a9fea4934e94b54700b7091e5c1958 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..103253f8eaed4559607f6bcd506ec666ce65d741 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f769fffe24f8729a8ccd1c3df928bf4d442b028e Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7512b0d85304881afb8e703c842b3ec127aa2135 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc differ diff --git 
a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..830103dbb330cd9b5606998981d6dd6a7aed29fe Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bd8984674a362301e4f9573499fd1d9682de971 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8467747e1dffb5b4d8eeb9d34c941cea540ad1a7 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7e8a9e9e83547433c224661346c3aac73e5cdcf Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc differ diff --git 
a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py new file mode 100644 index 0000000000000000000000000000000000000000..f5386804d77d76e2f2c45cbe89b129d6becbf702 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py @@ -0,0 +1,977 @@ +""" +Testing for the bagging ensemble module (sklearn.ensemble.bagging). +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from itertools import cycle, product + +import joblib +import numpy as np +import pytest + +import sklearn +from sklearn.base import BaseEstimator +from sklearn.datasets import load_diabetes, load_iris, make_hastie_10_2 +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import ( + AdaBoostClassifier, + AdaBoostRegressor, + BaggingClassifier, + BaggingRegressor, + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, + RandomForestClassifier, + RandomForestRegressor, +) +from sklearn.feature_selection import SelectKBest +from sklearn.linear_model import LogisticRegression, Perceptron +from sklearn.model_selection import GridSearchCV, ParameterGrid, train_test_split +from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import FunctionTransformer, scale +from sklearn.random_projection import SparseRandomProjection +from sklearn.svm import SVC, SVR +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +rng = check_random_state(0) + +# also load the iris dataset +# and randomly permute it +iris = load_iris() +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] 
+iris.target = iris.target[perm] + +# also load the diabetes dataset +# and randomly permute it +diabetes = load_diabetes() +perm = rng.permutation(diabetes.target.size) +diabetes.data = diabetes.data[perm] +diabetes.target = diabetes.target[perm] + + +def test_classification(): + # Check classification for various parameter settings. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + grid = ParameterGrid( + { + "max_samples": [0.5, 1.0], + "max_features": [1, 4], + "bootstrap": [True, False], + "bootstrap_features": [True, False], + } + ) + estimators = [ + None, + DummyClassifier(), + Perceptron(max_iter=20), + DecisionTreeClassifier(max_depth=2), + KNeighborsClassifier(), + SVC(), + ] + # Try different parameter settings with different base classifiers without + # doing the full cartesian product to keep the test durations low. + for params, estimator in zip(grid, cycle(estimators)): + BaggingClassifier( + estimator=estimator, + random_state=rng, + n_estimators=2, + **params, + ).fit(X_train, y_train).predict(X_test) + + +@pytest.mark.parametrize( + "sparse_container, params, method", + product( + CSR_CONTAINERS + CSC_CONTAINERS, + [ + { + "max_samples": 0.5, + "max_features": 2, + "bootstrap": True, + "bootstrap_features": True, + }, + { + "max_samples": 1.0, + "max_features": 4, + "bootstrap": True, + "bootstrap_features": True, + }, + {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, + {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, + ], + ["predict", "predict_proba", "predict_log_proba", "decision_function"], + ), +) +def test_sparse_classification(sparse_container, params, method): + # Check classification for various parameter settings on sparse input. 
+ + class CustomSVC(SVC): + """SVC variant that records the nature of the training set""" + + def fit(self, X, y): + super().fit(X, y) + self.data_type_ = type(X) + return self + + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + scale(iris.data), iris.target, random_state=rng + ) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + # Trained on sparse format + sparse_classifier = BaggingClassifier( + estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"), + random_state=1, + **params, + ).fit(X_train_sparse, y_train) + sparse_results = getattr(sparse_classifier, method)(X_test_sparse) + + # Trained on dense format + dense_classifier = BaggingClassifier( + estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"), + random_state=1, + **params, + ).fit(X_train, y_train) + dense_results = getattr(dense_classifier, method)(X_test) + assert_array_almost_equal(sparse_results, dense_results) + + sparse_type = type(X_train_sparse) + types = [i.data_type_ for i in sparse_classifier.estimators_] + + assert all([t == sparse_type for t in types]) + + +def test_regression(): + # Check regression for various parameter settings. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data[:50], diabetes.target[:50], random_state=rng + ) + grid = ParameterGrid( + { + "max_samples": [0.5, 1.0], + "max_features": [0.5, 1.0], + "bootstrap": [True, False], + "bootstrap_features": [True, False], + } + ) + + for estimator in [ + None, + DummyRegressor(), + DecisionTreeRegressor(), + KNeighborsRegressor(), + SVR(), + ]: + for params in grid: + BaggingRegressor(estimator=estimator, random_state=rng, **params).fit( + X_train, y_train + ).predict(X_test) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_sparse_regression(sparse_container): + # Check regression for various parameter settings on sparse input. 
+ rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data[:50], diabetes.target[:50], random_state=rng + ) + + class CustomSVR(SVR): + """SVC variant that records the nature of the training set""" + + def fit(self, X, y): + super().fit(X, y) + self.data_type_ = type(X) + return self + + parameter_sets = [ + { + "max_samples": 0.5, + "max_features": 2, + "bootstrap": True, + "bootstrap_features": True, + }, + { + "max_samples": 1.0, + "max_features": 4, + "bootstrap": True, + "bootstrap_features": True, + }, + {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, + {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, + ] + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + for params in parameter_sets: + # Trained on sparse format + sparse_classifier = BaggingRegressor( + estimator=CustomSVR(), random_state=1, **params + ).fit(X_train_sparse, y_train) + sparse_results = sparse_classifier.predict(X_test_sparse) + + # Trained on dense format + dense_results = ( + BaggingRegressor(estimator=CustomSVR(), random_state=1, **params) + .fit(X_train, y_train) + .predict(X_test) + ) + + sparse_type = type(X_train_sparse) + types = [i.data_type_ for i in sparse_classifier.estimators_] + + assert_array_almost_equal(sparse_results, dense_results) + assert all([t == sparse_type for t in types]) + assert_array_almost_equal(sparse_results, dense_results) + + +class DummySizeEstimator(BaseEstimator): + def fit(self, X, y): + self.training_size_ = X.shape[0] + self.training_hash_ = joblib.hash(X) + + def predict(self, X): + return np.ones(X.shape[0]) + + +def test_bootstrap_samples(): + # Test that bootstrapping samples generate non-perfect base estimators. 
+ rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + estimator = DecisionTreeRegressor().fit(X_train, y_train) + + # without bootstrap, all trees are perfect on the training set + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_samples=1.0, + bootstrap=False, + random_state=rng, + ).fit(X_train, y_train) + + assert estimator.score(X_train, y_train) == ensemble.score(X_train, y_train) + + # with bootstrap, trees are no longer perfect on the training set + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_samples=1.0, + bootstrap=True, + random_state=rng, + ).fit(X_train, y_train) + + assert estimator.score(X_train, y_train) > ensemble.score(X_train, y_train) + + # check that each sampling correspond to a complete bootstrap resample. + # the size of each bootstrap should be the same as the input data but + # the data should be different (checked using the hash of the data). + ensemble = BaggingRegressor(estimator=DummySizeEstimator(), bootstrap=True).fit( + X_train, y_train + ) + training_hash = [] + for estimator in ensemble.estimators_: + assert estimator.training_size_ == X_train.shape[0] + training_hash.append(estimator.training_hash_) + assert len(set(training_hash)) == len(training_hash) + + +def test_bootstrap_features(): + # Test that bootstrapping features may generate duplicate features. 
+ rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_features=1.0, + bootstrap_features=False, + random_state=rng, + ).fit(X_train, y_train) + + for features in ensemble.estimators_features_: + assert diabetes.data.shape[1] == np.unique(features).shape[0] + + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_features=1.0, + bootstrap_features=True, + random_state=rng, + ).fit(X_train, y_train) + + for features in ensemble.estimators_features_: + assert diabetes.data.shape[1] > np.unique(features).shape[0] + + +def test_probability(): + # Predict probabilities. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + + with np.errstate(divide="ignore", invalid="ignore"): + # Normal case + ensemble = BaggingClassifier( + estimator=DecisionTreeClassifier(), random_state=rng + ).fit(X_train, y_train) + + assert_array_almost_equal( + np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test)) + ) + + assert_array_almost_equal( + ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test)) + ) + + # Degenerate case, where some classes are missing + ensemble = BaggingClassifier( + estimator=LogisticRegression(), random_state=rng, max_samples=5 + ).fit(X_train, y_train) + + assert_array_almost_equal( + np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test)) + ) + + assert_array_almost_equal( + ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test)) + ) + + +def test_oob_score_classification(): + # Check that oob prediction is a good estimation of the generalization + # error. 
+ rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + + for estimator in [DecisionTreeClassifier(), SVC()]: + clf = BaggingClassifier( + estimator=estimator, + n_estimators=100, + bootstrap=True, + oob_score=True, + random_state=rng, + ).fit(X_train, y_train) + + test_score = clf.score(X_test, y_test) + + assert abs(test_score - clf.oob_score_) < 0.1 + + # Test with few estimators + warn_msg = ( + "Some inputs do not have OOB scores. This probably means too few " + "estimators were used to compute any reliable oob estimates." + ) + with pytest.warns(UserWarning, match=warn_msg): + clf = BaggingClassifier( + estimator=estimator, + n_estimators=1, + bootstrap=True, + oob_score=True, + random_state=rng, + ) + clf.fit(X_train, y_train) + + +def test_oob_score_regression(): + # Check that oob prediction is a good estimation of the generalization + # error. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + clf = BaggingRegressor( + estimator=DecisionTreeRegressor(), + n_estimators=50, + bootstrap=True, + oob_score=True, + random_state=rng, + ).fit(X_train, y_train) + + test_score = clf.score(X_test, y_test) + + assert abs(test_score - clf.oob_score_) < 0.1 + + # Test with few estimators + warn_msg = ( + "Some inputs do not have OOB scores. This probably means too few " + "estimators were used to compute any reliable oob estimates." + ) + with pytest.warns(UserWarning, match=warn_msg): + regr = BaggingRegressor( + estimator=DecisionTreeRegressor(), + n_estimators=1, + bootstrap=True, + oob_score=True, + random_state=rng, + ) + regr.fit(X_train, y_train) + + +def test_single_estimator(): + # Check singleton ensembles. 
+ rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + clf1 = BaggingRegressor( + estimator=KNeighborsRegressor(), + n_estimators=1, + bootstrap=False, + bootstrap_features=False, + random_state=rng, + ).fit(X_train, y_train) + + clf2 = KNeighborsRegressor().fit(X_train, y_train) + + assert_array_almost_equal(clf1.predict(X_test), clf2.predict(X_test)) + + +def test_error(): + # Test support of decision_function + X, y = iris.data, iris.target + base = DecisionTreeClassifier() + assert not hasattr(BaggingClassifier(base).fit(X, y), "decision_function") + + +def test_parallel_classification(): + # Check parallel classification. + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=0 + ) + + ensemble = BaggingClassifier( + DecisionTreeClassifier(), n_jobs=3, random_state=0 + ).fit(X_train, y_train) + + # predict_proba + y1 = ensemble.predict_proba(X_test) + ensemble.set_params(n_jobs=1) + y2 = ensemble.predict_proba(X_test) + assert_array_almost_equal(y1, y2) + + ensemble = BaggingClassifier( + DecisionTreeClassifier(), n_jobs=1, random_state=0 + ).fit(X_train, y_train) + + y3 = ensemble.predict_proba(X_test) + assert_array_almost_equal(y1, y3) + + # decision_function + ensemble = BaggingClassifier( + SVC(decision_function_shape="ovr"), n_jobs=3, random_state=0 + ).fit(X_train, y_train) + + decisions1 = ensemble.decision_function(X_test) + ensemble.set_params(n_jobs=1) + decisions2 = ensemble.decision_function(X_test) + assert_array_almost_equal(decisions1, decisions2) + + ensemble = BaggingClassifier( + SVC(decision_function_shape="ovr"), n_jobs=1, random_state=0 + ).fit(X_train, y_train) + + decisions3 = ensemble.decision_function(X_test) + assert_array_almost_equal(decisions1, decisions3) + + +def test_parallel_regression(): + # Check parallel regression. 
+ rng = check_random_state(0) + + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit( + X_train, y_train + ) + + ensemble.set_params(n_jobs=1) + y1 = ensemble.predict(X_test) + ensemble.set_params(n_jobs=2) + y2 = ensemble.predict(X_test) + assert_array_almost_equal(y1, y2) + + ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit( + X_train, y_train + ) + + y3 = ensemble.predict(X_test) + assert_array_almost_equal(y1, y3) + + +def test_gridsearch(): + # Check that bagging ensembles can be grid-searched. + # Transform iris into a binary classification task + X, y = iris.data, iris.target + y[y == 2] = 1 + + # Grid search with scoring based on decision_function + parameters = {"n_estimators": (1, 2), "estimator__C": (1, 2)} + + GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y) + + +def test_estimator(): + # Check estimator and its default values. 
+ rng = check_random_state(0) + + # Classification + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + + ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train) + + assert isinstance(ensemble.estimator_, DecisionTreeClassifier) + + ensemble = BaggingClassifier( + DecisionTreeClassifier(), n_jobs=3, random_state=0 + ).fit(X_train, y_train) + + assert isinstance(ensemble.estimator_, DecisionTreeClassifier) + + ensemble = BaggingClassifier(Perceptron(), n_jobs=3, random_state=0).fit( + X_train, y_train + ) + + assert isinstance(ensemble.estimator_, Perceptron) + + # Regression + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train) + + assert isinstance(ensemble.estimator_, DecisionTreeRegressor) + + ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit( + X_train, y_train + ) + + assert isinstance(ensemble.estimator_, DecisionTreeRegressor) + + ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train) + assert isinstance(ensemble.estimator_, SVR) + + +def test_bagging_with_pipeline(): + estimator = BaggingClassifier( + make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2 + ) + estimator.fit(iris.data, iris.target) + assert isinstance(estimator[0].steps[-1][1].random_state, int) + + +class DummyZeroEstimator(BaseEstimator): + def fit(self, X, y): + self.classes_ = np.unique(y) + return self + + def predict(self, X): + return self.classes_[np.zeros(X.shape[0], dtype=int)] + + +def test_bagging_sample_weight_unsupported_but_passed(): + estimator = BaggingClassifier(DummyZeroEstimator()) + rng = check_random_state(0) + + estimator.fit(iris.data, iris.target).predict(iris.data) + with pytest.raises(ValueError): + estimator.fit( + iris.data, + iris.target, + 
sample_weight=rng.randint(10, size=(iris.data.shape[0])), + ) + + +def test_warm_start(random_state=42): + # Test if fitting incrementally with warm start gives a forest of the + # right size and the same results as a normal fit. + X, y = make_hastie_10_2(n_samples=20, random_state=1) + + clf_ws = None + for n_estimators in [5, 10]: + if clf_ws is None: + clf_ws = BaggingClassifier( + n_estimators=n_estimators, random_state=random_state, warm_start=True + ) + else: + clf_ws.set_params(n_estimators=n_estimators) + clf_ws.fit(X, y) + assert len(clf_ws) == n_estimators + + clf_no_ws = BaggingClassifier( + n_estimators=10, random_state=random_state, warm_start=False + ) + clf_no_ws.fit(X, y) + + assert set([tree.random_state for tree in clf_ws]) == set( + [tree.random_state for tree in clf_no_ws] + ) + + +def test_warm_start_smaller_n_estimators(): + # Test if warm start'ed second fit with smaller n_estimators raises error. + X, y = make_hastie_10_2(n_samples=20, random_state=1) + clf = BaggingClassifier(n_estimators=5, warm_start=True) + clf.fit(X, y) + clf.set_params(n_estimators=4) + with pytest.raises(ValueError): + clf.fit(X, y) + + +def test_warm_start_equal_n_estimators(): + # Test that nothing happens when fitting without increasing n_estimators + X, y = make_hastie_10_2(n_samples=20, random_state=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) + + clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83) + clf.fit(X_train, y_train) + + y_pred = clf.predict(X_test) + # modify X to nonsense values, this should not change anything + X_train += 1.0 + + warn_msg = "Warm-start fitting without increasing n_estimators does not" + with pytest.warns(UserWarning, match=warn_msg): + clf.fit(X_train, y_train) + assert_array_equal(y_pred, clf.predict(X_test)) + + +def test_warm_start_equivalence(): + # warm started classifier with 5+5 estimators should be equivalent to + # one classifier with 10 estimators + X, y = 
make_hastie_10_2(n_samples=20, random_state=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) + + clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141) + clf_ws.fit(X_train, y_train) + clf_ws.set_params(n_estimators=10) + clf_ws.fit(X_train, y_train) + y1 = clf_ws.predict(X_test) + + clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141) + clf.fit(X_train, y_train) + y2 = clf.predict(X_test) + + assert_array_almost_equal(y1, y2) + + +def test_warm_start_with_oob_score_fails(): + # Check using oob_score and warm_start simultaneously fails + X, y = make_hastie_10_2(n_samples=20, random_state=1) + clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True) + with pytest.raises(ValueError): + clf.fit(X, y) + + +def test_oob_score_removed_on_warm_start(): + X, y = make_hastie_10_2(n_samples=100, random_state=1) + + clf = BaggingClassifier(n_estimators=5, oob_score=True) + clf.fit(X, y) + + clf.set_params(warm_start=True, oob_score=False, n_estimators=10) + clf.fit(X, y) + + with pytest.raises(AttributeError): + getattr(clf, "oob_score_") + + +def test_oob_score_consistency(): + # Make sure OOB scores are identical when random_state, estimator, and + # training data are fixed and fitting is done twice + X, y = make_hastie_10_2(n_samples=200, random_state=1) + bagging = BaggingClassifier( + KNeighborsClassifier(), + max_samples=0.5, + max_features=0.5, + oob_score=True, + random_state=1, + ) + assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_ + + +def test_estimators_samples(): + # Check that format of estimators_samples_ is correct and that results + # generated at fit time can be identically reproduced at a later time + # using data saved in object attributes. 
+ X, y = make_hastie_10_2(n_samples=200, random_state=1) + bagging = BaggingClassifier( + LogisticRegression(), + max_samples=0.5, + max_features=0.5, + random_state=1, + bootstrap=False, + ) + bagging.fit(X, y) + + # Get relevant attributes + estimators_samples = bagging.estimators_samples_ + estimators_features = bagging.estimators_features_ + estimators = bagging.estimators_ + + # Test for correct formatting + assert len(estimators_samples) == len(estimators) + assert len(estimators_samples[0]) == len(X) // 2 + assert estimators_samples[0].dtype.kind == "i" + + # Re-fit single estimator to test for consistent sampling + estimator_index = 0 + estimator_samples = estimators_samples[estimator_index] + estimator_features = estimators_features[estimator_index] + estimator = estimators[estimator_index] + + X_train = (X[estimator_samples])[:, estimator_features] + y_train = y[estimator_samples] + + orig_coefs = estimator.coef_ + estimator.fit(X_train, y_train) + new_coefs = estimator.coef_ + + assert_array_almost_equal(orig_coefs, new_coefs) + + +def test_estimators_samples_deterministic(): + # This test is a regression test to check that with a random step + # (e.g. SparseRandomProjection) and a given random state, the results + # generated at fit time can be identically reproduced at a later time using + # data saved in object attributes. Check issue #9524 for full discussion. 
+ + iris = load_iris() + X, y = iris.data, iris.target + + base_pipeline = make_pipeline( + SparseRandomProjection(n_components=2), LogisticRegression() + ) + clf = BaggingClassifier(estimator=base_pipeline, max_samples=0.5, random_state=0) + clf.fit(X, y) + pipeline_estimator_coef = clf.estimators_[0].steps[-1][1].coef_.copy() + + estimator = clf.estimators_[0] + estimator_sample = clf.estimators_samples_[0] + estimator_feature = clf.estimators_features_[0] + + X_train = (X[estimator_sample])[:, estimator_feature] + y_train = y[estimator_sample] + + estimator.fit(X_train, y_train) + assert_array_equal(estimator.steps[-1][1].coef_, pipeline_estimator_coef) + + +def test_max_samples_consistency(): + # Make sure validated max_samples and original max_samples are identical + # when valid integer max_samples supplied by user + max_samples = 100 + X, y = make_hastie_10_2(n_samples=2 * max_samples, random_state=1) + bagging = BaggingClassifier( + KNeighborsClassifier(), + max_samples=max_samples, + max_features=0.5, + random_state=1, + ) + bagging.fit(X, y) + assert bagging._max_samples == max_samples + + +def test_set_oob_score_label_encoding(): + # Make sure the oob_score doesn't change when the labels change + # See: https://github.com/scikit-learn/scikit-learn/issues/8933 + random_state = 5 + X = [[-1], [0], [1]] * 5 + Y1 = ["A", "B", "C"] * 5 + Y2 = [-1, 0, 1] * 5 + Y3 = [0, 1, 2] * 5 + x1 = ( + BaggingClassifier(oob_score=True, random_state=random_state) + .fit(X, Y1) + .oob_score_ + ) + x2 = ( + BaggingClassifier(oob_score=True, random_state=random_state) + .fit(X, Y2) + .oob_score_ + ) + x3 = ( + BaggingClassifier(oob_score=True, random_state=random_state) + .fit(X, Y3) + .oob_score_ + ) + assert [x1, x2] == [x3, x3] + + +def replace(X): + X = X.astype("float", copy=True) + X[~np.isfinite(X)] = 0 + return X + + +def test_bagging_regressor_with_missing_inputs(): + # Check that BaggingRegressor can accept X with missing/infinite data + X = np.array( + [ + [1, 3, 
5], + [2, None, 6], + [2, np.nan, 6], + [2, np.inf, 6], + [2, -np.inf, 6], + ] + ) + y_values = [ + np.array([2, 3, 3, 3, 3]), + np.array( + [ + [2, 1, 9], + [3, 6, 8], + [3, 6, 8], + [3, 6, 8], + [3, 6, 8], + ] + ), + ] + for y in y_values: + regressor = DecisionTreeRegressor() + pipeline = make_pipeline(FunctionTransformer(replace), regressor) + pipeline.fit(X, y).predict(X) + bagging_regressor = BaggingRegressor(pipeline) + y_hat = bagging_regressor.fit(X, y).predict(X) + assert y.shape == y_hat.shape + + # Verify that exceptions can be raised by wrapper regressor + regressor = DecisionTreeRegressor() + pipeline = make_pipeline(regressor) + with pytest.raises(ValueError): + pipeline.fit(X, y) + bagging_regressor = BaggingRegressor(pipeline) + with pytest.raises(ValueError): + bagging_regressor.fit(X, y) + + +def test_bagging_classifier_with_missing_inputs(): + # Check that BaggingClassifier can accept X with missing/infinite data + X = np.array( + [ + [1, 3, 5], + [2, None, 6], + [2, np.nan, 6], + [2, np.inf, 6], + [2, -np.inf, 6], + ] + ) + y = np.array([3, 6, 6, 6, 6]) + classifier = DecisionTreeClassifier() + pipeline = make_pipeline(FunctionTransformer(replace), classifier) + pipeline.fit(X, y).predict(X) + bagging_classifier = BaggingClassifier(pipeline) + bagging_classifier.fit(X, y) + y_hat = bagging_classifier.predict(X) + assert y.shape == y_hat.shape + bagging_classifier.predict_log_proba(X) + bagging_classifier.predict_proba(X) + + # Verify that exceptions can be raised by wrapper classifier + classifier = DecisionTreeClassifier() + pipeline = make_pipeline(classifier) + with pytest.raises(ValueError): + pipeline.fit(X, y) + bagging_classifier = BaggingClassifier(pipeline) + with pytest.raises(ValueError): + bagging_classifier.fit(X, y) + + +def test_bagging_small_max_features(): + # Check that Bagging estimator can accept low fractional max_features + + X = np.array([[1, 2], [3, 4]]) + y = np.array([1, 0]) + + bagging = 
BaggingClassifier(LogisticRegression(), max_features=0.3, random_state=1) + bagging.fit(X, y) + + +def test_bagging_get_estimators_indices(): + # Check that Bagging estimator can generate sample indices properly + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/16436 + + rng = np.random.RandomState(0) + X = rng.randn(13, 4) + y = np.arange(13) + + class MyEstimator(DecisionTreeRegressor): + """An estimator which stores y indices information at fit.""" + + def fit(self, X, y): + self._sample_indices = y + + clf = BaggingRegressor(estimator=MyEstimator(), n_estimators=1, random_state=0) + clf.fit(X, y) + + assert_array_equal(clf.estimators_[0]._sample_indices, clf.estimators_samples_[0]) + + +@pytest.mark.parametrize( + "bagging, expected_allow_nan", + [ + (BaggingClassifier(HistGradientBoostingClassifier(max_iter=1)), True), + (BaggingRegressor(HistGradientBoostingRegressor(max_iter=1)), True), + (BaggingClassifier(LogisticRegression()), False), + (BaggingRegressor(SVR()), False), + ], +) +def test_bagging_allow_nan_tag(bagging, expected_allow_nan): + """Check that bagging inherits allow_nan tag.""" + assert bagging.__sklearn_tags__().input_tags.allow_nan == expected_allow_nan + + +@pytest.mark.parametrize( + "model", + [ + BaggingClassifier( + estimator=RandomForestClassifier(n_estimators=1), n_estimators=1 + ), + BaggingRegressor( + estimator=RandomForestRegressor(n_estimators=1), n_estimators=1 + ), + ], +) +def test_bagging_with_metadata_routing(model): + """Make sure that metadata routing works with non-default estimator.""" + with sklearn.config_context(enable_metadata_routing=True): + model.fit(iris.data, iris.target) + + +@pytest.mark.parametrize( + "model", + [ + BaggingClassifier( + estimator=AdaBoostClassifier(n_estimators=1), + n_estimators=1, + ), + BaggingRegressor(estimator=AdaBoostRegressor(n_estimators=1), n_estimators=1), + ], +) +def test_bagging_without_support_metadata_routing(model): + """Make sure that we 
still can use an estimator that does not implement the + metadata routing.""" + model.fit(iris.data, iris.target) diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..6e83512ccd1d673951655c4572ac294fdda52af2 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py @@ -0,0 +1,262 @@ +import numpy as np +import pytest + +from sklearn.base import ClassifierMixin, clone, is_classifier +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.ensemble import ( + RandomForestClassifier, + RandomForestRegressor, + StackingClassifier, + StackingRegressor, + VotingClassifier, + VotingRegressor, +) +from sklearn.impute import SimpleImputer +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.pipeline import make_pipeline +from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR + +X, y = load_iris(return_X_y=True) + +X_r, y_r = load_diabetes(return_X_y=True) + + +@pytest.mark.parametrize( + "X, y, estimator", + [ + ( + *make_classification(n_samples=10), + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC()), + ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)), + ], + cv=2, + ), + ), + ( + *make_classification(n_samples=10), + VotingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC()), + ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)), + ] + ), + ), + ( + *make_regression(n_samples=10), + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR()), + ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)), + ], + cv=2, + ), + ), + ( + *make_regression(n_samples=10), + VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", 
LinearSVR()), + ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)), + ] + ), + ), + ], + ids=[ + "stacking-classifier", + "voting-classifier", + "stacking-regressor", + "voting-regressor", + ], +) +def test_ensemble_heterogeneous_estimators_behavior(X, y, estimator): + # check that the behavior of `estimators`, `estimators_`, + # `named_estimators`, `named_estimators_` is consistent across all + # ensemble classes and when using `set_params()`. + + # before fit + assert "svm" in estimator.named_estimators + assert estimator.named_estimators.svm is estimator.estimators[1][1] + assert estimator.named_estimators.svm is estimator.named_estimators["svm"] + + # check fitted attributes + estimator.fit(X, y) + assert len(estimator.named_estimators) == 3 + assert len(estimator.named_estimators_) == 3 + assert sorted(list(estimator.named_estimators_.keys())) == sorted( + ["lr", "svm", "rf"] + ) + + # check that set_params() does not add a new attribute + estimator_new_params = clone(estimator) + svm_estimator = SVC() if is_classifier(estimator) else SVR() + estimator_new_params.set_params(svm=svm_estimator).fit(X, y) + assert not hasattr(estimator_new_params, "svm") + assert ( + estimator_new_params.named_estimators.lr.get_params() + == estimator.named_estimators.lr.get_params() + ) + assert ( + estimator_new_params.named_estimators.rf.get_params() + == estimator.named_estimators.rf.get_params() + ) + + # check the behavior when setting an dropping an estimator + estimator_dropped = clone(estimator) + estimator_dropped.set_params(svm="drop") + estimator_dropped.fit(X, y) + assert len(estimator_dropped.named_estimators) == 3 + assert estimator_dropped.named_estimators.svm == "drop" + assert len(estimator_dropped.named_estimators_) == 3 + assert sorted(list(estimator_dropped.named_estimators_.keys())) == sorted( + ["lr", "svm", "rf"] + ) + for sub_est in estimator_dropped.named_estimators_: + # check that the correspondence is correct + assert not isinstance(sub_est, 
type(estimator.named_estimators.svm)) + + # check that we can set the parameters of the underlying classifier + estimator.set_params(svm__C=10.0) + estimator.set_params(rf__max_depth=5) + assert ( + estimator.get_params()["svm__C"] + == estimator.get_params()["svm"].get_params()["C"] + ) + assert ( + estimator.get_params()["rf__max_depth"] + == estimator.get_params()["rf"].get_params()["max_depth"] + ) + + +@pytest.mark.parametrize( + "Ensemble", + [VotingClassifier, StackingRegressor, VotingRegressor], +) +def test_ensemble_heterogeneous_estimators_type(Ensemble): + # check that ensemble will fail during validation if the underlying + # estimators are not of the same type (i.e. classifier or regressor) + # StackingClassifier can have an underlying regresor so it's not checked + if issubclass(Ensemble, ClassifierMixin): + X, y = make_classification(n_samples=10) + estimators = [("lr", LinearRegression())] + ensemble_type = "classifier" + else: + X, y = make_regression(n_samples=10) + estimators = [("lr", LogisticRegression())] + ensemble_type = "regressor" + ensemble = Ensemble(estimators=estimators) + + err_msg = "should be a {}".format(ensemble_type) + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + +@pytest.mark.parametrize( + "X, y, Ensemble", + [ + (*make_classification(n_samples=10), StackingClassifier), + (*make_classification(n_samples=10), VotingClassifier), + (*make_regression(n_samples=10), StackingRegressor), + (*make_regression(n_samples=10), VotingRegressor), + ], +) +def test_ensemble_heterogeneous_estimators_name_validation(X, y, Ensemble): + # raise an error when the name contains dunder + if issubclass(Ensemble, ClassifierMixin): + estimators = [("lr__", LogisticRegression())] + else: + estimators = [("lr__", LinearRegression())] + ensemble = Ensemble(estimators=estimators) + + err_msg = r"Estimator names must not contain __: got \['lr__'\]" + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + # raise 
an error when the name is not unique + if issubclass(Ensemble, ClassifierMixin): + estimators = [("lr", LogisticRegression()), ("lr", LogisticRegression())] + else: + estimators = [("lr", LinearRegression()), ("lr", LinearRegression())] + ensemble = Ensemble(estimators=estimators) + + err_msg = r"Names provided are not unique: \['lr', 'lr'\]" + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + # raise an error when the name conflicts with the parameters + if issubclass(Ensemble, ClassifierMixin): + estimators = [("estimators", LogisticRegression())] + else: + estimators = [("estimators", LinearRegression())] + ensemble = Ensemble(estimators=estimators) + + err_msg = "Estimator names conflict with constructor arguments" + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + +@pytest.mark.parametrize( + "X, y, estimator", + [ + ( + *make_classification(n_samples=10), + StackingClassifier(estimators=[("lr", LogisticRegression())]), + ), + ( + *make_classification(n_samples=10), + VotingClassifier(estimators=[("lr", LogisticRegression())]), + ), + ( + *make_regression(n_samples=10), + StackingRegressor(estimators=[("lr", LinearRegression())]), + ), + ( + *make_regression(n_samples=10), + VotingRegressor(estimators=[("lr", LinearRegression())]), + ), + ], + ids=[ + "stacking-classifier", + "voting-classifier", + "stacking-regressor", + "voting-regressor", + ], +) +def test_ensemble_heterogeneous_estimators_all_dropped(X, y, estimator): + # check that we raise a consistent error when all estimators are + # dropped + estimator.set_params(lr="drop") + with pytest.raises(ValueError, match="All estimators are dropped."): + estimator.fit(X, y) + + +@pytest.mark.parametrize( + "Ensemble, Estimator, X, y", + [ + (StackingClassifier, LogisticRegression, X, y), + (StackingRegressor, LinearRegression, X_r, y_r), + (VotingClassifier, LogisticRegression, X, y), + (VotingRegressor, LinearRegression, X_r, y_r), + ], +) +# FIXME: we should move 
this test in `estimator_checks` once we are able +# to construct meta-estimator instances +def test_heterogeneous_ensemble_support_missing_values(Ensemble, Estimator, X, y): + # check that Voting and Stacking predictor delegate the missing values + # validation to the underlying estimator. + X = X.copy() + mask = np.random.choice([1, 0], X.shape, p=[0.1, 0.9]).astype(bool) + X[mask] = np.nan + pipe = make_pipeline(SimpleImputer(), Estimator()) + ensemble = Ensemble(estimators=[("pipe1", pipe), ("pipe2", pipe)]) + ensemble.fit(X, y).score(X, y) diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py new file mode 100644 index 0000000000000000000000000000000000000000..aadf230fd751e670cec81ece47c122c49efc9bb5 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py @@ -0,0 +1,1864 @@ +""" +Testing for the forest module (sklearn.ensemble.forest). 
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import itertools +import math +import pickle +from collections import defaultdict +from functools import partial +from itertools import combinations, product +from typing import Any, Dict +from unittest.mock import patch + +import joblib +import numpy as np +import pytest +from scipy.special import comb + +import sklearn +from sklearn import clone, datasets +from sklearn.datasets import make_classification, make_hastie_10_2 +from sklearn.decomposition import TruncatedSVD +from sklearn.dummy import DummyRegressor +from sklearn.ensemble import ( + ExtraTreesClassifier, + ExtraTreesRegressor, + RandomForestClassifier, + RandomForestRegressor, + RandomTreesEmbedding, +) +from sklearn.ensemble._forest import ( + _generate_unsampled_indices, + _get_n_samples_bootstrap, +) +from sklearn.exceptions import NotFittedError +from sklearn.metrics import ( + explained_variance_score, + f1_score, + mean_poisson_deviance, + mean_squared_error, +) +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.svm import LinearSVC +from sklearn.tree._classes import SPARSE_SPLITTERS +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, + skip_if_no_parallel, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.multiclass import type_of_target +from sklearn.utils.parallel import Parallel +from sklearn.utils.validation import check_random_state + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [-1, 1, 1] + +# Larger classification sample used for testing feature importances +X_large, y_large = datasets.make_classification( + n_samples=500, + n_features=10, + n_informative=3, + n_redundant=0, + 
n_repeated=0, + shuffle=False, + random_state=0, +) + +# also load the iris dataset +# and randomly permute it +iris = datasets.load_iris() +rng = check_random_state(0) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + +# Make regression dataset +X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10, random_state=1) + +# also make a hastie_10_2 dataset +hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1) +hastie_X = hastie_X.astype(np.float32) + +# Get the default backend in joblib to test parallelism and interaction with +# different backends +DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__ + +FOREST_CLASSIFIERS = { + "ExtraTreesClassifier": ExtraTreesClassifier, + "RandomForestClassifier": RandomForestClassifier, +} + +FOREST_REGRESSORS = { + "ExtraTreesRegressor": ExtraTreesRegressor, + "RandomForestRegressor": RandomForestRegressor, +} + +FOREST_TRANSFORMERS = { + "RandomTreesEmbedding": RandomTreesEmbedding, +} + +FOREST_ESTIMATORS: Dict[str, Any] = dict() +FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) +FOREST_ESTIMATORS.update(FOREST_REGRESSORS) +FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) + +FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = FOREST_CLASSIFIERS.copy() +FOREST_CLASSIFIERS_REGRESSORS.update(FOREST_REGRESSORS) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_classification_toy(name): + """Check classification on a toy dataset.""" + ForestClassifier = FOREST_CLASSIFIERS[name] + + clf = ForestClassifier(n_estimators=10, random_state=1) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 10 == len(clf) + + clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 10 == len(clf) + + # also test apply + leaf_indices = clf.apply(X) + assert leaf_indices.shape == (len(X), clf.n_estimators) + + 
+@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +@pytest.mark.parametrize("criterion", ("gini", "log_loss")) +def test_iris_criterion(name, criterion): + # Check consistency on dataset iris. + ForestClassifier = FOREST_CLASSIFIERS[name] + + clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) + clf.fit(iris.data, iris.target) + score = clf.score(iris.data, iris.target) + assert score > 0.9, "Failed with criterion %s and score = %f" % (criterion, score) + + clf = ForestClassifier( + n_estimators=10, criterion=criterion, max_features=2, random_state=1 + ) + clf.fit(iris.data, iris.target) + score = clf.score(iris.data, iris.target) + assert score > 0.5, "Failed with criterion %s and score = %f" % (criterion, score) + + +@pytest.mark.parametrize("name", FOREST_REGRESSORS) +@pytest.mark.parametrize( + "criterion", ("squared_error", "absolute_error", "friedman_mse") +) +def test_regression_criterion(name, criterion): + # Check consistency on regression dataset. + ForestRegressor = FOREST_REGRESSORS[name] + + reg = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) + reg.fit(X_reg, y_reg) + score = reg.score(X_reg, y_reg) + assert ( + score > 0.93 + ), "Failed with max_features=None, criterion %s and score = %f" % ( + criterion, + score, + ) + + reg = ForestRegressor( + n_estimators=5, criterion=criterion, max_features=6, random_state=1 + ) + reg.fit(X_reg, y_reg) + score = reg.score(X_reg, y_reg) + assert score > 0.92, "Failed with max_features=6, criterion %s and score = %f" % ( + criterion, + score, + ) + + +def test_poisson_vs_mse(): + """Test that random forest with poisson criterion performs better than + mse for a poisson target. + + There is a similar test for DecisionTreeRegressor. 
+ """ + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 500, 10 + X = datasets.make_low_rank_matrix( + n_samples=n_train + n_test, n_features=n_features, random_state=rng + ) + # We create a log-linear Poisson model and downscale coef as it will get + # exponentiated. + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + y = rng.poisson(lam=np.exp(X @ coef)) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=rng + ) + # We prevent some overfitting by setting min_samples_split=10. + forest_poi = RandomForestRegressor( + criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng + ) + forest_mse = RandomForestRegressor( + criterion="squared_error", + min_samples_leaf=10, + max_features="sqrt", + random_state=rng, + ) + + forest_poi.fit(X_train, y_train) + forest_mse.fit(X_train, y_train) + dummy = DummyRegressor(strategy="mean").fit(X_train, y_train) + + for X, y, data_name in [(X_train, y_train, "train"), (X_test, y_test, "test")]: + metric_poi = mean_poisson_deviance(y, forest_poi.predict(X)) + # squared_error forest might produce non-positive predictions => clip + # If y = 0 for those, the poisson deviance gets too good. + # If we drew more samples, we would eventually get y > 0 and the + # poisson deviance would explode, i.e. be undefined. Therefore, we do + # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a + # small penalty to the non-positive predictions. + metric_mse = mean_poisson_deviance( + y, np.clip(forest_mse.predict(X), 1e-6, None) + ) + metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) + # As squared_error might correctly predict 0 in train set, its train + # score can be better than Poisson. This is no longer the case for the + # test set. But keep the above comment for clipping in mind. 
+ if data_name == "test": + assert metric_poi < metric_mse + assert metric_poi < 0.8 * metric_dummy + + +@pytest.mark.parametrize("criterion", ("poisson", "squared_error")) +def test_balance_property_random_forest(criterion): + """ "Test that sum(y_pred)==sum(y_true) on the training set.""" + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 500, 10 + X = datasets.make_low_rank_matrix( + n_samples=n_train + n_test, n_features=n_features, random_state=rng + ) + + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + y = rng.poisson(lam=np.exp(X @ coef)) + + reg = RandomForestRegressor( + criterion=criterion, n_estimators=10, bootstrap=False, random_state=rng + ) + reg.fit(X, y) + + assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y)) + + +@pytest.mark.parametrize("name", FOREST_REGRESSORS) +def test_regressor_attributes(name): + # Regression models should not have a classes_ attribute. + r = FOREST_REGRESSORS[name](random_state=0) + assert not hasattr(r, "classes_") + assert not hasattr(r, "n_classes_") + + r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) + assert not hasattr(r, "classes_") + assert not hasattr(r, "n_classes_") + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_probability(name): + # Predict probabilities. 
+ ForestClassifier = FOREST_CLASSIFIERS[name] + with np.errstate(divide="ignore"): + clf = ForestClassifier( + n_estimators=10, random_state=1, max_features=1, max_depth=1 + ) + clf.fit(iris.data, iris.target) + assert_array_almost_equal( + np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0]) + ) + assert_array_almost_equal( + clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)) + ) + + +@pytest.mark.parametrize("dtype", (np.float64, np.float32)) +@pytest.mark.parametrize( + "name, criterion", + itertools.chain( + product(FOREST_CLASSIFIERS, ["gini", "log_loss"]), + product(FOREST_REGRESSORS, ["squared_error", "friedman_mse", "absolute_error"]), + ), +) +def test_importances(dtype, name, criterion): + tolerance = 0.01 + if name in FOREST_REGRESSORS and criterion == "absolute_error": + tolerance = 0.05 + + # cast as dtype + X = X_large.astype(dtype, copy=False) + y = y_large.astype(dtype, copy=False) + + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator(n_estimators=10, criterion=criterion, random_state=0) + est.fit(X, y) + importances = est.feature_importances_ + + # The forest estimator can detect that only the first 3 features of the + # dataset are informative: + n_important = np.sum(importances > 0.1) + assert importances.shape[0] == 10 + assert n_important == 3 + assert np.all(importances[:3] > 0.1) + + # Check with parallel + importances = est.feature_importances_ + est.set_params(n_jobs=2) + importances_parallel = est.feature_importances_ + assert_array_almost_equal(importances, importances_parallel) + + # Check with sample weights + sample_weight = check_random_state(0).randint(1, 10, len(X)) + est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion) + est.fit(X, y, sample_weight=sample_weight) + importances = est.feature_importances_ + assert np.all(importances >= 0.0) + + for scale in [0.5, 100]: + est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion) + 
est.fit(X, y, sample_weight=scale * sample_weight) + importances_bis = est.feature_importances_ + assert np.abs(importances - importances_bis).mean() < tolerance + + +def test_importances_asymptotic(): + # Check whether variable importances of totally randomized trees + # converge towards their theoretical values (See Louppe et al, + # Understanding variable importances in forests of randomized trees, 2013). + + def binomial(k, n): + return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True) + + def entropy(samples): + n_samples = len(samples) + entropy = 0.0 + + for count in np.bincount(samples): + p = 1.0 * count / n_samples + if p > 0: + entropy -= p * np.log2(p) + + return entropy + + def mdi_importance(X_m, X, y): + n_samples, n_features = X.shape + + features = list(range(n_features)) + features.pop(X_m) + values = [np.unique(X[:, i]) for i in range(n_features)] + + imp = 0.0 + + for k in range(n_features): + # Weight of each B of size k + coef = 1.0 / (binomial(k, n_features) * (n_features - k)) + + # For all B of size k + for B in combinations(features, k): + # For all values B=b + for b in product(*[values[B[j]] for j in range(k)]): + mask_b = np.ones(n_samples, dtype=bool) + + for j in range(k): + mask_b &= X[:, B[j]] == b[j] + + X_, y_ = X[mask_b, :], y[mask_b] + n_samples_b = len(X_) + + if n_samples_b > 0: + children = [] + + for xi in values[X_m]: + mask_xi = X_[:, X_m] == xi + children.append(y_[mask_xi]) + + imp += ( + coef + * (1.0 * n_samples_b / n_samples) # P(B=b) + * ( + entropy(y_) + - sum( + [ + entropy(c) * len(c) / n_samples_b + for c in children + ] + ) + ) + ) + + return imp + + data = np.array( + [ + [0, 0, 1, 0, 0, 1, 0, 1], + [1, 0, 1, 1, 1, 0, 1, 2], + [1, 0, 1, 1, 0, 1, 1, 3], + [0, 1, 1, 1, 0, 1, 0, 4], + [1, 1, 0, 1, 0, 1, 1, 5], + [1, 1, 0, 1, 1, 1, 1, 6], + [1, 0, 1, 0, 0, 1, 0, 7], + [1, 1, 1, 1, 1, 1, 1, 8], + [1, 1, 1, 1, 0, 1, 1, 9], + [1, 1, 1, 0, 1, 1, 1, 0], + ] + ) + + X, y = np.array(data[:, :7], dtype=bool), 
data[:, 7] + n_features = X.shape[1] + + # Compute true importances + true_importances = np.zeros(n_features) + + for i in range(n_features): + true_importances[i] = mdi_importance(i, X, y) + + # Estimate importances with totally randomized trees + clf = ExtraTreesClassifier( + n_estimators=500, max_features=1, criterion="log_loss", random_state=0 + ).fit(X, y) + + importances = ( + sum( + tree.tree_.compute_feature_importances(normalize=False) + for tree in clf.estimators_ + ) + / clf.n_estimators + ) + + # Check correctness + assert_almost_equal(entropy(y), sum(importances)) + assert np.abs(true_importances - importances).mean() < 0.01 + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_unfitted_feature_importances(name): + err_msg = ( + "This {} instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator.".format(name) + ) + with pytest.raises(NotFittedError, match=err_msg): + getattr(FOREST_ESTIMATORS[name](), "feature_importances_") + + +@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values()) +@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"]) +@pytest.mark.parametrize( + "X, y, lower_bound_accuracy", + [ + ( + *datasets.make_classification(n_samples=300, n_classes=2, random_state=0), + 0.9, + ), + ( + *datasets.make_classification( + n_samples=1000, n_classes=3, n_informative=6, random_state=0 + ), + 0.65, + ), + ( + iris.data, + iris.target * 2 + 1, + 0.65, + ), + ( + *datasets.make_multilabel_classification(n_samples=300, random_state=0), + 0.18, + ), + ], +) +@pytest.mark.parametrize("oob_score", [True, partial(f1_score, average="micro")]) +def test_forest_classifier_oob( + ForestClassifier, X, y, X_type, lower_bound_accuracy, oob_score +): + """Check that OOB score is close to score on a test set.""" + X = _convert_container(X, constructor_name=X_type) + X_train, X_test, y_train, y_test = train_test_split( + X, + y, + test_size=0.5, + random_state=0, + ) + 
classifier = ForestClassifier( + n_estimators=40, + bootstrap=True, + oob_score=oob_score, + random_state=0, + ) + + assert not hasattr(classifier, "oob_score_") + assert not hasattr(classifier, "oob_decision_function_") + + classifier.fit(X_train, y_train) + if callable(oob_score): + test_score = oob_score(y_test, classifier.predict(X_test)) + else: + test_score = classifier.score(X_test, y_test) + assert classifier.oob_score_ >= lower_bound_accuracy + + abs_diff = abs(test_score - classifier.oob_score_) + assert abs_diff <= 0.11, f"{abs_diff=} is greater than 0.11" + + assert hasattr(classifier, "oob_score_") + assert not hasattr(classifier, "oob_prediction_") + assert hasattr(classifier, "oob_decision_function_") + + if y.ndim == 1: + expected_shape = (X_train.shape[0], len(set(y))) + else: + expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1]) + assert classifier.oob_decision_function_.shape == expected_shape + + +@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values()) +@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"]) +@pytest.mark.parametrize( + "X, y, lower_bound_r2", + [ + ( + *datasets.make_regression( + n_samples=500, n_features=10, n_targets=1, random_state=0 + ), + 0.7, + ), + ( + *datasets.make_regression( + n_samples=500, n_features=10, n_targets=2, random_state=0 + ), + 0.55, + ), + ], +) +@pytest.mark.parametrize("oob_score", [True, explained_variance_score]) +def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2, oob_score): + """Check that forest-based regressor provide an OOB score close to the + score on a test set.""" + X = _convert_container(X, constructor_name=X_type) + X_train, X_test, y_train, y_test = train_test_split( + X, + y, + test_size=0.5, + random_state=0, + ) + regressor = ForestRegressor( + n_estimators=50, + bootstrap=True, + oob_score=oob_score, + random_state=0, + ) + + assert not hasattr(regressor, "oob_score_") + assert not hasattr(regressor, 
"oob_prediction_") + + regressor.fit(X_train, y_train) + if callable(oob_score): + test_score = oob_score(y_test, regressor.predict(X_test)) + else: + test_score = regressor.score(X_test, y_test) + assert regressor.oob_score_ >= lower_bound_r2 + + assert abs(test_score - regressor.oob_score_) <= 0.1 + + assert hasattr(regressor, "oob_score_") + assert hasattr(regressor, "oob_prediction_") + assert not hasattr(regressor, "oob_decision_function_") + + if y.ndim == 1: + expected_shape = (X_train.shape[0],) + else: + expected_shape = (X_train.shape[0], y.ndim) + assert regressor.oob_prediction_.shape == expected_shape + + +@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values()) +def test_forest_oob_warning(ForestEstimator): + """Check that a warning is raised when not enough estimator and the OOB + estimates will be inaccurate.""" + estimator = ForestEstimator( + n_estimators=1, + oob_score=True, + bootstrap=True, + random_state=0, + ) + with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"): + estimator.fit(iris.data, iris.target) + + +@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values()) +def test_forest_oob_score_requires_bootstrap(ForestEstimator): + """Check that we raise an error if OOB score is requested without + activating bootstrapping. + """ + X = iris.data + y = iris.target + err_msg = "Out of bag estimation only available if bootstrap=True" + estimator = ForestEstimator(oob_score=True, bootstrap=False) + with pytest.raises(ValueError, match=err_msg): + estimator.fit(X, y) + + +@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values()) +def test_classifier_error_oob_score_multiclass_multioutput(ForestClassifier): + """Check that we raise an error with when requesting OOB score with + multiclass-multioutput classification target. 
+ """ + rng = np.random.RandomState(42) + X = iris.data + y = rng.randint(low=0, high=5, size=(iris.data.shape[0], 2)) + y_type = type_of_target(y) + assert y_type == "multiclass-multioutput" + estimator = ForestClassifier(oob_score=True, bootstrap=True) + err_msg = "The type of target cannot be used to compute OOB estimates" + with pytest.raises(ValueError, match=err_msg): + estimator.fit(X, y) + + +@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values()) +def test_forest_multioutput_integral_regression_target(ForestRegressor): + """Check that multioutput regression with integral values is not interpreted + as a multiclass-multioutput target and OOB score can be computed. + """ + rng = np.random.RandomState(42) + X = iris.data + y = rng.randint(low=0, high=10, size=(iris.data.shape[0], 2)) + estimator = ForestRegressor( + n_estimators=30, oob_score=True, bootstrap=True, random_state=0 + ) + estimator.fit(X, y) + + n_samples_bootstrap = _get_n_samples_bootstrap(len(X), estimator.max_samples) + n_samples_test = X.shape[0] // 4 + oob_pred = np.zeros([n_samples_test, 2]) + for sample_idx, sample in enumerate(X[:n_samples_test]): + n_samples_oob = 0 + oob_pred_sample = np.zeros(2) + for tree in estimator.estimators_: + oob_unsampled_indices = _generate_unsampled_indices( + tree.random_state, len(X), n_samples_bootstrap + ) + if sample_idx in oob_unsampled_indices: + n_samples_oob += 1 + oob_pred_sample += tree.predict(sample.reshape(1, -1)).squeeze() + oob_pred[sample_idx] = oob_pred_sample / n_samples_oob + assert_allclose(oob_pred, estimator.oob_prediction_[:n_samples_test]) + + +@pytest.mark.parametrize("oob_score", [True, False]) +def test_random_trees_embedding_raise_error_oob(oob_score): + with pytest.raises(TypeError, match="got an unexpected keyword argument"): + RandomTreesEmbedding(oob_score=oob_score) + with pytest.raises(NotImplementedError, match="OOB score not supported"): + RandomTreesEmbedding()._set_oob_score_and_attributes(X, y) + + 
+@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_gridsearch(name): + # Check that base trees can be grid-searched. + forest = FOREST_CLASSIFIERS[name]() + clf = GridSearchCV(forest, {"n_estimators": (1, 2), "max_depth": (1, 2)}) + clf.fit(iris.data, iris.target) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_parallel(name): + """Check parallel computations in classification""" + if name in FOREST_CLASSIFIERS: + X = iris.data + y = iris.target + elif name in FOREST_REGRESSORS: + X = X_reg + y = y_reg + + ForestEstimator = FOREST_ESTIMATORS[name] + forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) + + forest.fit(X, y) + assert len(forest) == 10 + + forest.set_params(n_jobs=1) + y1 = forest.predict(X) + forest.set_params(n_jobs=2) + y2 = forest.predict(X) + assert_array_almost_equal(y1, y2, 3) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_pickle(name): + # Check pickability. + if name in FOREST_CLASSIFIERS: + X = iris.data[::2] + y = iris.target[::2] + elif name in FOREST_REGRESSORS: + X = X_reg[::2] + y = y_reg[::2] + + ForestEstimator = FOREST_ESTIMATORS[name] + obj = ForestEstimator(random_state=0) + obj.fit(X, y) + score = obj.score(X, y) + pickle_object = pickle.dumps(obj) + + obj2 = pickle.loads(pickle_object) + assert type(obj2) == obj.__class__ + score2 = obj2.score(X, y) + assert score == score2 + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_multioutput(name): + # Check estimators on multi-output problems. 
+ + X_train = [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-2, 1], + [-1, 1], + [-1, 2], + [2, -1], + [1, -1], + [1, -2], + ] + y_train = [ + [-1, 0], + [-1, 0], + [-1, 0], + [1, 1], + [1, 1], + [1, 1], + [-1, 2], + [-1, 2], + [-1, 2], + [1, 3], + [1, 3], + [1, 3], + ] + X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] + y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]] + + est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) + y_pred = est.fit(X_train, y_train).predict(X_test) + assert_array_almost_equal(y_pred, y_test) + + if name in FOREST_CLASSIFIERS: + with np.errstate(divide="ignore"): + proba = est.predict_proba(X_test) + assert len(proba) == 2 + assert proba[0].shape == (4, 2) + assert proba[1].shape == (4, 4) + + log_proba = est.predict_log_proba(X_test) + assert len(log_proba) == 2 + assert log_proba[0].shape == (4, 2) + assert log_proba[1].shape == (4, 4) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_multioutput_string(name): + # Check estimators on multi-output problems with string outputs. 
+ + X_train = [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-2, 1], + [-1, 1], + [-1, 2], + [2, -1], + [1, -1], + [1, -2], + ] + y_train = [ + ["red", "blue"], + ["red", "blue"], + ["red", "blue"], + ["green", "green"], + ["green", "green"], + ["green", "green"], + ["red", "purple"], + ["red", "purple"], + ["red", "purple"], + ["green", "yellow"], + ["green", "yellow"], + ["green", "yellow"], + ] + X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] + y_test = [ + ["red", "blue"], + ["green", "green"], + ["red", "purple"], + ["green", "yellow"], + ] + + est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) + y_pred = est.fit(X_train, y_train).predict(X_test) + assert_array_equal(y_pred, y_test) + + with np.errstate(divide="ignore"): + proba = est.predict_proba(X_test) + assert len(proba) == 2 + assert proba[0].shape == (4, 2) + assert proba[1].shape == (4, 4) + + log_proba = est.predict_log_proba(X_test) + assert len(log_proba) == 2 + assert log_proba[0].shape == (4, 2) + assert log_proba[1].shape == (4, 4) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_classes_shape(name): + # Test that n_classes_ and classes_ have proper shape. + ForestClassifier = FOREST_CLASSIFIERS[name] + + # Classification, single output + clf = ForestClassifier(random_state=0).fit(X, y) + + assert clf.n_classes_ == 2 + assert_array_equal(clf.classes_, [-1, 1]) + + # Classification, multi-output + _y = np.vstack((y, np.array(y) * 2)).T + clf = ForestClassifier(random_state=0).fit(X, _y) + + assert_array_equal(clf.n_classes_, [2, 2]) + assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) + + +def test_random_trees_dense_type(): + # Test that the `sparse_output` parameter of RandomTreesEmbedding + # works by returning a dense array. 
+ + # Create the RTE with sparse=False + hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) + X, y = datasets.make_circles(factor=0.5) + X_transformed = hasher.fit_transform(X) + + # Assert that type is ndarray, not scipy.sparse.csr_matrix + assert isinstance(X_transformed, np.ndarray) + + +def test_random_trees_dense_equal(): + # Test that the `sparse_output` parameter of RandomTreesEmbedding + # works by returning the same array for both argument values. + + # Create the RTEs + hasher_dense = RandomTreesEmbedding( + n_estimators=10, sparse_output=False, random_state=0 + ) + hasher_sparse = RandomTreesEmbedding( + n_estimators=10, sparse_output=True, random_state=0 + ) + X, y = datasets.make_circles(factor=0.5) + X_transformed_dense = hasher_dense.fit_transform(X) + X_transformed_sparse = hasher_sparse.fit_transform(X) + + # Assert that dense and sparse hashers have same array. + assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) + + +def test_random_hasher(): + # test random forest hashing on circles dataset + # make sure that it is linearly separable. + # even after projected to two SVD dimensions + # Note: Not all random_states produce perfect results. 
+ hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) + X, y = datasets.make_circles(factor=0.5) + X_transformed = hasher.fit_transform(X) + + # test fit and transform: + hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) + assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) + + # one leaf active per data point per forest + assert X_transformed.shape[0] == X.shape[0] + assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) + svd = TruncatedSVD(n_components=2) + X_reduced = svd.fit_transform(X_transformed) + linear_clf = LinearSVC() + linear_clf.fit(X_reduced, y) + assert linear_clf.score(X_reduced, y) == 1.0 + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_random_hasher_sparse_data(csc_container): + X, y = datasets.make_multilabel_classification(random_state=0) + hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) + X_transformed = hasher.fit_transform(X) + X_transformed_sparse = hasher.fit_transform(csc_container(X)) + assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) + + +def test_parallel_train(): + rng = check_random_state(12321) + n_samples, n_features = 80, 30 + X_train = rng.randn(n_samples, n_features) + y_train = rng.randint(0, 2, n_samples) + + clfs = [ + RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit( + X_train, y_train + ) + for n_jobs in [1, 2, 3, 8, 16, 32] + ] + + X_test = rng.randn(n_samples, n_features) + probas = [clf.predict_proba(X_test) for clf in clfs] + for proba1, proba2 in zip(probas, probas[1:]): + assert_array_almost_equal(proba1, proba2) + + +def test_distribution(): + rng = check_random_state(12321) + + # Single variable with 4 values + X = rng.randint(0, 4, size=(1000, 1)) + y = rng.rand(1000) + n_trees = 500 + + reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) + + uniques = defaultdict(int) + for tree in reg.estimators_: + tree = "".join( + ("%d,%d/" % 
(f, int(t)) if f >= 0 else "-") + for f, t in zip(tree.tree_.feature, tree.tree_.threshold) + ) + + uniques[tree] += 1 + + uniques = sorted([(1.0 * count / n_trees, tree) for tree, count in uniques.items()]) + + # On a single variable problem where X_0 has 4 equiprobable values, there + # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of + # them has probability 1/3 while the 4 others have probability 1/6. + + assert len(uniques) == 5 + assert 0.20 > uniques[0][0] # Rough approximation of 1/6. + assert 0.20 > uniques[1][0] + assert 0.20 > uniques[2][0] + assert 0.20 > uniques[3][0] + assert uniques[4][0] > 0.3 + assert uniques[4][1] == "0,1/0,0/--0,2/--" + + # Two variables, one with 2 values, one with 3 values + X = np.empty((1000, 2)) + X[:, 0] = np.random.randint(0, 2, 1000) + X[:, 1] = np.random.randint(0, 3, 1000) + y = rng.rand(1000) + + reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y) + + uniques = defaultdict(int) + for tree in reg.estimators_: + tree = "".join( + ("%d,%d/" % (f, int(t)) if f >= 0 else "-") + for f, t in zip(tree.tree_.feature, tree.tree_.threshold) + ) + + uniques[tree] += 1 + + uniques = [(count, tree) for tree, count in uniques.items()] + assert len(uniques) == 8 + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_max_leaf_nodes_max_depth(name): + X, y = hastie_X, hastie_y + + # Test precedence of max_leaf_nodes over max_depth. 
+ ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator( + max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0 + ).fit(X, y) + assert est.estimators_[0].get_depth() == 1 + + est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y) + assert est.estimators_[0].get_depth() == 1 + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_min_samples_split(name): + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0) + est.fit(X, y) + node_idx = est.estimators_[0].tree_.children_left != -1 + node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] + + assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name) + + est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0) + est.fit(X, y) + node_idx = est.estimators_[0].tree_.children_left != -1 + node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] + + assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_min_samples_leaf(name): + X, y = hastie_X, hastie_y + + # Test if leaves contain more than leaf_count training examples + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0) + est.fit(X, y) + out = est.estimators_[0].tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > 4, "Failed with {0}".format(name) + + est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1, random_state=0) + est.fit(X, y) + out = est.estimators_[0].tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > len(X) * 0.25 - 1, "Failed with {0}".format(name) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) 
+def test_min_weight_fraction_leaf(name): + X, y = hastie_X, hastie_y + + # Test if leaves contain at least min_weight_fraction_leaf of the + # training set + ForestEstimator = FOREST_ESTIMATORS[name] + rng = np.random.RandomState(0) + weights = rng.rand(X.shape[0]) + total_weight = np.sum(weights) + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for frac in np.linspace(0, 0.5, 6): + est = ForestEstimator( + min_weight_fraction_leaf=frac, n_estimators=1, random_state=0 + ) + if "RandomForest" in name: + est.bootstrap = False + + est.fit(X, y, sample_weight=weights) + out = est.estimators_[0].tree_.apply(X) + node_weights = np.bincount(out, weights=weights) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert ( + np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf + ), "Failed with {0} min_weight_fraction_leaf={1}".format( + name, est.min_weight_fraction_leaf + ) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_sparse_input(name, sparse_container): + X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50) + + ForestEstimator = FOREST_ESTIMATORS[name] + + dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) + sparse = ForestEstimator(random_state=0, max_depth=2).fit(sparse_container(X), y) + + assert_array_almost_equal(sparse.apply(X), dense.apply(X)) + + if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: + assert_array_almost_equal(sparse.predict(X), dense.predict(X)) + assert_array_almost_equal( + sparse.feature_importances_, dense.feature_importances_ + ) + + if name in FOREST_CLASSIFIERS: + assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) + assert_array_almost_equal( + sparse.predict_log_proba(X), dense.predict_log_proba(X) + ) + + if name in FOREST_TRANSFORMERS: + assert_array_almost_equal( + 
sparse.transform(X).toarray(), dense.transform(X).toarray() + ) + assert_array_almost_equal( + sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray() + ) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +@pytest.mark.parametrize("dtype", (np.float64, np.float32)) +def test_memory_layout(name, dtype): + # Test that it works no matter the memory layout + est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) + + # Dense + for container, kwargs in ( + (np.asarray, {}), # Nothing + (np.asarray, {"order": "C"}), # C-order + (np.asarray, {"order": "F"}), # F-order + (np.ascontiguousarray, {}), # Contiguous + ): + X = container(iris.data, dtype=dtype, **kwargs) + y = iris.target + assert_array_almost_equal(est.fit(X, y).predict(X), y) + + # Sparse (if applicable) + if est.estimator.splitter in SPARSE_SPLITTERS: + for sparse_container in COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS: + X = sparse_container(iris.data, dtype=dtype) + y = iris.target + assert_array_almost_equal(est.fit(X, y).predict(X), y) + + # Strided + X = np.asarray(iris.data[::3], dtype=dtype) + y = iris.target[::3] + assert_array_almost_equal(est.fit(X, y).predict(X), y) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_1d_input(name): + X = iris.data[:, 0] + X_2d = iris.data[:, 0].reshape((-1, 1)) + y = iris.target + + with ignore_warnings(): + ForestEstimator = FOREST_ESTIMATORS[name] + with pytest.raises(ValueError): + ForestEstimator(n_estimators=1, random_state=0).fit(X, y) + + est = ForestEstimator(random_state=0) + est.fit(X_2d, y) + + if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: + with pytest.raises(ValueError): + est.predict(X) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_class_weights(name): + # Check class_weights resemble sample_weights behavior. 
+ ForestClassifier = FOREST_CLASSIFIERS[name] + + # Iris is balanced, so no effect expected for using 'balanced' weights + clf1 = ForestClassifier(random_state=0) + clf1.fit(iris.data, iris.target) + clf2 = ForestClassifier(class_weight="balanced", random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Make a multi-output problem with three copies of Iris + iris_multi = np.vstack((iris.target, iris.target, iris.target)).T + # Create user-defined weights that should balance over the outputs + clf3 = ForestClassifier( + class_weight=[ + {0: 2.0, 1: 2.0, 2: 1.0}, + {0: 2.0, 1: 1.0, 2: 2.0}, + {0: 1.0, 1: 2.0, 2: 2.0}, + ], + random_state=0, + ) + clf3.fit(iris.data, iris_multi) + assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) + # Check against multi-output "balanced" which should also have no effect + clf4 = ForestClassifier(class_weight="balanced", random_state=0) + clf4.fit(iris.data, iris_multi) + assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) + + # Inflate importance of class 1, check against user-defined weights + sample_weight = np.ones(iris.target.shape) + sample_weight[iris.target == 1] *= 100 + class_weight = {0: 1.0, 1: 100.0, 2: 1.0} + clf1 = ForestClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight) + clf2 = ForestClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Check that sample_weight and class_weight are multiplicative + clf1 = ForestClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight**2) + clf2 = ForestClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target, sample_weight) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def 
test_class_weight_balanced_and_bootstrap_multi_output(name): + # Test class_weight works for multi-output""" + ForestClassifier = FOREST_CLASSIFIERS[name] + _y = np.vstack((y, np.array(y) * 2)).T + clf = ForestClassifier(class_weight="balanced", random_state=0) + clf.fit(X, _y) + clf = ForestClassifier( + class_weight=[{-1: 0.5, 1: 1.0}, {-2: 1.0, 2: 1.0}], random_state=0 + ) + clf.fit(X, _y) + # smoke test for balanced subsample + clf = ForestClassifier(class_weight="balanced_subsample", random_state=0) + clf.fit(X, _y) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_class_weight_errors(name): + # Test if class_weight raises errors and warnings when expected. + ForestClassifier = FOREST_CLASSIFIERS[name] + _y = np.vstack((y, np.array(y) * 2)).T + + # Warning warm_start with preset + clf = ForestClassifier(class_weight="balanced", warm_start=True, random_state=0) + clf.fit(X, y) + + warn_msg = ( + "Warm-start fitting without increasing n_estimators does not fit new trees." + ) + with pytest.warns(UserWarning, match=warn_msg): + clf.fit(X, _y) + + # Incorrect length list for multi-output + clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0) + with pytest.raises(ValueError): + clf.fit(X, _y) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start(name): + # Test if fitting incrementally with warm start gives a forest of the + # right size and the same results as a normal fit. 
+ X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est_ws = None + for n_estimators in [5, 10]: + if est_ws is None: + est_ws = ForestEstimator( + n_estimators=n_estimators, random_state=42, warm_start=True + ) + else: + est_ws.set_params(n_estimators=n_estimators) + est_ws.fit(X, y) + assert len(est_ws) == n_estimators + + est_no_ws = ForestEstimator(n_estimators=10, random_state=42, warm_start=False) + est_no_ws.fit(X, y) + + assert set([tree.random_state for tree in est_ws]) == set( + [tree.random_state for tree in est_no_ws] + ) + + assert_array_equal( + est_ws.apply(X), est_no_ws.apply(X), err_msg="Failed with {0}".format(name) + ) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start_clear(name): + # Test if fit clears state and grows a new forest when warm_start==False. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) + est.fit(X, y) + + est_2 = ForestEstimator( + n_estimators=5, max_depth=1, warm_start=True, random_state=2 + ) + est_2.fit(X, y) # inits state + est_2.set_params(warm_start=False, random_state=1) + est_2.fit(X, y) # clears old state and equals est + + assert_array_almost_equal(est_2.apply(X), est.apply(X)) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start_smaller_n_estimators(name): + # Test if warm start second fit with smaller n_estimators raises error. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=4) + with pytest.raises(ValueError): + est.fit(X, y) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start_equal_n_estimators(name): + # Test if warm start with equal n_estimators does nothing and returns the + # same forest and raises a warning. 
+ X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) + est.fit(X, y) + + est_2 = ForestEstimator( + n_estimators=5, max_depth=3, warm_start=True, random_state=1 + ) + est_2.fit(X, y) + # Now est_2 equals est. + + est_2.set_params(random_state=2) + warn_msg = ( + "Warm-start fitting without increasing n_estimators does not fit new trees." + ) + with pytest.warns(UserWarning, match=warn_msg): + est_2.fit(X, y) + # If we had fit the trees again we would have got a different forest as we + # changed the random state. + assert_array_equal(est.apply(X), est_2.apply(X)) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_warm_start_oob(name): + # Test that the warm start computes oob score when asked. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. + est = ForestEstimator( + n_estimators=15, + max_depth=3, + warm_start=False, + random_state=1, + bootstrap=True, + oob_score=True, + ) + est.fit(X, y) + + est_2 = ForestEstimator( + n_estimators=5, + max_depth=3, + warm_start=False, + random_state=1, + bootstrap=True, + oob_score=False, + ) + est_2.fit(X, y) + + est_2.set_params(warm_start=True, oob_score=True, n_estimators=15) + est_2.fit(X, y) + + assert hasattr(est_2, "oob_score_") + assert est.oob_score_ == est_2.oob_score_ + + # Test that oob_score is computed even if we don't need to train + # additional trees. 
+ est_3 = ForestEstimator( + n_estimators=15, + max_depth=3, + warm_start=True, + random_state=1, + bootstrap=True, + oob_score=False, + ) + est_3.fit(X, y) + assert not hasattr(est_3, "oob_score_") + + est_3.set_params(oob_score=True) + ignore_warnings(est_3.fit)(X, y) + + assert est.oob_score_ == est_3.oob_score_ + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_oob_not_computed_twice(name): + # Check that oob_score is not computed twice when warm_start=True. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator( + n_estimators=10, warm_start=True, bootstrap=True, oob_score=True + ) + + with patch.object( + est, "_set_oob_score_and_attributes", wraps=est._set_oob_score_and_attributes + ) as mock_set_oob_score_and_attributes: + est.fit(X, y) + + with pytest.warns(UserWarning, match="Warm-start fitting without increasing"): + est.fit(X, y) + + mock_set_oob_score_and_attributes.assert_called_once() + + +def test_dtype_convert(n_classes=15): + classifier = RandomForestClassifier(random_state=0, bootstrap=False) + + X = np.eye(n_classes) + y = [ch for ch in "ABCDEFGHIJKLMNOPQRSTU"[:n_classes]] + + result = classifier.fit(X, y).predict(X) + assert_array_equal(classifier.classes_, y) + assert_array_equal(result, y) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_decision_path(name): + X, y = hastie_X, hastie_y + n_samples = X.shape[0] + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) + est.fit(X, y) + indicator, n_nodes_ptr = est.decision_path(X) + + assert indicator.shape[1] == n_nodes_ptr[-1] + assert indicator.shape[0] == n_samples + assert_array_equal( + np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_] + ) + + # Assert that leaves index are correct + leaves = est.apply(X) + for est_id in range(leaves.shape[1]): + leave_indicator = [ + indicator[i, 
n_nodes_ptr[est_id] + j] + for i, j in enumerate(leaves[:, est_id]) + ] + assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) + + +def test_min_impurity_decrease(): + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + all_estimators = [ + RandomForestClassifier, + RandomForestRegressor, + ExtraTreesClassifier, + ExtraTreesRegressor, + ] + + for Estimator in all_estimators: + est = Estimator(min_impurity_decrease=0.1) + est.fit(X, y) + for tree in est.estimators_: + # Simply check if the parameter is passed on correctly. Tree tests + # will suffice for the actual working of this param + assert tree.min_impurity_decrease == 0.1 + + +def test_poisson_y_positive_check(): + est = RandomForestRegressor(criterion="poisson") + X = np.zeros((3, 3)) + + y = [-1, 1, 3] + err_msg = ( + r"Some value\(s\) of y are negative which is " + r"not allowed for Poisson regression." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, y) + + y = [0, 0, 0] + err_msg = ( + r"Sum of y is not strictly positive which " + r"is necessary for Poisson regression." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, y) + + +# mypy error: Variable "DEFAULT_JOBLIB_BACKEND" is not valid type +class MyBackend(DEFAULT_JOBLIB_BACKEND): # type: ignore + def __init__(self, *args, **kwargs): + self.count = 0 + super().__init__(*args, **kwargs) + + def start_call(self): + self.count += 1 + return super().start_call() + + +joblib.register_parallel_backend("testing", MyBackend) + + +@skip_if_no_parallel +def test_backend_respected(): + clf = RandomForestClassifier(n_estimators=10, n_jobs=2) + + with joblib.parallel_backend("testing") as (ba, n_jobs): + clf.fit(X, y) + + assert ba.count > 0 + + # predict_proba requires shared memory. Ensure that's honored. 
+ with joblib.parallel_backend("testing") as (ba, _): + clf.predict_proba(X) + + assert ba.count == 0 + + +def test_forest_feature_importances_sum(): + X, y = make_classification( + n_samples=15, n_informative=3, random_state=1, n_classes=3 + ) + clf = RandomForestClassifier( + min_samples_leaf=5, random_state=42, n_estimators=200 + ).fit(X, y) + assert math.isclose(1, clf.feature_importances_.sum(), abs_tol=1e-7) + + +def test_forest_degenerate_feature_importances(): + # build a forest of single node trees. See #13636 + X = np.zeros((10, 10)) + y = np.ones((10,)) + gbr = RandomForestRegressor(n_estimators=10).fit(X, y) + assert_array_equal(gbr.feature_importances_, np.zeros(10, dtype=np.float64)) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_max_samples_bootstrap(name): + # Check invalid `max_samples` values + est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=False, max_samples=0.5) + err_msg = ( + r"`max_sample` cannot be set if `bootstrap=False`. " + r"Either switch to `bootstrap=True` or set " + r"`max_sample=None`." 
+ ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, y) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_large_max_samples_exception(name): + # Check invalid `max_samples` + est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=True, max_samples=int(1e9)) + match = "`max_samples` must be <= n_samples=6 but got value 1000000000" + with pytest.raises(ValueError, match=match): + est.fit(X, y) + + +@pytest.mark.parametrize("name", FOREST_REGRESSORS) +def test_max_samples_boundary_regressors(name): + X_train, X_test, y_train, y_test = train_test_split( + X_reg, y_reg, train_size=0.7, test_size=0.3, random_state=0 + ) + + ms_1_model = FOREST_REGRESSORS[name]( + bootstrap=True, max_samples=1.0, random_state=0 + ) + ms_1_predict = ms_1_model.fit(X_train, y_train).predict(X_test) + + ms_None_model = FOREST_REGRESSORS[name]( + bootstrap=True, max_samples=None, random_state=0 + ) + ms_None_predict = ms_None_model.fit(X_train, y_train).predict(X_test) + + ms_1_ms = mean_squared_error(ms_1_predict, y_test) + ms_None_ms = mean_squared_error(ms_None_predict, y_test) + + assert ms_1_ms == pytest.approx(ms_None_ms) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_max_samples_boundary_classifiers(name): + X_train, X_test, y_train, _ = train_test_split( + X_large, y_large, random_state=0, stratify=y_large + ) + + ms_1_model = FOREST_CLASSIFIERS[name]( + bootstrap=True, max_samples=1.0, random_state=0 + ) + ms_1_proba = ms_1_model.fit(X_train, y_train).predict_proba(X_test) + + ms_None_model = FOREST_CLASSIFIERS[name]( + bootstrap=True, max_samples=None, random_state=0 + ) + ms_None_proba = ms_None_model.fit(X_train, y_train).predict_proba(X_test) + + np.testing.assert_allclose(ms_1_proba, ms_None_proba) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_forest_y_sparse(csr_container): + X = [[1, 2, 3]] + y = csr_container([[4, 5, 6]]) + est = RandomForestClassifier() + msg = "sparse multilabel-indicator 
for y is not supported." + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize("ForestClass", [RandomForestClassifier, RandomForestRegressor]) +def test_little_tree_with_small_max_samples(ForestClass): + rng = np.random.RandomState(1) + + X = rng.randn(10000, 2) + y = rng.randn(10000) > 0 + + # First fit with no restriction on max samples + est1 = ForestClass( + n_estimators=1, + random_state=rng, + max_samples=None, + ) + + # Second fit with max samples restricted to just 2 + est2 = ForestClass( + n_estimators=1, + random_state=rng, + max_samples=2, + ) + + est1.fit(X, y) + est2.fit(X, y) + + tree1 = est1.estimators_[0].tree_ + tree2 = est2.estimators_[0].tree_ + + msg = "Tree without `max_samples` restriction should have more nodes" + assert tree1.node_count > tree2.node_count, msg + + +@pytest.mark.parametrize("Forest", FOREST_REGRESSORS) +def test_mse_criterion_object_segfault_smoke_test(Forest): + # This is a smoke test to ensure that passing a mutable criterion + # does not cause a segfault when fitting with concurrent threads. + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/12623 + from sklearn.tree._criterion import MSE + + y = y_reg.reshape(-1, 1) + n_samples, n_outputs = y.shape + mse_criterion = MSE(n_outputs, n_samples) + est = FOREST_REGRESSORS[Forest](n_estimators=2, n_jobs=2, criterion=mse_criterion) + + est.fit(X_reg, y) + + +def test_random_trees_embedding_feature_names_out(): + """Check feature names out for Random Trees Embedding.""" + random_state = np.random.RandomState(0) + X = np.abs(random_state.randn(100, 4)) + hasher = RandomTreesEmbedding( + n_estimators=2, max_depth=2, sparse_output=False, random_state=0 + ).fit(X) + names = hasher.get_feature_names_out() + expected_names = [ + f"randomtreesembedding_{tree}_{leaf}" + # Note: nodes with indices 0, 1 and 4 are internal split nodes and + # therefore do not appear in the expected output feature names. 
+ for tree, leaf in [ + (0, 2), + (0, 3), + (0, 5), + (0, 6), + (1, 2), + (1, 3), + (1, 5), + (1, 6), + ] + ] + assert_array_equal(expected_names, names) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_read_only_buffer(csr_container, monkeypatch): + """RandomForestClassifier must work on readonly sparse data. + + Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25333 + """ + monkeypatch.setattr( + sklearn.ensemble._forest, + "Parallel", + partial(Parallel, max_nbytes=100), + ) + rng = np.random.RandomState(seed=0) + + X, y = make_classification(n_samples=100, n_features=200, random_state=rng) + X = csr_container(X, copy=True) + + clf = RandomForestClassifier(n_jobs=2, random_state=rng) + cross_val_score(clf, X, y, cv=2) + + +@pytest.mark.parametrize("class_weight", ["balanced_subsample", None]) +def test_round_samples_to_one_when_samples_too_low(class_weight): + """Check low max_samples works and is rounded to one. + + Non-regression test for gh-24037. + """ + X, y = datasets.load_wine(return_X_y=True) + forest = RandomForestClassifier( + n_estimators=10, max_samples=1e-4, class_weight=class_weight, random_state=0 + ) + forest.fit(X, y) + + +@pytest.mark.parametrize("seed", [None, 1]) +@pytest.mark.parametrize("bootstrap", [True, False]) +@pytest.mark.parametrize("ForestClass", FOREST_CLASSIFIERS_REGRESSORS.values()) +def test_estimators_samples(ForestClass, bootstrap, seed): + """Estimators_samples_ property should be consistent. + + Tests consistency across fits and whether or not the seed for the random generator + is set. 
+ """ + X, y = make_hastie_10_2(n_samples=200, random_state=1) + + if bootstrap: + max_samples = 0.5 + else: + max_samples = None + est = ForestClass( + n_estimators=10, + max_samples=max_samples, + max_features=0.5, + random_state=seed, + bootstrap=bootstrap, + ) + est.fit(X, y) + + estimators_samples = est.estimators_samples_.copy() + + # Test repeated calls result in same set of indices + assert_array_equal(estimators_samples, est.estimators_samples_) + estimators = est.estimators_ + + assert isinstance(estimators_samples, list) + assert len(estimators_samples) == len(estimators) + assert estimators_samples[0].dtype == np.int32 + + for i in range(len(estimators)): + if bootstrap: + assert len(estimators_samples[i]) == len(X) // 2 + + # the bootstrap should be a resampling with replacement + assert len(np.unique(estimators_samples[i])) < len(estimators_samples[i]) + else: + assert len(set(estimators_samples[i])) == len(X) + + estimator_index = 0 + estimator_samples = estimators_samples[estimator_index] + estimator = estimators[estimator_index] + + X_train = X[estimator_samples] + y_train = y[estimator_samples] + + orig_tree_values = estimator.tree_.value + estimator = clone(estimator) + estimator.fit(X_train, y_train) + new_tree_values = estimator.tree_.value + assert_allclose(orig_tree_values, new_tree_values) + + +@pytest.mark.parametrize( + "make_data, Forest", + [ + (datasets.make_regression, RandomForestRegressor), + (datasets.make_classification, RandomForestClassifier), + (datasets.make_regression, ExtraTreesRegressor), + (datasets.make_classification, ExtraTreesClassifier), + ], +) +def test_missing_values_is_resilient(make_data, Forest): + """Check that forest can deal with missing values and has decent performance.""" + + rng = np.random.RandomState(0) + n_samples, n_features = 1000, 10 + X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng) + + # Create dataset with missing values + X_missing = X.copy() + 
X_missing[rng.choice([False, True], size=X.shape, p=[0.95, 0.05])] = np.nan + assert np.isnan(X_missing).any() + + X_missing_train, X_missing_test, y_train, y_test = train_test_split( + X_missing, y, random_state=0 + ) + + # Train forest with missing values + forest_with_missing = Forest(random_state=rng, n_estimators=50) + forest_with_missing.fit(X_missing_train, y_train) + score_with_missing = forest_with_missing.score(X_missing_test, y_test) + + # Train forest without missing values + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + forest = Forest(random_state=rng, n_estimators=50) + forest.fit(X_train, y_train) + score_without_missing = forest.score(X_test, y_test) + + # Score is still 80 percent of the forest's score that had no missing values + assert score_with_missing >= 0.80 * score_without_missing + + +@pytest.mark.parametrize( + "Forest", + [ + RandomForestClassifier, + RandomForestRegressor, + ExtraTreesRegressor, + ExtraTreesClassifier, + ], +) +def test_missing_value_is_predictive(Forest): + """Check that the forest learns when missing values are only present for + a predictive feature.""" + rng = np.random.RandomState(0) + n_samples = 300 + expected_score = 0.75 + + X_non_predictive = rng.standard_normal(size=(n_samples, 10)) + y = rng.randint(0, high=2, size=n_samples) + + # Create a predictive feature using `y` and with some noise + X_random_mask = rng.choice([False, True], size=n_samples, p=[0.95, 0.05]) + y_mask = y.astype(bool) + y_mask[X_random_mask] = ~y_mask[X_random_mask] + + predictive_feature = rng.standard_normal(size=n_samples) + predictive_feature[y_mask] = np.nan + assert np.isnan(predictive_feature).any() + + X_predictive = X_non_predictive.copy() + X_predictive[:, 5] = predictive_feature + + ( + X_predictive_train, + X_predictive_test, + X_non_predictive_train, + X_non_predictive_test, + y_train, + y_test, + ) = train_test_split(X_predictive, X_non_predictive, y, random_state=0) + forest_predictive = 
Forest(random_state=0).fit(X_predictive_train, y_train) + forest_non_predictive = Forest(random_state=0).fit(X_non_predictive_train, y_train) + + predictive_test_score = forest_predictive.score(X_predictive_test, y_test) + + assert predictive_test_score >= expected_score + assert predictive_test_score >= forest_non_predictive.score( + X_non_predictive_test, y_test + ) + + +@pytest.mark.parametrize("Forest", FOREST_REGRESSORS.values()) +def test_non_supported_criterion_raises_error_with_missing_values(Forest): + """Raise error for unsupported criterion when there are missing values.""" + X = np.array([[0, 1, 2], [np.nan, 0, 2.0]]) + y = [0.5, 1.0] + + forest = Forest(criterion="absolute_error") + + msg = ".*does not accept missing values" + with pytest.raises(ValueError, match=msg): + forest.fit(X, y) diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..f799d51eec25cd908b9dfcda3704a0ab8b8d381a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py @@ -0,0 +1,1711 @@ +""" +Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting). 
+""" + +import re +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn import datasets +from sklearn.base import clone +from sklearn.datasets import make_classification, make_regression +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor +from sklearn.ensemble._gb import _safe_divide +from sklearn.ensemble._gradient_boosting import predict_stages +from sklearn.exceptions import DataConversionWarning, NotFittedError +from sklearn.linear_model import LinearRegression +from sklearn.metrics import mean_squared_error +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import scale +from sklearn.svm import NuSVR +from sklearn.utils import check_random_state +from sklearn.utils._mocking import NoSampleWeightWrapper +from sklearn.utils._param_validation import InvalidParameterError +from sklearn.utils._testing import ( + assert_array_almost_equal, + assert_array_equal, + skip_if_32bit, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +GRADIENT_BOOSTING_ESTIMATORS = [GradientBoostingClassifier, GradientBoostingRegressor] + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [-1, 1, 1] + +# also make regression dataset +X_reg, y_reg = make_regression( + n_samples=100, n_features=4, n_informative=8, noise=10, random_state=7 +) +y_reg = scale(y_reg) + +rng = np.random.RandomState(0) +# also load the iris dataset +# and randomly permute it +iris = datasets.load_iris() +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + + +def test_exponential_n_classes_gt_2(): + """Test exponential loss raises for n_classes > 2.""" + clf = GradientBoostingClassifier(loss="exponential") + msg 
= "loss='exponential' is only suitable for a binary classification" + with pytest.raises(ValueError, match=msg): + clf.fit(iris.data, iris.target) + + +def test_raise_if_init_has_no_predict_proba(): + """Test raise if init_ has no predict_proba method.""" + clf = GradientBoostingClassifier(init=GradientBoostingRegressor) + msg = ( + "The 'init' parameter of GradientBoostingClassifier must be a str among " + "{'zero'}, None or an object implementing 'fit' and 'predict_proba'." + ) + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +@pytest.mark.parametrize("loss", ("log_loss", "exponential")) +def test_classification_toy(loss, global_random_seed): + # Check classification on a toy dataset. + clf = GradientBoostingClassifier( + loss=loss, n_estimators=10, random_state=global_random_seed + ) + + with pytest.raises(ValueError): + clf.predict(T) + + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 10 == len(clf.estimators_) + + log_loss_decrease = clf.train_score_[:-1] - clf.train_score_[1:] + assert np.any(log_loss_decrease >= 0.0) + + leaves = clf.apply(X) + assert leaves.shape == (6, 10, 1) + + +@pytest.mark.parametrize("loss", ("log_loss", "exponential")) +def test_classification_synthetic(loss, global_random_seed): + # Test GradientBoostingClassifier on synthetic dataset used by + # Hastie et al. in ESLII - Figure 10.9 + # Note that Figure 10.9 reuses the dataset generated for figure 10.2 + # and should have 2_000 train data points and 10_000 test data points. + # Here we intentionally use a smaller variant to make the test run faster, + # but the conclusions are still the same, despite the smaller datasets. 
+ X, y = datasets.make_hastie_10_2(n_samples=2000, random_state=global_random_seed) + + split_idx = 500 + X_train, X_test = X[:split_idx], X[split_idx:] + y_train, y_test = y[:split_idx], y[split_idx:] + + # Increasing the number of trees should decrease the test error + common_params = { + "max_depth": 1, + "learning_rate": 1.0, + "loss": loss, + "random_state": global_random_seed, + } + gbrt_10_stumps = GradientBoostingClassifier(n_estimators=10, **common_params) + gbrt_10_stumps.fit(X_train, y_train) + + gbrt_50_stumps = GradientBoostingClassifier(n_estimators=50, **common_params) + gbrt_50_stumps.fit(X_train, y_train) + + assert gbrt_10_stumps.score(X_test, y_test) < gbrt_50_stumps.score(X_test, y_test) + + # Decision stumps are better suited for this dataset with a large number of + # estimators. + common_params = { + "n_estimators": 200, + "learning_rate": 1.0, + "loss": loss, + "random_state": global_random_seed, + } + gbrt_stumps = GradientBoostingClassifier(max_depth=1, **common_params) + gbrt_stumps.fit(X_train, y_train) + + gbrt_10_nodes = GradientBoostingClassifier(max_leaf_nodes=10, **common_params) + gbrt_10_nodes.fit(X_train, y_train) + + assert gbrt_stumps.score(X_test, y_test) > gbrt_10_nodes.score(X_test, y_test) + + +@pytest.mark.parametrize("loss", ("squared_error", "absolute_error", "huber")) +@pytest.mark.parametrize("subsample", (1.0, 0.5)) +def test_regression_dataset(loss, subsample, global_random_seed): + # Check consistency on regression dataset with least squares + # and least absolute deviation. + ones = np.ones(len(y_reg)) + last_y_pred = None + for sample_weight in [None, ones, 2 * ones]: + # learning_rate, max_depth and n_estimators were adjusted to get a mode + # that is accurate enough to reach a low MSE on the training set while + # keeping the resource used to execute this test low enough. 
+ reg = GradientBoostingRegressor( + n_estimators=30, + loss=loss, + max_depth=4, + subsample=subsample, + min_samples_split=2, + random_state=global_random_seed, + learning_rate=0.5, + ) + + reg.fit(X_reg, y_reg, sample_weight=sample_weight) + leaves = reg.apply(X_reg) + assert leaves.shape == (100, 30) + + y_pred = reg.predict(X_reg) + mse = mean_squared_error(y_reg, y_pred) + assert mse < 0.05 + + if last_y_pred is not None: + # FIXME: We temporarily bypass this test. This is due to the fact + # that GBRT with and without `sample_weight` do not use the same + # implementation of the median during the initialization with the + # `DummyRegressor`. In the future, we should make sure that both + # implementations should be the same. See PR #17377 for more. + # assert_allclose(last_y_pred, y_pred) + pass + + last_y_pred = y_pred + + +@pytest.mark.parametrize("subsample", (1.0, 0.5)) +@pytest.mark.parametrize("sample_weight", (None, 1)) +def test_iris(subsample, sample_weight, global_random_seed): + if sample_weight == 1: + sample_weight = np.ones(len(iris.target)) + # Check consistency on dataset iris. + clf = GradientBoostingClassifier( + n_estimators=100, + loss="log_loss", + random_state=global_random_seed, + subsample=subsample, + ) + clf.fit(iris.data, iris.target, sample_weight=sample_weight) + score = clf.score(iris.data, iris.target) + assert score > 0.9 + + leaves = clf.apply(iris.data) + assert leaves.shape == (150, 100, 3) + + +def test_regression_synthetic(global_random_seed): + # Test on synthetic regression datasets used in Leo Breiman, + # `Bagging Predictors?. Machine Learning 24(2): 123-140 (1996). 
+ random_state = check_random_state(global_random_seed) + regression_params = { + "n_estimators": 100, + "max_depth": 4, + "min_samples_split": 2, + "learning_rate": 0.1, + "loss": "squared_error", + "random_state": global_random_seed, + } + + # Friedman1 + X, y = datasets.make_friedman1(n_samples=1200, random_state=random_state, noise=1.0) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + + clf = GradientBoostingRegressor(**regression_params) + clf.fit(X_train, y_train) + mse = mean_squared_error(y_test, clf.predict(X_test)) + assert mse < 6.5 + + # Friedman2 + X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + + clf = GradientBoostingRegressor(**regression_params) + clf.fit(X_train, y_train) + mse = mean_squared_error(y_test, clf.predict(X_test)) + assert mse < 2500.0 + + # Friedman3 + X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + + clf = GradientBoostingRegressor(**regression_params) + clf.fit(X_train, y_train) + mse = mean_squared_error(y_test, clf.predict(X_test)) + assert mse < 0.025 + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (GradientBoostingRegressor, X_reg, y_reg), + (GradientBoostingClassifier, iris.data, iris.target), + ], +) +def test_feature_importances(GradientBoosting, X, y): + # smoke test to check that the gradient boosting expose an attribute + # feature_importances_ + gbdt = GradientBoosting() + assert not hasattr(gbdt, "feature_importances_") + gbdt.fit(X, y) + assert hasattr(gbdt, "feature_importances_") + + +def test_probability_log(global_random_seed): + # Predict probabilities. 
+ clf = GradientBoostingClassifier(n_estimators=100, random_state=global_random_seed) + + with pytest.raises(ValueError): + clf.predict_proba(T) + + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + + # check if probabilities are in [0, 1]. + y_proba = clf.predict_proba(T) + assert np.all(y_proba >= 0.0) + assert np.all(y_proba <= 1.0) + + # derive predictions from probabilities + y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0) + assert_array_equal(y_pred, true_result) + + +def test_single_class_with_sample_weight(): + sample_weight = [0, 0, 0, 1, 1, 1] + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + msg = ( + "y contains 1 class after sample_weight trimmed classes with " + "zero weights, while a minimum of 2 classes are required." + ) + with pytest.raises(ValueError, match=msg): + clf.fit(X, y, sample_weight=sample_weight) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_check_inputs_predict_stages(csc_container): + # check that predict_stages through an error if the type of X is not + # supported + x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + x_sparse_csc = csc_container(x) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(x, y) + score = np.zeros((y.shape)).reshape(-1, 1) + err_msg = "When X is a sparse matrix, a CSR format is expected" + with pytest.raises(ValueError, match=err_msg): + predict_stages(clf.estimators_, x_sparse_csc, clf.learning_rate, score) + x_fortran = np.asfortranarray(x) + with pytest.raises(ValueError, match="X should be C-ordered np.ndarray"): + predict_stages(clf.estimators_, x_fortran, clf.learning_rate, score) + + +def test_max_feature_regression(global_random_seed): + # Test to make sure random state is set properly. 
+ X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=global_random_seed) + + X_train, X_test = X[:2000], X[2000:] + y_train, y_test = y[:2000], y[2000:] + + gbrt = GradientBoostingClassifier( + n_estimators=100, + min_samples_split=5, + max_depth=2, + learning_rate=0.1, + max_features=2, + random_state=global_random_seed, + ) + gbrt.fit(X_train, y_train) + log_loss = gbrt._loss(y_test, gbrt.decision_function(X_test)) + assert log_loss < 0.5, "GB failed with deviance %.4f" % log_loss + + +def test_feature_importance_regression( + fetch_california_housing_fxt, global_random_seed +): + """Test that Gini importance is calculated correctly. + + This test follows the example from [1]_ (pg. 373). + + .. [1] Friedman, J., Hastie, T., & Tibshirani, R. (2001). The elements + of statistical learning. New York: Springer series in statistics. + """ + california = fetch_california_housing_fxt() + X, y = california.data, california.target + X_train, X_test, y_train, y_test = train_test_split( + X, y, random_state=global_random_seed + ) + + reg = GradientBoostingRegressor( + loss="huber", + learning_rate=0.1, + max_leaf_nodes=6, + n_estimators=100, + random_state=global_random_seed, + ) + reg.fit(X_train, y_train) + sorted_idx = np.argsort(reg.feature_importances_)[::-1] + sorted_features = [california.feature_names[s] for s in sorted_idx] + + # The most important feature is the median income by far. + assert sorted_features[0] == "MedInc" + + # The three subsequent features are the following. Their relative ordering + # might change a bit depending on the randomness of the trees and the + # train / test split. + assert set(sorted_features[1:4]) == {"Longitude", "AveOccup", "Latitude"} + + +def test_max_features(): + # Test if max features is set properly for floats and str. 
+ X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) + _, n_features = X.shape + + X_train = X[:2000] + y_train = y[:2000] + + gbrt = GradientBoostingClassifier(n_estimators=1, max_features=None) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == n_features + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features=None) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == n_features + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == int(n_features * 0.3) + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features="sqrt") + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == int(np.sqrt(n_features)) + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features="log2") + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == int(np.log2(n_features)) + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.01 / X.shape[1]) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == 1 + + +def test_staged_predict(): + # Test whether staged decision function eventually gives + # the same prediction. + X, y = datasets.make_friedman1(n_samples=1200, random_state=1, noise=1.0) + X_train, y_train = X[:200], y[:200] + X_test = X[200:] + clf = GradientBoostingRegressor() + # test raise ValueError if not fitted + with pytest.raises(ValueError): + np.fromiter(clf.staged_predict(X_test), dtype=np.float64) + + clf.fit(X_train, y_train) + y_pred = clf.predict(X_test) + + # test if prediction for last stage equals ``predict`` + for y in clf.staged_predict(X_test): + assert y.shape == y_pred.shape + + assert_array_almost_equal(y_pred, y) + + +def test_staged_predict_proba(): + # Test whether staged predict proba eventually gives + # the same prediction. 
+ X, y = datasets.make_hastie_10_2(n_samples=1200, random_state=1) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + clf = GradientBoostingClassifier(n_estimators=20) + # test raise NotFittedError if not + with pytest.raises(NotFittedError): + np.fromiter(clf.staged_predict_proba(X_test), dtype=np.float64) + + clf.fit(X_train, y_train) + + # test if prediction for last stage equals ``predict`` + for y_pred in clf.staged_predict(X_test): + assert y_test.shape == y_pred.shape + + assert_array_equal(clf.predict(X_test), y_pred) + + # test if prediction for last stage equals ``predict_proba`` + for staged_proba in clf.staged_predict_proba(X_test): + assert y_test.shape[0] == staged_proba.shape[0] + assert 2 == staged_proba.shape[1] + + assert_array_almost_equal(clf.predict_proba(X_test), staged_proba) + + +@pytest.mark.parametrize("Estimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_staged_functions_defensive(Estimator, global_random_seed): + # test that staged_functions make defensive copies + rng = np.random.RandomState(global_random_seed) + X = rng.uniform(size=(10, 3)) + y = (4 * X[:, 0]).astype(int) + 1 # don't predict zeros + estimator = Estimator() + estimator.fit(X, y) + for func in ["predict", "decision_function", "predict_proba"]: + staged_func = getattr(estimator, "staged_" + func, None) + if staged_func is None: + # regressor has no staged_predict_proba + continue + with warnings.catch_warnings(record=True): + staged_result = list(staged_func(X)) + staged_result[1][:] = 0 + assert np.all(staged_result[0] != 0) + + +def test_serialization(): + # Check model serialization. 
+ clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + try: + import cPickle as pickle + except ImportError: + import pickle + + serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL) + clf = None + clf = pickle.loads(serialized_clf) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + +def test_degenerate_targets(): + # Check if we can fit even though all targets are equal. + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + # classifier should raise exception + with pytest.raises(ValueError): + clf.fit(X, np.ones(len(X))) + + clf = GradientBoostingRegressor(n_estimators=100, random_state=1) + clf.fit(X, np.ones(len(X))) + clf.predict([rng.rand(2)]) + assert_array_equal(np.ones((1,), dtype=np.float64), clf.predict([rng.rand(2)])) + + +def test_quantile_loss(global_random_seed): + # Check if quantile loss with alpha=0.5 equals absolute_error. + clf_quantile = GradientBoostingRegressor( + n_estimators=100, + loss="quantile", + max_depth=4, + alpha=0.5, + random_state=global_random_seed, + ) + + clf_quantile.fit(X_reg, y_reg) + y_quantile = clf_quantile.predict(X_reg) + + clf_ae = GradientBoostingRegressor( + n_estimators=100, + loss="absolute_error", + max_depth=4, + random_state=global_random_seed, + ) + + clf_ae.fit(X_reg, y_reg) + y_ae = clf_ae.predict(X_reg) + assert_allclose(y_quantile, y_ae) + + +def test_symbol_labels(): + # Test with non-integer class labels. + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + symbol_y = list(map(str, y)) + + clf.fit(X, symbol_y) + assert_array_equal(clf.predict(T), list(map(str, true_result))) + assert 100 == len(clf.estimators_) + + +def test_float_class_labels(): + # Test with float class labels. 
+ clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + float_y = np.asarray(y, dtype=np.float32) + + clf.fit(X, float_y) + assert_array_equal(clf.predict(T), np.asarray(true_result, dtype=np.float32)) + assert 100 == len(clf.estimators_) + + +def test_shape_y(): + # Test with float class labels. + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + y_ = np.asarray(y, dtype=np.int32) + y_ = y_[:, np.newaxis] + + # This will raise a DataConversionWarning that we want to + # "always" raise, elsewhere the warnings gets ignored in the + # later tests, and the tests that check for this warning fail + warn_msg = ( + "A column-vector y was passed when a 1d array was expected. " + "Please change the shape of y to \\(n_samples, \\), for " + "example using ravel()." + ) + with pytest.warns(DataConversionWarning, match=warn_msg): + clf.fit(X, y_) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + +def test_mem_layout(): + # Test with different memory layouts of X and y + X_ = np.asfortranarray(X) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X_, y) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + X_ = np.ascontiguousarray(X) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X_, y) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + y_ = np.asarray(y, dtype=np.int32) + y_ = np.ascontiguousarray(y_) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X, y_) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + y_ = np.asarray(y, dtype=np.int32) + y_ = np.asfortranarray(y_) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X, y_) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + +@pytest.mark.parametrize("GradientBoostingEstimator", 
GRADIENT_BOOSTING_ESTIMATORS) +def test_oob_improvement(GradientBoostingEstimator): + # Test if oob improvement has correct shape and regression test. + estimator = GradientBoostingEstimator( + n_estimators=100, random_state=1, subsample=0.5 + ) + estimator.fit(X, y) + assert estimator.oob_improvement_.shape[0] == 100 + # hard-coded regression test - change if modification in OOB computation + assert_array_almost_equal( + estimator.oob_improvement_[:5], + np.array([0.19, 0.15, 0.12, -0.11, 0.11]), + decimal=2, + ) + + +@pytest.mark.parametrize("GradientBoostingEstimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_oob_scores(GradientBoostingEstimator): + # Test if oob scores has correct shape and regression test. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + estimator = GradientBoostingEstimator( + n_estimators=100, random_state=1, subsample=0.5 + ) + estimator.fit(X, y) + assert estimator.oob_scores_.shape[0] == 100 + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + estimator = GradientBoostingEstimator( + n_estimators=100, + random_state=1, + subsample=0.5, + n_iter_no_change=5, + ) + estimator.fit(X, y) + assert estimator.oob_scores_.shape[0] < 100 + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + +@pytest.mark.parametrize( + "GradientBoostingEstimator, oob_attribute", + [ + (GradientBoostingClassifier, "oob_improvement_"), + (GradientBoostingClassifier, "oob_scores_"), + (GradientBoostingClassifier, "oob_score_"), + (GradientBoostingRegressor, "oob_improvement_"), + (GradientBoostingRegressor, "oob_scores_"), + (GradientBoostingRegressor, "oob_score_"), + ], +) +def test_oob_attributes_error(GradientBoostingEstimator, oob_attribute): + """ + Check that we raise an AttributeError when the OOB statistics were not computed. 
+ """ + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + estimator = GradientBoostingEstimator( + n_estimators=100, + random_state=1, + subsample=1.0, + ) + estimator.fit(X, y) + with pytest.raises(AttributeError): + estimator.oob_attribute + + +def test_oob_multilcass_iris(): + # Check OOB improvement on multi-class dataset. + estimator = GradientBoostingClassifier( + n_estimators=100, loss="log_loss", random_state=1, subsample=0.5 + ) + estimator.fit(iris.data, iris.target) + score = estimator.score(iris.data, iris.target) + assert score > 0.9 + assert estimator.oob_improvement_.shape[0] == estimator.n_estimators + assert estimator.oob_scores_.shape[0] == estimator.n_estimators + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + estimator = GradientBoostingClassifier( + n_estimators=100, + loss="log_loss", + random_state=1, + subsample=0.5, + n_iter_no_change=5, + ) + estimator.fit(iris.data, iris.target) + score = estimator.score(iris.data, iris.target) + assert estimator.oob_improvement_.shape[0] < estimator.n_estimators + assert estimator.oob_scores_.shape[0] < estimator.n_estimators + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + # hard-coded regression test - change if modification in OOB computation + # FIXME: the following snippet does not yield the same results on 32 bits + # assert_array_almost_equal(estimator.oob_improvement_[:5], + # np.array([12.68, 10.45, 8.18, 6.43, 5.13]), + # decimal=2) + + +def test_verbose_output(): + # Check verbose=1 does not cause error. 
+ import sys + from io import StringIO + + old_stdout = sys.stdout + sys.stdout = StringIO() + clf = GradientBoostingClassifier( + n_estimators=100, random_state=1, verbose=1, subsample=0.8 + ) + clf.fit(X, y) + verbose_output = sys.stdout + sys.stdout = old_stdout + + # check output + verbose_output.seek(0) + header = verbose_output.readline().rstrip() + # with OOB + true_header = " ".join(["%10s"] + ["%16s"] * 3) % ( + "Iter", + "Train Loss", + "OOB Improve", + "Remaining Time", + ) + assert true_header == header + + n_lines = sum(1 for l in verbose_output.readlines()) + # one for 1-10 and then 9 for 20-100 + assert 10 + 9 == n_lines + + +def test_more_verbose_output(): + # Check verbose=2 does not cause error. + import sys + from io import StringIO + + old_stdout = sys.stdout + sys.stdout = StringIO() + clf = GradientBoostingClassifier(n_estimators=100, random_state=1, verbose=2) + clf.fit(X, y) + verbose_output = sys.stdout + sys.stdout = old_stdout + + # check output + verbose_output.seek(0) + header = verbose_output.readline().rstrip() + # no OOB + true_header = " ".join(["%10s"] + ["%16s"] * 2) % ( + "Iter", + "Train Loss", + "Remaining Time", + ) + assert true_header == header + + n_lines = sum(1 for l in verbose_output.readlines()) + # 100 lines for n_estimators==100 + assert 100 == n_lines + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start(Cls, global_random_seed): + # Test if warm start equals fit. 
+ X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed) + est = Cls(n_estimators=200, max_depth=1, random_state=global_random_seed) + est.fit(X, y) + + est_ws = Cls( + n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed + ) + est_ws.fit(X, y) + est_ws.set_params(n_estimators=200) + est_ws.fit(X, y) + + if Cls is GradientBoostingRegressor: + assert_allclose(est_ws.predict(X), est.predict(X)) + else: + # Random state is preserved and hence predict_proba must also be + # same + assert_array_equal(est_ws.predict(X), est.predict(X)) + assert_allclose(est_ws.predict_proba(X), est.predict_proba(X)) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_n_estimators(Cls, global_random_seed): + # Test if warm start equals fit - set n_estimators. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed) + est = Cls(n_estimators=300, max_depth=1, random_state=global_random_seed) + est.fit(X, y) + + est_ws = Cls( + n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed + ) + est_ws.fit(X, y) + est_ws.set_params(n_estimators=300) + est_ws.fit(X, y) + + assert_allclose(est_ws.predict(X), est.predict(X)) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_max_depth(Cls): + # Test if possible to fit trees of different depth in ensemble. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=110, max_depth=2) + est.fit(X, y) + + # last 10 trees have different depth + assert est.estimators_[0, 0].max_depth == 1 + for i in range(1, 11): + assert est.estimators_[-i, 0].max_depth == 2 + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_clear(Cls): + # Test if fit clears state. 
+ X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1) + est.fit(X, y) + + est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True) + est_2.fit(X, y) # inits state + est_2.set_params(warm_start=False) + est_2.fit(X, y) # clears old state and equals est + + assert_array_almost_equal(est_2.predict(X), est.predict(X)) + + +@pytest.mark.parametrize("GradientBoosting", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_state_oob_scores(GradientBoosting): + """ + Check that the states of the OOB scores are cleared when used with `warm_start`. + """ + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + n_estimators = 100 + estimator = GradientBoosting( + n_estimators=n_estimators, + max_depth=1, + subsample=0.5, + warm_start=True, + random_state=1, + ) + estimator.fit(X, y) + oob_scores, oob_score = estimator.oob_scores_, estimator.oob_score_ + assert len(oob_scores) == n_estimators + assert oob_scores[-1] == pytest.approx(oob_score) + + n_more_estimators = 200 + estimator.set_params(n_estimators=n_more_estimators).fit(X, y) + assert len(estimator.oob_scores_) == n_more_estimators + assert_allclose(estimator.oob_scores_[:n_estimators], oob_scores) + + estimator.set_params(n_estimators=n_estimators, warm_start=False).fit(X, y) + assert estimator.oob_scores_ is not oob_scores + assert estimator.oob_score_ is not oob_score + assert_allclose(estimator.oob_scores_, oob_scores) + assert estimator.oob_score_ == pytest.approx(oob_score) + assert oob_scores[-1] == pytest.approx(oob_score) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_smaller_n_estimators(Cls): + # Test if warm start with smaller n_estimators raises error + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=99) + with pytest.raises(ValueError): + est.fit(X, y) + + 
+@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_equal_n_estimators(Cls): + # Test if warm start with equal n_estimators does nothing + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1) + est.fit(X, y) + + est2 = clone(est) + est2.set_params(n_estimators=est.n_estimators, warm_start=True) + est2.fit(X, y) + + assert_array_almost_equal(est2.predict(X), est.predict(X)) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_oob_switch(Cls): + # Test if oob can be turned on during warm start. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=110, subsample=0.5) + est.fit(X, y) + + assert_array_equal(est.oob_improvement_[:100], np.zeros(100)) + assert_array_equal(est.oob_scores_[:100], np.zeros(100)) + + # the last 10 are not zeros + assert (est.oob_improvement_[-10:] != 0.0).all() + assert (est.oob_scores_[-10:] != 0.0).all() + + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_oob(Cls): + # Test if warm start OOB equals fit. 
+ X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=200, max_depth=1, subsample=0.5, random_state=1) + est.fit(X, y) + + est_ws = Cls( + n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True + ) + est_ws.fit(X, y) + est_ws.set_params(n_estimators=200) + est_ws.fit(X, y) + + assert_array_almost_equal(est_ws.oob_improvement_[:100], est.oob_improvement_[:100]) + assert_array_almost_equal(est_ws.oob_scores_[:100], est.oob_scores_[:100]) + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + assert est_ws.oob_scores_[-1] == pytest.approx(est_ws.oob_score_) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_warm_start_sparse(Cls, sparse_container): + # Test that all sparse matrix types are supported + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est_dense = Cls( + n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True + ) + est_dense.fit(X, y) + est_dense.predict(X) + est_dense.set_params(n_estimators=200) + est_dense.fit(X, y) + y_pred_dense = est_dense.predict(X) + + X_sparse = sparse_container(X) + + est_sparse = Cls( + n_estimators=100, + max_depth=1, + subsample=0.5, + random_state=1, + warm_start=True, + ) + est_sparse.fit(X_sparse, y) + est_sparse.predict(X) + est_sparse.set_params(n_estimators=200) + est_sparse.fit(X_sparse, y) + y_pred_sparse = est_sparse.predict(X) + + assert_array_almost_equal( + est_dense.oob_improvement_[:100], est_sparse.oob_improvement_[:100] + ) + assert est_dense.oob_scores_[-1] == pytest.approx(est_dense.oob_score_) + assert_array_almost_equal(est_dense.oob_scores_[:100], est_sparse.oob_scores_[:100]) + assert est_sparse.oob_scores_[-1] == pytest.approx(est_sparse.oob_score_) + assert_array_almost_equal(y_pred_dense, y_pred_sparse) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) 
+def test_warm_start_fortran(Cls, global_random_seed): + # Test that feeding a X in Fortran-ordered is giving the same results as + # in C-ordered + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed) + est_c = Cls(n_estimators=1, random_state=global_random_seed, warm_start=True) + est_fortran = Cls(n_estimators=1, random_state=global_random_seed, warm_start=True) + + est_c.fit(X, y) + est_c.set_params(n_estimators=11) + est_c.fit(X, y) + + X_fortran = np.asfortranarray(X) + est_fortran.fit(X_fortran, y) + est_fortran.set_params(n_estimators=11) + est_fortran.fit(X_fortran, y) + + assert_allclose(est_c.predict(X), est_fortran.predict(X)) + + +def early_stopping_monitor(i, est, locals): + """Returns True on the 10th iteration.""" + if i == 9: + return True + else: + return False + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_monitor_early_stopping(Cls): + # Test if monitor return value works. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + + est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5) + est.fit(X, y, monitor=early_stopping_monitor) + assert est.n_estimators == 20 # this is not altered + assert est.estimators_.shape[0] == 10 + assert est.train_score_.shape[0] == 10 + assert est.oob_improvement_.shape[0] == 10 + assert est.oob_scores_.shape[0] == 10 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + # try refit + est.set_params(n_estimators=30) + est.fit(X, y) + assert est.n_estimators == 30 + assert est.estimators_.shape[0] == 30 + assert est.train_score_.shape[0] == 30 + assert est.oob_improvement_.shape[0] == 30 + assert est.oob_scores_.shape[0] == 30 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + est = Cls( + n_estimators=20, max_depth=1, random_state=1, subsample=0.5, warm_start=True + ) + est.fit(X, y, monitor=early_stopping_monitor) + assert est.n_estimators == 20 + assert est.estimators_.shape[0] == 10 + assert 
est.train_score_.shape[0] == 10 + assert est.oob_improvement_.shape[0] == 10 + assert est.oob_scores_.shape[0] == 10 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + # try refit + est.set_params(n_estimators=30, warm_start=False) + est.fit(X, y) + assert est.n_estimators == 30 + assert est.train_score_.shape[0] == 30 + assert est.estimators_.shape[0] == 30 + assert est.oob_improvement_.shape[0] == 30 + assert est.oob_scores_.shape[0] == 30 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + +def test_complete_classification(): + # Test greedy trees with max_depth + 1 leafs. + from sklearn.tree._tree import TREE_LEAF + + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + k = 4 + + est = GradientBoostingClassifier( + n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1 + ) + est.fit(X, y) + + tree = est.estimators_[0, 0].tree_ + assert tree.max_depth == k + assert tree.children_left[tree.children_left == TREE_LEAF].shape[0] == k + 1 + + +def test_complete_regression(): + # Test greedy trees with max_depth + 1 leafs. + from sklearn.tree._tree import TREE_LEAF + + k = 4 + + est = GradientBoostingRegressor( + n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1 + ) + est.fit(X_reg, y_reg) + + tree = est.estimators_[-1, 0].tree_ + assert tree.children_left[tree.children_left == TREE_LEAF].shape[0] == k + 1 + + +def test_zero_estimator_reg(global_random_seed): + # Test if init='zero' works for regression by checking that it is better + # than a simple baseline. 
+ + baseline = DummyRegressor(strategy="mean").fit(X_reg, y_reg) + mse_baseline = mean_squared_error(baseline.predict(X_reg), y_reg) + est = GradientBoostingRegressor( + n_estimators=5, + max_depth=1, + random_state=global_random_seed, + init="zero", + learning_rate=0.5, + ) + est.fit(X_reg, y_reg) + y_pred = est.predict(X_reg) + mse_gbdt = mean_squared_error(y_reg, y_pred) + assert mse_gbdt < mse_baseline + + +def test_zero_estimator_clf(global_random_seed): + # Test if init='zero' works for classification. + X = iris.data + y = np.array(iris.target) + + est = GradientBoostingClassifier( + n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero" + ) + est.fit(X, y) + + assert est.score(X, y) > 0.96 + + # binary clf + mask = y != 0 + y[mask] = 1 + y[~mask] = 0 + est = GradientBoostingClassifier( + n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero" + ) + est.fit(X, y) + assert est.score(X, y) > 0.96 + + +@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_max_leaf_nodes_max_depth(GBEstimator): + # Test precedence of max_leaf_nodes over max_depth. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + + k = 4 + + est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) + tree = est.estimators_[0, 0].tree_ + assert tree.max_depth == 1 + + est = GBEstimator(max_depth=1).fit(X, y) + tree = est.estimators_[0, 0].tree_ + assert tree.max_depth == 1 + + +@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_min_impurity_decrease(GBEstimator): + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + + est = GBEstimator(min_impurity_decrease=0.1) + est.fit(X, y) + for tree in est.estimators_.flat: + # Simply check if the parameter is passed on correctly. 
Tree tests + # will suffice for the actual working of this param + assert tree.min_impurity_decrease == 0.1 + + +def test_warm_start_wo_nestimators_change(): + # Test if warm_start does nothing if n_estimators is not changed. + # Regression test for #3513. + clf = GradientBoostingClassifier(n_estimators=10, warm_start=True) + clf.fit([[0, 1], [2, 3]], [0, 1]) + assert clf.estimators_.shape[0] == 10 + clf.fit([[0, 1], [2, 3]], [0, 1]) + assert clf.estimators_.shape[0] == 10 + + +@pytest.mark.parametrize( + ("loss", "value"), + [ + ("squared_error", 0.5), + ("absolute_error", 0.0), + ("huber", 0.5), + ("quantile", 0.5), + ], +) +def test_non_uniform_weights_toy_edge_case_reg(loss, value): + X = [[1, 0], [1, 0], [1, 0], [0, 1]] + y = [0, 0, 1, 0] + # ignore the first 2 training samples by setting their weight to 0 + sample_weight = [0, 0, 1, 1] + gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss) + gb.fit(X, y, sample_weight=sample_weight) + assert gb.predict([[1, 0]])[0] >= value + + +def test_non_uniform_weights_toy_edge_case_clf(): + X = [[1, 0], [1, 0], [1, 0], [0, 1]] + y = [0, 0, 1, 0] + # ignore the first 2 training samples by setting their weight to 0 + sample_weight = [0, 0, 1, 1] + for loss in ("log_loss", "exponential"): + gb = GradientBoostingClassifier(n_estimators=5, loss=loss) + gb.fit(X, y, sample_weight=sample_weight) + assert_array_equal(gb.predict([[1, 0]]), [1]) + + +@skip_if_32bit +@pytest.mark.parametrize( + "EstimatorClass", (GradientBoostingClassifier, GradientBoostingRegressor) +) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_sparse_input(EstimatorClass, sparse_container): + y, X = datasets.make_multilabel_classification( + random_state=0, n_samples=50, n_features=1, n_classes=20 + ) + y = y[:, 0] + X_sparse = sparse_container(X) + + dense = EstimatorClass( + n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7 + ).fit(X, y) + sparse 
= EstimatorClass( + n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7 + ).fit(X_sparse, y) + + assert_array_almost_equal(sparse.apply(X), dense.apply(X)) + assert_array_almost_equal(sparse.predict(X), dense.predict(X)) + assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) + + assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X)) + assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X)) + + if issubclass(EstimatorClass, GradientBoostingClassifier): + assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) + assert_array_almost_equal( + sparse.predict_log_proba(X), dense.predict_log_proba(X) + ) + + assert_array_almost_equal( + sparse.decision_function(X_sparse), sparse.decision_function(X) + ) + assert_array_almost_equal( + dense.decision_function(X_sparse), sparse.decision_function(X) + ) + for res_sparse, res in zip( + sparse.staged_decision_function(X_sparse), + sparse.staged_decision_function(X), + ): + assert_array_almost_equal(res_sparse, res) + + +@pytest.mark.parametrize( + "GradientBoostingEstimator", [GradientBoostingClassifier, GradientBoostingRegressor] +) +def test_gradient_boosting_early_stopping(GradientBoostingEstimator): + # Check if early stopping works as expected, that is empirically check that the + # number of trained estimators is increasing when the tolerance decreases. 
+ + X, y = make_classification(n_samples=1000, random_state=0) + n_estimators = 1000 + + gb_large_tol = GradientBoostingEstimator( + n_estimators=n_estimators, + n_iter_no_change=10, + learning_rate=0.1, + max_depth=3, + random_state=42, + tol=1e-1, + ) + + gb_small_tol = GradientBoostingEstimator( + n_estimators=n_estimators, + n_iter_no_change=10, + learning_rate=0.1, + max_depth=3, + random_state=42, + tol=1e-3, + ) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) + gb_large_tol.fit(X_train, y_train) + gb_small_tol.fit(X_train, y_train) + + assert gb_large_tol.n_estimators_ < gb_small_tol.n_estimators_ < n_estimators + + assert gb_large_tol.score(X_test, y_test) > 0.7 + assert gb_small_tol.score(X_test, y_test) > 0.7 + + +def test_gradient_boosting_without_early_stopping(): + # When early stopping is not used, the number of trained estimators + # must be the one specified. + X, y = make_classification(n_samples=1000, random_state=0) + + gbc = GradientBoostingClassifier( + n_estimators=50, learning_rate=0.1, max_depth=3, random_state=42 + ) + gbc.fit(X, y) + gbr = GradientBoostingRegressor( + n_estimators=30, learning_rate=0.1, max_depth=3, random_state=42 + ) + gbr.fit(X, y) + + # The number of trained estimators must be the one specified. 
+ assert gbc.n_estimators_ == 50 + assert gbr.n_estimators_ == 30 + + +def test_gradient_boosting_validation_fraction(): + X, y = make_classification(n_samples=1000, random_state=0) + + gbc = GradientBoostingClassifier( + n_estimators=100, + n_iter_no_change=10, + validation_fraction=0.1, + learning_rate=0.1, + max_depth=3, + random_state=42, + ) + gbc2 = clone(gbc).set_params(validation_fraction=0.3) + gbc3 = clone(gbc).set_params(n_iter_no_change=20) + + gbr = GradientBoostingRegressor( + n_estimators=100, + n_iter_no_change=10, + learning_rate=0.1, + max_depth=3, + validation_fraction=0.1, + random_state=42, + ) + gbr2 = clone(gbr).set_params(validation_fraction=0.3) + gbr3 = clone(gbr).set_params(n_iter_no_change=20) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) + # Check if validation_fraction has an effect + gbc.fit(X_train, y_train) + gbc2.fit(X_train, y_train) + assert gbc.n_estimators_ != gbc2.n_estimators_ + + gbr.fit(X_train, y_train) + gbr2.fit(X_train, y_train) + assert gbr.n_estimators_ != gbr2.n_estimators_ + + # Check if n_estimators_ increase monotonically with n_iter_no_change + # Set validation + gbc3.fit(X_train, y_train) + gbr3.fit(X_train, y_train) + assert gbr.n_estimators_ < gbr3.n_estimators_ + assert gbc.n_estimators_ < gbc3.n_estimators_ + + +def test_early_stopping_stratified(): + # Make sure data splitting for early stopping is stratified + X = [[1, 2], [2, 3], [3, 4], [4, 5]] + y = [0, 0, 0, 1] + + gbc = GradientBoostingClassifier(n_iter_no_change=5) + with pytest.raises( + ValueError, match="The least populated class in y has only 1 member" + ): + gbc.fit(X, y) + + +def _make_multiclass(): + return make_classification(n_classes=3, n_clusters_per_class=1) + + +@pytest.mark.parametrize( + "gb, dataset_maker, init_estimator", + [ + (GradientBoostingClassifier, make_classification, DummyClassifier), + (GradientBoostingClassifier, _make_multiclass, DummyClassifier), + (GradientBoostingRegressor, 
make_regression, DummyRegressor), + ], + ids=["binary classification", "multiclass classification", "regression"], +) +def test_gradient_boosting_with_init( + gb, dataset_maker, init_estimator, global_random_seed +): + # Check that GradientBoostingRegressor works when init is a sklearn + # estimator. + # Check that an error is raised if trying to fit with sample weight but + # initial estimator does not support sample weight + + X, y = dataset_maker() + sample_weight = np.random.RandomState(global_random_seed).rand(100) + + # init supports sample weights + init_est = init_estimator() + gb(init=init_est).fit(X, y, sample_weight=sample_weight) + + # init does not support sample weights + init_est = NoSampleWeightWrapper(init_estimator()) + gb(init=init_est).fit(X, y) # ok no sample weights + with pytest.raises(ValueError, match="estimator.*does not support sample weights"): + gb(init=init_est).fit(X, y, sample_weight=sample_weight) + + +def test_gradient_boosting_with_init_pipeline(): + # Check that the init estimator can be a pipeline (see issue #13466) + + X, y = make_regression(random_state=0) + init = make_pipeline(LinearRegression()) + gb = GradientBoostingRegressor(init=init) + gb.fit(X, y) # pipeline without sample_weight works fine + + with pytest.raises( + ValueError, + match="The initial estimator Pipeline does not support sample weights", + ): + gb.fit(X, y, sample_weight=np.ones(X.shape[0])) + + # Passing sample_weight to a pipeline raises a ValueError. This test makes + # sure we make the distinction between ValueError raised by a pipeline that + # was passed sample_weight, and a InvalidParameterError raised by a regular + # estimator whose input checking failed. + invalid_nu = 1.5 + err_msg = ( + "The 'nu' parameter of NuSVR must be a float in the" + f" range (0.0, 1.0]. Got {invalid_nu} instead." 
+ ) + with pytest.raises(InvalidParameterError, match=re.escape(err_msg)): + # Note that NuSVR properly supports sample_weight + init = NuSVR(gamma="auto", nu=invalid_nu) + gb = GradientBoostingRegressor(init=init) + gb.fit(X, y, sample_weight=np.ones(X.shape[0])) + + +def test_early_stopping_n_classes(): + # when doing early stopping (_, , y_train, _ = train_test_split(X, y)) + # there might be classes in y that are missing in y_train. As the init + # estimator will be trained on y_train, we need to raise an error if this + # happens. + + X = [[1]] * 10 + y = [0, 0] + [1] * 8 # only 2 negative class over 10 samples + gb = GradientBoostingClassifier( + n_iter_no_change=5, random_state=0, validation_fraction=0.8 + ) + with pytest.raises( + ValueError, match="The training data after the early stopping split" + ): + gb.fit(X, y) + + # No error if we let training data be big enough + gb = GradientBoostingClassifier( + n_iter_no_change=5, random_state=0, validation_fraction=0.4 + ) + + +def test_gbr_degenerate_feature_importances(): + # growing an ensemble of single node trees. See #13620 + X = np.zeros((10, 10)) + y = np.ones((10,)) + gbr = GradientBoostingRegressor().fit(X, y) + assert_array_equal(gbr.feature_importances_, np.zeros(10, dtype=np.float64)) + + +def test_huber_vs_mean_and_median(): + """Check that huber lies between absolute and squared error.""" + n_rep = 100 + n_samples = 10 + y = np.tile(np.arange(n_samples), n_rep) + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + + rng = np.random.RandomState(42) + # We want an asymmetric distribution. 
+ y = y + rng.exponential(scale=1, size=y.shape) + + gbt_absolute_error = GradientBoostingRegressor(loss="absolute_error").fit(X, y) + gbt_huber = GradientBoostingRegressor(loss="huber").fit(X, y) + gbt_squared_error = GradientBoostingRegressor().fit(X, y) + + gbt_huber_predictions = gbt_huber.predict(X) + assert np.all(gbt_absolute_error.predict(X) <= gbt_huber_predictions) + assert np.all(gbt_huber_predictions <= gbt_squared_error.predict(X)) + + +def test_safe_divide(): + """Test that _safe_divide handles division by zero.""" + with warnings.catch_warnings(): + warnings.simplefilter("error") + assert _safe_divide(np.float64(1e300), 0) == 0 + assert _safe_divide(np.float64(0.0), np.float64(0.0)) == 0 + with pytest.warns(RuntimeWarning, match="overflow"): + # np.finfo(float).max = 1.7976931348623157e+308 + _safe_divide(np.float64(1e300), 1e-10) + + +def test_squared_error_exact_backward_compat(): + """Test squared error GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. 
+ """ + n_samples = 10 + y = np.arange(n_samples) + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingRegressor(loss="squared_error", n_estimators=100).fit(X, y) + + pred_result = np.array( + [ + 1.39245726e-04, + 1.00010468e00, + 2.00007043e00, + 3.00004051e00, + 4.00000802e00, + 4.99998972e00, + 5.99996312e00, + 6.99993395e00, + 7.99989372e00, + 8.99985660e00, + ] + ) + assert_allclose(gbt.predict(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 4.87246390e-08, + 3.95590036e-08, + 3.21267865e-08, + 2.60970300e-08, + 2.11820178e-08, + 1.71995782e-08, + 1.39695549e-08, + 1.13391770e-08, + 9.19931587e-09, + 7.47000575e-09, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + # Same but with sample_weights + sample_weights = np.tile([1, 10], n_samples // 2) + gbt = GradientBoostingRegressor(loss="squared_error", n_estimators=100).fit( + X, y, sample_weight=sample_weights + ) + + pred_result = np.array( + [ + 1.52391462e-04, + 1.00011168e00, + 2.00007724e00, + 3.00004638e00, + 4.00001302e00, + 4.99999873e00, + 5.99997093e00, + 6.99994329e00, + 7.99991290e00, + 8.99988727e00, + ] + ) + assert_allclose(gbt.predict(X), pred_result, rtol=1e-6, atol=1e-5) + + train_score = np.array( + [ + 4.12445296e-08, + 3.34418322e-08, + 2.71151383e-08, + 2.19782469e-08, + 1.78173649e-08, + 1.44461976e-08, + 1.17120123e-08, + 9.49485678e-09, + 7.69772505e-09, + 6.24155316e-09, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-3, atol=1e-11) + + +@skip_if_32bit +def test_huber_exact_backward_compat(): + """Test huber GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. 
+ """ + n_samples = 10 + y = np.arange(n_samples) + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingRegressor(loss="huber", n_estimators=100, alpha=0.8).fit(X, y) + + assert_allclose(gbt._loss.closs.delta, 0.0001655688041282133) + + pred_result = np.array( + [ + 1.48120765e-04, + 9.99949174e-01, + 2.00116957e00, + 2.99986716e00, + 4.00012064e00, + 5.00002462e00, + 5.99998898e00, + 6.99692549e00, + 8.00006356e00, + 8.99985099e00, + ] + ) + assert_allclose(gbt.predict(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 2.59484709e-07, + 2.19165900e-07, + 1.89644782e-07, + 1.64556454e-07, + 1.38705110e-07, + 1.20373736e-07, + 1.04746082e-07, + 9.13835687e-08, + 8.20245756e-08, + 7.17122188e-08, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + +def test_binomial_error_exact_backward_compat(): + """Test binary log_loss GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. 
+ """ + n_samples = 10 + y = np.arange(n_samples) % 2 + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingClassifier(loss="log_loss", n_estimators=100).fit(X, y) + + pred_result = np.array( + [ + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + ] + ) + assert_allclose(gbt.predict_proba(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 1.07742210e-04, + 9.74889078e-05, + 8.82113863e-05, + 7.98167784e-05, + 7.22210566e-05, + 6.53481907e-05, + 5.91293869e-05, + 5.35023988e-05, + 4.84109045e-05, + 4.38039423e-05, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + +def test_multinomial_error_exact_backward_compat(): + """Test multiclass log_loss GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. 
+ """ + n_samples = 10 + y = np.arange(n_samples) % 4 + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingClassifier(loss="log_loss", n_estimators=100).fit(X, y) + + pred_result = np.array( + [ + [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08], + [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08], + [1.19417637e-07, 1.19417637e-07, 9.99999675e-01, 8.60526098e-08], + [1.19417637e-07, 1.19417637e-07, 8.60526088e-08, 9.99999675e-01], + [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08], + [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08], + [1.19417637e-07, 1.19417637e-07, 9.99999675e-01, 8.60526098e-08], + [1.19417637e-07, 1.19417637e-07, 8.60526088e-08, 9.99999675e-01], + [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08], + [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08], + ] + ) + assert_allclose(gbt.predict_proba(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 1.13300150e-06, + 9.75183397e-07, + 8.39348103e-07, + 7.22433588e-07, + 6.21804338e-07, + 5.35191943e-07, + 4.60643966e-07, + 3.96479930e-07, + 3.41253434e-07, + 2.93719550e-07, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + +def test_gb_denominator_zero(global_random_seed): + """Test _update_terminal_regions denominator is not zero. + + For instance for log loss based binary classification, the line search step might + become nan/inf as denominator = hessian = prob * (1 - prob) and prob = 0 or 1 can + happen. + Here, we create a situation were this happens (at least with roughly 80%) based + on the random seed. 
+ """ + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=20) + + params = { + "learning_rate": 1.0, + "subsample": 0.5, + "n_estimators": 100, + "max_leaf_nodes": 4, + "max_depth": None, + "random_state": global_random_seed, + "min_samples_leaf": 2, + } + + clf = GradientBoostingClassifier(**params) + # _safe_devide would raise a RuntimeWarning + with warnings.catch_warnings(): + warnings.simplefilter("error") + clf.fit(X, y) diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py new file mode 100644 index 0000000000000000000000000000000000000000..19e34bbf51808931fd29b650a527ac0bc668dd9c --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py @@ -0,0 +1,393 @@ +""" +Testing for Isolation Forest algorithm (sklearn.ensemble.iforest). +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from unittest.mock import Mock, patch + +import numpy as np +import pytest +from joblib import parallel_backend + +from sklearn.datasets import load_diabetes, load_iris, make_classification +from sklearn.ensemble import IsolationForest +from sklearn.ensemble._iforest import _average_path_length +from sklearn.metrics import roc_auc_score +from sklearn.model_selection import ParameterGrid, train_test_split +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +# load iris & diabetes dataset +iris = load_iris() +diabetes = load_diabetes() + + +def test_iforest(global_random_seed): + """Check Isolation Forest for various parameter settings.""" + X_train = np.array([[0, 1], [1, 2]]) + X_test = np.array([[2, 1], [1, 1]]) + + grid = ParameterGrid( + {"n_estimators": [3], 
"max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]}
+    )
+
+    with ignore_warnings():
+        for params in grid:
+            IsolationForest(random_state=global_random_seed, **params).fit(
+                X_train
+            ).predict(X_test)
+
+
+@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
+def test_iforest_sparse(global_random_seed, sparse_container):
+    """Check IForest for various parameter settings on sparse input."""
+    rng = check_random_state(global_random_seed)
+    X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)
+    grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]})
+
+    X_train_sparse = sparse_container(X_train)
+    X_test_sparse = sparse_container(X_test)
+
+    for params in grid:
+        # Trained on sparse format
+        sparse_classifier = IsolationForest(
+            n_estimators=10, random_state=global_random_seed, **params
+        ).fit(X_train_sparse)
+        sparse_results = sparse_classifier.predict(X_test_sparse)
+
+        # Trained on dense format
+        dense_classifier = IsolationForest(
+            n_estimators=10, random_state=global_random_seed, **params
+        ).fit(X_train)
+        dense_results = dense_classifier.predict(X_test)
+
+        assert_array_equal(sparse_results, dense_results)
+
+
+def test_iforest_error():
+    """Test that it gives proper exception on deficient input."""
+    X = iris.data
+
+    # The dataset has less than 256 samples, explicitly setting
+    # max_samples > n_samples should result in a warning. If not set
+    # explicitly there should be no warning
+    warn_msg = "max_samples will be set to n_samples for estimation"
+    with pytest.warns(UserWarning, match=warn_msg):
+        IsolationForest(max_samples=1000).fit(X)
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", UserWarning)
+        IsolationForest(max_samples="auto").fit(X)
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", UserWarning)
+        IsolationForest(max_samples=np.int64(2)).fit(X)
+
+    # test X_test n_features match X_train one:
+    with pytest.raises(ValueError):
+        IsolationForest().fit(X).predict(X[:, 1:])
+
+
+def test_recalculate_max_depth():
+    """Check max_depth recalculation when max_samples is reset to n_samples"""
+    X = iris.data
+    clf = IsolationForest().fit(X)
+    for est in clf.estimators_:
+        assert est.max_depth == int(np.ceil(np.log2(X.shape[0])))
+
+
+def test_max_samples_attribute():
+    X = iris.data
+    clf = IsolationForest().fit(X)
+    assert clf.max_samples_ == X.shape[0]
+
+    clf = IsolationForest(max_samples=500)
+    warn_msg = "max_samples will be set to n_samples for estimation"
+    with pytest.warns(UserWarning, match=warn_msg):
+        clf.fit(X)
+    assert clf.max_samples_ == X.shape[0]
+
+    clf = IsolationForest(max_samples=0.4).fit(X)
+    assert clf.max_samples_ == 0.4 * X.shape[0]
+
+
+def test_iforest_parallel_regression(global_random_seed):
+    """Check parallel regression."""
+    rng = check_random_state(global_random_seed)
+
+    X_train, X_test = train_test_split(diabetes.data, random_state=rng)
+
+    ensemble = IsolationForest(n_jobs=3, random_state=global_random_seed).fit(X_train)
+
+    ensemble.set_params(n_jobs=1)
+    y1 = ensemble.predict(X_test)
+    ensemble.set_params(n_jobs=2)
+    y2 = ensemble.predict(X_test)
+    assert_array_almost_equal(y1, y2)
+
+    ensemble = IsolationForest(n_jobs=1, random_state=global_random_seed).fit(X_train)
+
+    y3 = ensemble.predict(X_test)
+    assert_array_almost_equal(y1, y3)
+
+
+def test_iforest_performance(global_random_seed):
+    """Test Isolation Forest
performs well"""
+
+    # Generate train/test data
+    rng = check_random_state(global_random_seed)
+    X = 0.3 * rng.randn(600, 2)
+    X = rng.permutation(np.vstack((X + 2, X - 2)))
+    X_train = X[:1000]
+
+    # Generate some abnormal novel observations
+    X_outliers = rng.uniform(low=-1, high=1, size=(200, 2))
+    X_test = np.vstack((X[1000:], X_outliers))
+    y_test = np.array([0] * 200 + [1] * 200)
+
+    # fit the model
+    clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
+
+    # predict scores (the lower, the more normal)
+    y_pred = -clf.decision_function(X_test)
+
+    # check that there is at most 6 errors (false positive or false negative)
+    assert roc_auc_score(y_test, y_pred) > 0.98
+
+
+@pytest.mark.parametrize("contamination", [0.25, "auto"])
+def test_iforest_works(contamination, global_random_seed):
+    # toy sample (the last two samples are outliers)
+    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [7, 4], [-5, 9]]
+
+    # Test IsolationForest
+    clf = IsolationForest(random_state=global_random_seed, contamination=contamination)
+    clf.fit(X)
+    decision_func = -clf.decision_function(X)
+    pred = clf.predict(X)
+    # assert detect outliers:
+    assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])
+    assert_array_equal(pred, 6 * [1] + 2 * [-1])
+
+
+def test_max_samples_consistency():
+    # Make sure validated max_samples in iforest and BaseBagging are identical
+    X = iris.data
+    clf = IsolationForest().fit(X)
+    assert clf.max_samples_ == clf._max_samples
+
+
+def test_iforest_subsampled_features():
+    # It tests non-regression for #5732 which failed at predict.
+    rng = check_random_state(0)
+    X_train, X_test, y_train, y_test = train_test_split(
+        diabetes.data[:50], diabetes.target[:50], random_state=rng
+    )
+    clf = IsolationForest(max_features=0.8)
+    clf.fit(X_train, y_train)
+    clf.predict(X_test)
+
+
+def test_iforest_average_path_length():
+    # It tests non-regression for #8549 which used the wrong formula
+    # for average path length, strictly for the integer case
+    # Updated to check average path length when input is <= 2 (issue #11839)
+    result_one = 2.0 * (np.log(4.0) + np.euler_gamma) - 2.0 * 4.0 / 5.0
+    result_two = 2.0 * (np.log(998.0) + np.euler_gamma) - 2.0 * 998.0 / 999.0
+    assert_allclose(_average_path_length([0]), [0.0])
+    assert_allclose(_average_path_length([1]), [0.0])
+    assert_allclose(_average_path_length([2]), [1.0])
+    assert_allclose(_average_path_length([5]), [result_one])
+    assert_allclose(_average_path_length([999]), [result_two])
+    assert_allclose(
+        _average_path_length(np.array([1, 2, 5, 999])),
+        [0.0, 1.0, result_one, result_two],
+    )
+    # _average_path_length is increasing
+    avg_path_length = _average_path_length(np.arange(5))
+    assert_array_equal(avg_path_length, np.sort(avg_path_length))
+
+
+def test_score_samples():
+    X_train = [[1, 1], [1, 2], [2, 1]]
+    clf1 = IsolationForest(contamination=0.1).fit(X_train)
+    clf2 = IsolationForest().fit(X_train)
+    assert_array_equal(
+        clf1.score_samples([[2.0, 2.0]]),
+        clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
+    )
+    assert_array_equal(
+        clf2.score_samples([[2.0, 2.0]]),
+        clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
+    )
+    assert_array_equal(
+        clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
+    )
+
+
+def test_iforest_warm_start():
+    """Test iterative addition of iTrees to an iForest"""
+
+    rng = check_random_state(0)
+    X = rng.randn(20, 2)
+
+    # fit first 10 trees
+    clf = IsolationForest(
+        n_estimators=10, max_samples=20, random_state=rng, warm_start=True
+    )
+    clf.fit(X)
+    # remember the 1st tree
+    tree_1 =
clf.estimators_[0]
+    # fit another 10 trees
+    clf.set_params(n_estimators=20)
+    clf.fit(X)
+    # expecting 20 fitted trees and no overwritten trees
+    assert len(clf.estimators_) == 20
+    assert clf.estimators_[0] is tree_1
+
+
+# mock get_chunk_n_rows to actually test more than one chunk (here one
+# chunk has 3 rows):
+@patch(
+    "sklearn.ensemble._iforest.get_chunk_n_rows",
+    side_effect=Mock(**{"return_value": 3}),
+)
+@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)])
+def test_iforest_chunks_works1(
+    mocked_get_chunk, contamination, n_predict_calls, global_random_seed
+):
+    test_iforest_works(contamination, global_random_seed)
+    assert mocked_get_chunk.call_count == n_predict_calls
+
+
+# idem with chunk_size = 10 rows
+@patch(
+    "sklearn.ensemble._iforest.get_chunk_n_rows",
+    side_effect=Mock(**{"return_value": 10}),
+)
+@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)])
+def test_iforest_chunks_works2(
+    mocked_get_chunk, contamination, n_predict_calls, global_random_seed
+):
+    test_iforest_works(contamination, global_random_seed)
+    assert mocked_get_chunk.call_count == n_predict_calls
+
+
+def test_iforest_with_uniform_data():
+    """Test whether iforest predicts inliers when using uniform data"""
+
+    # 2-d array of all 1s
+    X = np.ones((100, 10))
+    iforest = IsolationForest()
+    iforest.fit(X)
+
+    rng = np.random.RandomState(0)
+
+    assert all(iforest.predict(X) == 1)
+    assert all(iforest.predict(rng.randn(100, 10)) == 1)
+    assert all(iforest.predict(X + 1) == 1)
+    assert all(iforest.predict(X - 1) == 1)
+
+    # 2-d array where columns contain the same value across rows
+    X = np.repeat(rng.randn(1, 10), 100, 0)
+    iforest = IsolationForest()
+    iforest.fit(X)
+
+    assert all(iforest.predict(X) == 1)
+    assert all(iforest.predict(rng.randn(100, 10)) == 1)
+    assert all(iforest.predict(np.ones((100, 10))) == 1)
+
+    # Single row
+    X = rng.randn(1, 10)
+    iforest = IsolationForest()
+    iforest.fit(X)
+
+    assert all(iforest.predict(X) == 1)
+    assert all(iforest.predict(rng.randn(100, 10)) == 1)
+    assert all(iforest.predict(np.ones((100, 10))) == 1)
+
+
+@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
+def test_iforest_with_n_jobs_does_not_segfault(csc_container):
+    """Check that Isolation Forest does not segfault with n_jobs=2
+
+    Non-regression test for #23252
+    """
+    X, _ = make_classification(n_samples=85_000, n_features=100, random_state=0)
+    X = csc_container(X)
+    IsolationForest(n_estimators=10, max_samples=256, n_jobs=2).fit(X)
+
+
+def test_iforest_preserve_feature_names():
+    """Check that feature names are preserved when contamination is not "auto".
+
+    Feature names are required for consistency checks during scoring.
+
+    Non-regression test for Issue #25844
+    """
+    pd = pytest.importorskip("pandas")
+    rng = np.random.RandomState(0)
+
+    X = pd.DataFrame(data=rng.randn(4), columns=["a"])
+    model = IsolationForest(random_state=0, contamination=0.05)
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", UserWarning)
+        model.fit(X)
+
+
+@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
+def test_iforest_sparse_input_float_contamination(sparse_container):
+    """Check that `IsolationForest` accepts sparse matrix input and float value for
+    contamination.
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27626 + """ + X, _ = make_classification(n_samples=50, n_features=4, random_state=0) + X = sparse_container(X) + X.sort_indices() + contamination = 0.1 + iforest = IsolationForest( + n_estimators=5, contamination=contamination, random_state=0 + ).fit(X) + + X_decision = iforest.decision_function(X) + assert (X_decision < 0).sum() / X.shape[0] == pytest.approx(contamination) + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("contamination", [0.25, "auto"]) +def test_iforest_predict_parallel(global_random_seed, contamination, n_jobs): + """Check that `IsolationForest.predict` is parallelized.""" + # toy sample (the last two samples are outliers) + X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [7, 4], [-5, 9]] + + # Test IsolationForest + clf = IsolationForest( + random_state=global_random_seed, contamination=contamination, n_jobs=None + ) + clf.fit(X) + decision_func = -clf.decision_function(X) + pred = clf.predict(X) + + # assert detect outliers: + assert np.min(decision_func[-2:]) > np.max(decision_func[:-2]) + assert_array_equal(pred, 6 * [1] + 2 * [-1]) + + clf_parallel = IsolationForest( + random_state=global_random_seed, contamination=contamination, n_jobs=-1 + ) + clf_parallel.fit(X) + with parallel_backend("threading", n_jobs=n_jobs): + pred_paralell = clf_parallel.predict(X) + + # assert the same results as non-parallel + assert_array_equal(pred, pred_paralell) diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py new file mode 100644 index 0000000000000000000000000000000000000000..e944ecc4abb528c9bffb1cf23674831fcd0fb7ca --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py @@ -0,0 +1,1019 @@ +"""Test the stacking classifier and regressor.""" + +# Authors: The 
scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import re +from unittest.mock import Mock + +import numpy as np +import pytest +from numpy.testing import assert_array_equal +from scipy import sparse + +from sklearn import config_context +from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone +from sklearn.datasets import ( + load_breast_cancer, + load_diabetes, + load_iris, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import ( + RandomForestClassifier, + RandomForestRegressor, + StackingClassifier, + StackingRegressor, +) +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.linear_model import ( + LinearRegression, + LogisticRegression, + Ridge, + RidgeClassifier, +) +from sklearn.model_selection import KFold, StratifiedKFold, train_test_split +from sklearn.neighbors import KNeighborsClassifier +from sklearn.neural_network import MLPClassifier +from sklearn.preprocessing import scale +from sklearn.svm import SVC, LinearSVC, LinearSVR +from sklearn.tests.metadata_routing_common import ( + ConsumingClassifier, + ConsumingRegressor, + _Registry, + check_recorded_metadata, +) +from sklearn.utils._mocking import CheckingClassifier +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +diabetes = load_diabetes() +X_diabetes, y_diabetes = diabetes.data, diabetes.target +iris = load_iris() +X_iris, y_iris = iris.data, iris.target +X_multilabel, y_multilabel = make_multilabel_classification( + n_classes=3, random_state=42 +) +X_binary, y_binary = make_classification(n_classes=2, random_state=42) + + +@pytest.mark.parametrize( + "cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)] +) +@pytest.mark.parametrize( + "final_estimator", [None, 
RandomForestClassifier(random_state=42)] +) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_classifier_iris(cv, final_estimator, passthrough): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, y_test = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())] + clf = StackingClassifier( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + passthrough=passthrough, + ) + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + assert clf.score(X_test, y_test) > 0.8 + + X_trans = clf.transform(X_test) + expected_column_count = 10 if passthrough else 6 + assert X_trans.shape[1] == expected_column_count + if passthrough: + assert_allclose(X_test, X_trans[:, -4:]) + + clf.set_params(lr="drop") + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + if final_estimator is None: + # LogisticRegression has decision_function method + clf.decision_function(X_test) + + X_trans = clf.transform(X_test) + expected_column_count_drop = 7 if passthrough else 3 + assert X_trans.shape[1] == expected_column_count_drop + if passthrough: + assert_allclose(X_test, X_trans[:, -4:]) + + +def test_stacking_classifier_drop_column_binary_classification(): + # check that a column is dropped in binary classification + X, y = load_breast_cancer(return_X_y=True) + X_train, X_test, y_train, _ = train_test_split( + scale(X), y, stratify=y, random_state=42 + ) + + # both classifiers implement 'predict_proba' and will both drop one column + estimators = [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(random_state=42)), + ] + clf = StackingClassifier(estimators=estimators, cv=3) + + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert X_trans.shape[1] == 2 + + # LinearSVC does not implement 'predict_proba' and will not 
drop one column + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())] + clf.set_params(estimators=estimators) + + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert X_trans.shape[1] == 2 + + +def test_stacking_classifier_drop_estimator(): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + estimators = [("lr", "drop"), ("svc", LinearSVC(random_state=0))] + rf = RandomForestClassifier(n_estimators=10, random_state=42) + clf = StackingClassifier( + estimators=[("svc", LinearSVC(random_state=0))], + final_estimator=rf, + cv=5, + ) + clf_drop = StackingClassifier(estimators=estimators, final_estimator=rf, cv=5) + + clf.fit(X_train, y_train) + clf_drop.fit(X_train, y_train) + assert_allclose(clf.predict(X_test), clf_drop.predict(X_test)) + assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test)) + assert_allclose(clf.transform(X_test), clf_drop.transform(X_test)) + + +def test_stacking_regressor_drop_estimator(): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_diabetes), y_diabetes, random_state=42 + ) + estimators = [("lr", "drop"), ("svr", LinearSVR(random_state=0))] + rf = RandomForestRegressor(n_estimators=10, random_state=42) + reg = StackingRegressor( + estimators=[("svr", LinearSVR(random_state=0))], + final_estimator=rf, + cv=5, + ) + reg_drop = StackingRegressor(estimators=estimators, final_estimator=rf, cv=5) + + reg.fit(X_train, y_train) + reg_drop.fit(X_train, y_train) + assert_allclose(reg.predict(X_test), reg_drop.predict(X_test)) + assert_allclose(reg.transform(X_test), reg_drop.transform(X_test)) + + +@pytest.mark.parametrize("cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)]) +@pytest.mark.parametrize( + "final_estimator, 
predict_params", + [ + (None, {}), + (RandomForestRegressor(random_state=42), {}), + (DummyRegressor(), {"return_std": True}), + ], +) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_regressor_diabetes(cv, final_estimator, predict_params, passthrough): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_diabetes), y_diabetes, random_state=42 + ) + estimators = [("lr", LinearRegression()), ("svr", LinearSVR())] + reg = StackingRegressor( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + passthrough=passthrough, + ) + reg.fit(X_train, y_train) + result = reg.predict(X_test, **predict_params) + expected_result_length = 2 if predict_params else 1 + if predict_params: + assert len(result) == expected_result_length + + X_trans = reg.transform(X_test) + expected_column_count = 12 if passthrough else 2 + assert X_trans.shape[1] == expected_column_count + if passthrough: + assert_allclose(X_test, X_trans[:, -10:]) + + reg.set_params(lr="drop") + reg.fit(X_train, y_train) + reg.predict(X_test) + + X_trans = reg.transform(X_test) + expected_column_count_drop = 11 if passthrough else 1 + assert X_trans.shape[1] == expected_column_count_drop + if passthrough: + assert_allclose(X_test, X_trans[:, -10:]) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_stacking_regressor_sparse_passthrough(sparse_container): + # Check passthrough behavior on a sparse X matrix + X_train, X_test, y_train, _ = train_test_split( + sparse_container(scale(X_diabetes)), y_diabetes, random_state=42 + ) + estimators = [("lr", LinearRegression()), ("svr", LinearSVR())] + rf = RandomForestRegressor(n_estimators=10, random_state=42) + clf = StackingRegressor( + estimators=estimators, final_estimator=rf, cv=5, passthrough=True + ) + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + 
assert_allclose_dense_sparse(X_test, X_trans[:, -10:]) + assert sparse.issparse(X_trans) + assert X_test.format == X_trans.format + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_stacking_classifier_sparse_passthrough(sparse_container): + # Check passthrough behavior on a sparse X matrix + X_train, X_test, y_train, _ = train_test_split( + sparse_container(scale(X_iris)), y_iris, random_state=42 + ) + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())] + rf = RandomForestClassifier(n_estimators=10, random_state=42) + clf = StackingClassifier( + estimators=estimators, final_estimator=rf, cv=5, passthrough=True + ) + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert_allclose_dense_sparse(X_test, X_trans[:, -4:]) + assert sparse.issparse(X_trans) + assert X_test.format == X_trans.format + + +def test_stacking_classifier_drop_binary_prob(): + # check that classifier will drop one of the probability column for + # binary classification problem + + # Select only the 2 first classes + X_, y_ = scale(X_iris[:100]), y_iris[:100] + + estimators = [("lr", LogisticRegression()), ("rf", RandomForestClassifier())] + clf = StackingClassifier(estimators=estimators) + clf.fit(X_, y_) + X_meta = clf.transform(X_) + assert X_meta.shape[1] == 2 + + +class NoWeightRegressor(RegressorMixin, BaseEstimator): + def fit(self, X, y): + self.reg = DummyRegressor() + return self.reg.fit(X, y) + + def predict(self, X): + return np.ones(X.shape[0]) + + +class NoWeightClassifier(ClassifierMixin, BaseEstimator): + def fit(self, X, y): + self.clf = DummyClassifier(strategy="stratified") + return self.clf.fit(X, y) + + +@pytest.mark.parametrize( + "y, params, type_err, msg_err", + [ + (y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("svm", SVC(max_iter=50_000)), + ], + "stack_method": "predict_proba", + }, 
+ ValueError, + "does not implement the method predict_proba", + ), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("cor", NoWeightClassifier()), + ] + }, + TypeError, + "does not support sample weight", + ), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("cor", LinearSVC(max_iter=50_000)), + ], + "final_estimator": NoWeightClassifier(), + }, + TypeError, + "does not support sample weight", + ), + ], +) +def test_stacking_classifier_error(y, params, type_err, msg_err): + with pytest.raises(type_err, match=msg_err): + clf = StackingClassifier(**params, cv=3) + clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0])) + + +@pytest.mark.parametrize( + "y, params, type_err, msg_err", + [ + (y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"), + ( + y_diabetes, + {"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]}, + TypeError, + "does not support sample weight", + ), + ( + y_diabetes, + { + "estimators": [ + ("lr", LinearRegression()), + ("cor", LinearSVR()), + ], + "final_estimator": NoWeightRegressor(), + }, + TypeError, + "does not support sample weight", + ), + ], +) +def test_stacking_regressor_error(y, params, type_err, msg_err): + with pytest.raises(type_err, match=msg_err): + reg = StackingRegressor(**params, cv=3) + reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0])) + + +@pytest.mark.parametrize( + "estimator, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("svm", LinearSVC(random_state=0)), + ] + ), + X_iris[:100], + y_iris[:100], + ), # keep only classes 0 and 1 + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=0)), + ] + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_randomness(estimator, X, y): + # checking that fixing the random state of the CV will lead to 
the same + # results + estimator_full = clone(estimator) + estimator_full.set_params( + cv=KFold(shuffle=True, random_state=np.random.RandomState(0)) + ) + + estimator_drop = clone(estimator) + estimator_drop.set_params(lr="drop") + estimator_drop.set_params( + cv=KFold(shuffle=True, random_state=np.random.RandomState(0)) + ) + + assert_allclose( + estimator_full.fit(X, y).transform(X)[:, 1:], + estimator_drop.fit(X, y).transform(X), + ) + + +def test_stacking_classifier_stratify_default(): + # check that we stratify the classes for the default CV + clf = StackingClassifier( + estimators=[ + ("lr", LogisticRegression(max_iter=10_000)), + ("svm", LinearSVC(max_iter=10_000)), + ] + ) + # since iris is not shuffled, a simple k-fold would not contain the + # 3 classes during training + clf.fit(X_iris, y_iris) + + +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(random_state=42)), + ], + final_estimator=LogisticRegression(), + cv=KFold(shuffle=True, random_state=42), + ), + *load_breast_cancer(return_X_y=True), + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=42)), + ], + final_estimator=LinearRegression(), + cv=KFold(shuffle=True, random_state=42), + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_with_sample_weight(stacker, X, y): + # check that sample weights has an influence on the fitting + # note: ConvergenceWarning are catch since we are not worrying about the + # convergence here + n_half_samples = len(y) // 2 + total_sample_weight = np.array( + [0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples) + ) + X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split( + X, y, total_sample_weight, random_state=42 + ) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train) + y_pred_no_weight = 
stacker.predict(X_test) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape)) + y_pred_unit_weight = stacker.predict(X_test) + + assert_allclose(y_pred_no_weight, y_pred_unit_weight) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train, sample_weight=sample_weight_train) + y_pred_biased = stacker.predict(X_test) + + assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0 + + +def test_stacking_classifier_sample_weight_fit_param(): + # check sample_weight is passed to all invocations of fit + stacker = StackingClassifier( + estimators=[("lr", CheckingClassifier(expected_sample_weight=True))], + final_estimator=CheckingClassifier(expected_sample_weight=True), + ) + stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0])) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(random_state=42)), + ], + final_estimator=LogisticRegression(), + ), + *load_breast_cancer(return_X_y=True), + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=42)), + ], + final_estimator=LinearRegression(), + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_cv_influence(stacker, X, y): + # check that the stacking affects the fit of the final estimator but not + # the fit of the base estimators + # note: ConvergenceWarning are catch since we are not worrying about the + # convergence here + stacker_cv_3 = clone(stacker) + stacker_cv_5 = clone(stacker) + + stacker_cv_3.set_params(cv=3) + stacker_cv_5.set_params(cv=5) + + stacker_cv_3.fit(X, y) + stacker_cv_5.fit(X, y) + + # the base estimators should be identical + for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, 
stacker_cv_5.estimators_): + assert_allclose(est_cv_3.coef_, est_cv_5.coef_) + + # the final estimator should be different + with pytest.raises(AssertionError, match="Not equal"): + assert_allclose( + stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_ + ) + + +@pytest.mark.parametrize( + "Stacker, Estimator, stack_method, final_estimator, X, y", + [ + ( + StackingClassifier, + DummyClassifier, + "predict_proba", + LogisticRegression(random_state=42), + X_iris, + y_iris, + ), + ( + StackingRegressor, + DummyRegressor, + "predict", + LinearRegression(), + X_diabetes, + y_diabetes, + ), + ], +) +def test_stacking_prefit(Stacker, Estimator, stack_method, final_estimator, X, y): + """Check the behaviour of stacking when `cv='prefit'`""" + X_train1, X_train2, y_train1, y_train2 = train_test_split( + X, y, random_state=42, test_size=0.5 + ) + estimators = [ + ("d0", Estimator().fit(X_train1, y_train1)), + ("d1", Estimator().fit(X_train1, y_train1)), + ] + + # mock out fit and stack_method to be asserted later + for _, estimator in estimators: + estimator.fit = Mock(name="fit") + stack_func = getattr(estimator, stack_method) + predict_method_mocked = Mock(side_effect=stack_func) + # Mocking a method will not provide a `__name__` while Python methods + # do and we are using it in `_get_response_method`. 
+ predict_method_mocked.__name__ = stack_method + setattr(estimator, stack_method, predict_method_mocked) + + stacker = Stacker( + estimators=estimators, cv="prefit", final_estimator=final_estimator + ) + stacker.fit(X_train2, y_train2) + + assert stacker.estimators_ == [estimator for _, estimator in estimators] + # fit was not called again + assert all(estimator.fit.call_count == 0 for estimator in stacker.estimators_) + + # stack method is called with the proper inputs + for estimator in stacker.estimators_: + stack_func_mock = getattr(estimator, stack_method) + stack_func_mock.assert_called_with(X_train2) + + +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[("lr", LogisticRegression()), ("svm", SVC())], + cv="prefit", + ), + X_iris, + y_iris, + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR()), + ], + cv="prefit", + ), + X_diabetes, + y_diabetes, + ), + ], +) +def test_stacking_prefit_error(stacker, X, y): + # check that NotFittedError is raised + # if base estimators are not fitted when cv="prefit" + with pytest.raises(NotFittedError): + stacker.fit(X, y) + + +@pytest.mark.parametrize( + "make_dataset, Stacking, Estimator", + [ + (make_classification, StackingClassifier, LogisticRegression), + (make_regression, StackingRegressor, LinearRegression), + ], +) +def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator): + # Stacking supports estimators without `n_features_in_`. 
Regression test + # for #17353 + + class MyEstimator(Estimator): + """Estimator without n_features_in_""" + + def fit(self, X, y): + super().fit(X, y) + del self.n_features_in_ + + X, y = make_dataset(random_state=0, n_samples=100) + stacker = Stacking(estimators=[("lr", MyEstimator())]) + + msg = f"{Stacking.__name__} object has no attribute n_features_in_" + with pytest.raises(AttributeError, match=msg): + stacker.n_features_in_ + + # Does not raise + stacker.fit(X, y) + + msg = "'MyEstimator' object has no attribute 'n_features_in_'" + with pytest.raises(AttributeError, match=msg): + stacker.n_features_in_ + + +@pytest.mark.parametrize( + "estimator", + [ + # output a 2D array of the probability of the positive class for each output + MLPClassifier(random_state=42), + # output a list of 2D array containing the probability of each class + # for each output + RandomForestClassifier(random_state=42), + ], + ids=["MLPClassifier", "RandomForestClassifier"], +) +def test_stacking_classifier_multilabel_predict_proba(estimator): + """Check the behaviour for the multilabel classification case and the + `predict_proba` stacking method. + + Estimators are not consistent with the output arrays and we need to ensure that + we handle all cases. 
+ """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + n_outputs = 3 + + estimators = [("est", estimator)] + stacker = StackingClassifier( + estimators=estimators, + final_estimator=KNeighborsClassifier(), + stack_method="predict_proba", + ).fit(X_train, y_train) + + X_trans = stacker.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_outputs) + # we should not have any collinear classes and thus nothing should sum to 1 + assert not any(np.isclose(X_trans.sum(axis=1), 1.0)) + + y_pred = stacker.predict(X_test) + assert y_pred.shape == y_test.shape + + +def test_stacking_classifier_multilabel_decision_function(): + """Check the behaviour for the multilabel classification case and the + `decision_function` stacking method. Only `RidgeClassifier` supports this + case. + """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + n_outputs = 3 + + estimators = [("est", RidgeClassifier())] + stacker = StackingClassifier( + estimators=estimators, + final_estimator=KNeighborsClassifier(), + stack_method="decision_function", + ).fit(X_train, y_train) + + X_trans = stacker.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_outputs) + + y_pred = stacker.predict(X_test) + assert y_pred.shape == y_test.shape + + +@pytest.mark.parametrize("stack_method", ["auto", "predict"]) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_classifier_multilabel_auto_predict(stack_method, passthrough): + """Check the behaviour for the multilabel classification case for stack methods + supported for all estimators or automatically picked up. 
+ """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + y_train_before_fit = y_train.copy() + n_outputs = 3 + + estimators = [ + ("mlp", MLPClassifier(random_state=42)), + ("rf", RandomForestClassifier(random_state=42)), + ("ridge", RidgeClassifier()), + ] + final_estimator = KNeighborsClassifier() + + clf = StackingClassifier( + estimators=estimators, + final_estimator=final_estimator, + passthrough=passthrough, + stack_method=stack_method, + ).fit(X_train, y_train) + + # make sure we don't change `y_train` inplace + assert_array_equal(y_train_before_fit, y_train) + + y_pred = clf.predict(X_test) + assert y_pred.shape == y_test.shape + + if stack_method == "auto": + expected_stack_methods = ["predict_proba", "predict_proba", "decision_function"] + else: + expected_stack_methods = ["predict"] * len(estimators) + assert clf.stack_method_ == expected_stack_methods + + n_features_X_trans = n_outputs * len(estimators) + if passthrough: + n_features_X_trans += X_train.shape[1] + X_trans = clf.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_features_X_trans) + + assert_array_equal(clf.classes_, [np.array([0, 1])] * n_outputs) + + +@pytest.mark.parametrize( + "stacker, feature_names, X, y, expected_names", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("svm", LinearSVC(random_state=0)), + ] + ), + iris.feature_names, + X_iris, + y_iris, + [ + "stackingclassifier_lr0", + "stackingclassifier_lr1", + "stackingclassifier_lr2", + "stackingclassifier_svm0", + "stackingclassifier_svm1", + "stackingclassifier_svm2", + ], + ), + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("other", "drop"), + ("svm", LinearSVC(random_state=0)), + ] + ), + iris.feature_names, + X_iris[:100], + y_iris[:100], # keep only classes 0 and 1 + [ + "stackingclassifier_lr", + "stackingclassifier_svm", + ], + ), + ( + 
StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=0)), + ] + ), + diabetes.feature_names, + X_diabetes, + y_diabetes, + [ + "stackingregressor_lr", + "stackingregressor_svm", + ], + ), + ], + ids=[ + "StackingClassifier_multiclass", + "StackingClassifier_binary", + "StackingRegressor", + ], +) +@pytest.mark.parametrize("passthrough", [True, False]) +def test_get_feature_names_out( + stacker, feature_names, X, y, expected_names, passthrough +): + """Check get_feature_names_out works for stacking.""" + + stacker.set_params(passthrough=passthrough) + stacker.fit(scale(X), y) + + if passthrough: + expected_names = np.concatenate((expected_names, feature_names)) + + names_out = stacker.get_feature_names_out(feature_names) + assert_array_equal(names_out, expected_names) + + +def test_stacking_classifier_base_regressor(): + """Check that a regressor can be used as the first layer in `StackingClassifier`.""" + X_train, X_test, y_train, y_test = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + clf = StackingClassifier(estimators=[("ridge", Ridge())]) + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + assert clf.score(X_test, y_test) > 0.8 + + +def test_stacking_final_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the final estimator + does not implement the `decision_function` method, which is decorated with + `available_if`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + X, y = make_classification(random_state=42) + + estimators = [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(n_estimators=2, random_state=42)), + ] + # RandomForestClassifier does not implement 'decision_function' and should raise + # an AttributeError + final_estimator = RandomForestClassifier(n_estimators=2, random_state=42) + clf = StackingClassifier( + estimators=estimators, final_estimator=final_estimator, cv=3 + ) + + outer_msg = "This 'StackingClassifier' has no attribute 'decision_function'" + inner_msg = "'RandomForestClassifier' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + clf.fit(X, y).decision_function(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +def test_routing_passed_metadata_not_supported(Estimator, Child): + """Test that the right error message is raised when metadata is passed while + not supported when `enable_metadata_routing=False`.""" + + with pytest.raises( + ValueError, match="is only supported if enable_metadata_routing=True" + ): + Estimator(["clf", Child()]).fit( + X_iris, y_iris, sample_weight=[1, 1, 1, 1, 1], metadata="a" + ) + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +@config_context(enable_metadata_routing=True) +def test_get_metadata_routing_without_fit(Estimator, Child): + # Test that metadata_routing() doesn't raise when called before fit. 
+ est = Estimator([("sub_est", Child())]) + est.get_metadata_routing() + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +@pytest.mark.parametrize( + "prop, prop_value", [("sample_weight", np.ones(X_iris.shape[0])), ("metadata", "a")] +) +@config_context(enable_metadata_routing=True) +def test_metadata_routing_for_stacking_estimators(Estimator, Child, prop, prop_value): + """Test that metadata is routed correctly for Stacking*.""" + + est = Estimator( + [ + ( + "sub_est1", + Child(registry=_Registry()).set_fit_request(**{prop: True}), + ), + ( + "sub_est2", + Child(registry=_Registry()).set_fit_request(**{prop: True}), + ), + ], + final_estimator=Child(registry=_Registry()).set_predict_request(**{prop: True}), + ) + + est.fit(X_iris, y_iris, **{prop: prop_value}) + est.fit_transform(X_iris, y_iris, **{prop: prop_value}) + + est.predict(X_iris, **{prop: prop_value}) + + for estimator in est.estimators: + # access sub-estimator in (name, est) with estimator[1]: + registry = estimator[1].registry + assert len(registry) + for sub_est in registry: + check_recorded_metadata( + obj=sub_est, + method="fit", + parent="fit", + split_params=(prop), + **{prop: prop_value}, + ) + # access final_estimator: + registry = est.final_estimator_.registry + assert len(registry) + check_recorded_metadata( + obj=registry[-1], + method="predict", + parent="predict", + split_params=(prop), + **{prop: prop_value}, + ) + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +@config_context(enable_metadata_routing=True) +def test_metadata_routing_error_for_stacking_estimators(Estimator, Child): + """Test that the right error is raised when metadata is not requested.""" + sample_weight, metadata = np.ones(X_iris.shape[0]), "a" + + est = Estimator([("sub_est", Child())]) + + error_message = ( + 
"[sample_weight, metadata] are passed but are not explicitly set as requested" + f" or not requested for {Child.__name__}.fit" + ) + + with pytest.raises(ValueError, match=re.escape(error_message)): + est.fit(X_iris, y_iris, sample_weight=sample_weight, metadata=metadata) + + +# End of Metadata Routing Tests +# ============================= diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py new file mode 100644 index 0000000000000000000000000000000000000000..bb0d34bcd7d162d9e09d9ffcc36463613185d77b --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py @@ -0,0 +1,787 @@ +"""Testing for the VotingClassifier and VotingRegressor""" + +import re + +import numpy as np +import pytest + +from sklearn import config_context, datasets +from sklearn.base import BaseEstimator, ClassifierMixin, clone +from sklearn.datasets import make_multilabel_classification +from sklearn.dummy import DummyRegressor +from sklearn.ensemble import ( + RandomForestClassifier, + RandomForestRegressor, + VotingClassifier, + VotingRegressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.multiclass import OneVsRestClassifier +from sklearn.naive_bayes import GaussianNB +from sklearn.neighbors import KNeighborsClassifier +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC +from sklearn.tests.metadata_routing_common import ( + ConsumingClassifier, + ConsumingRegressor, + _Registry, + check_recorded_metadata, +) +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +# Load datasets +iris = 
datasets.load_iris() +X, y = iris.data[:, 1:3], iris.target +# Scaled to solve ConvergenceWarning throw by Logistic Regression +X_scaled = StandardScaler().fit_transform(X) + +X_r, y_r = datasets.load_diabetes(return_X_y=True) + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"estimators": []}, + "Invalid 'estimators' attribute, 'estimators' should be a non-empty list", + ), + ( + {"estimators": [("lr", LogisticRegression())], "weights": [1, 2]}, + "Number of `estimators` and weights must be equal", + ), + ], +) +def test_voting_classifier_estimator_init(params, err_msg): + ensemble = VotingClassifier(**params) + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + +def test_predictproba_hardvoting(): + eclf = VotingClassifier( + estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())], + voting="hard", + ) + + inner_msg = "predict_proba is not available when voting='hard'" + outer_msg = "'VotingClassifier' has no attribute 'predict_proba'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + eclf.predict_proba + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + assert not hasattr(eclf, "predict_proba") + eclf.fit(X_scaled, y) + assert not hasattr(eclf, "predict_proba") + + +def test_notfitted(): + eclf = VotingClassifier( + estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())], + voting="soft", + ) + ereg = VotingRegressor([("dr", DummyRegressor())]) + msg = ( + "This %s instance is not fitted yet. Call 'fit'" + " with appropriate arguments before using this estimator." 
+ ) + with pytest.raises(NotFittedError, match=msg % "VotingClassifier"): + eclf.predict(X) + with pytest.raises(NotFittedError, match=msg % "VotingClassifier"): + eclf.predict_proba(X) + with pytest.raises(NotFittedError, match=msg % "VotingClassifier"): + eclf.transform(X) + with pytest.raises(NotFittedError, match=msg % "VotingRegressor"): + ereg.predict(X_r) + with pytest.raises(NotFittedError, match=msg % "VotingRegressor"): + ereg.transform(X_r) + + +def test_majority_label_iris(global_random_seed): + """Check classification by majority label on dataset iris.""" + clf1 = LogisticRegression(solver="liblinear", random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard" + ) + scores = cross_val_score(eclf, X, y, scoring="accuracy") + + assert scores.mean() >= 0.9 + + +def test_tie_situation(): + """Check voting classifier selects smaller class label in tie situation.""" + clf1 = LogisticRegression(random_state=123, solver="liblinear") + clf2 = RandomForestClassifier(random_state=123) + eclf = VotingClassifier(estimators=[("lr", clf1), ("rf", clf2)], voting="hard") + assert clf1.fit(X, y).predict(X)[73] == 2 + assert clf2.fit(X, y).predict(X)[73] == 1 + assert eclf.fit(X, y).predict(X)[73] == 1 + + +def test_weights_iris(global_random_seed): + """Check classification by average probabilities on dataset iris.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + weights=[1, 2, 10], + ) + scores = cross_val_score(eclf, X_scaled, y, scoring="accuracy") + assert scores.mean() >= 0.9 + + +def test_weights_regressor(): + """Check weighted average regression prediction on diabetes 
dataset.""" + reg1 = DummyRegressor(strategy="mean") + reg2 = DummyRegressor(strategy="median") + reg3 = DummyRegressor(strategy="quantile", quantile=0.2) + ereg = VotingRegressor( + [("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 2, 10] + ) + + X_r_train, X_r_test, y_r_train, y_r_test = train_test_split( + X_r, y_r, test_size=0.25 + ) + + reg1_pred = reg1.fit(X_r_train, y_r_train).predict(X_r_test) + reg2_pred = reg2.fit(X_r_train, y_r_train).predict(X_r_test) + reg3_pred = reg3.fit(X_r_train, y_r_train).predict(X_r_test) + ereg_pred = ereg.fit(X_r_train, y_r_train).predict(X_r_test) + + avg = np.average( + np.asarray([reg1_pred, reg2_pred, reg3_pred]), axis=0, weights=[1, 2, 10] + ) + assert_almost_equal(ereg_pred, avg, decimal=2) + + ereg_weights_none = VotingRegressor( + [("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=None + ) + ereg_weights_equal = VotingRegressor( + [("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 1, 1] + ) + ereg_weights_none.fit(X_r_train, y_r_train) + ereg_weights_equal.fit(X_r_train, y_r_train) + ereg_none_pred = ereg_weights_none.predict(X_r_test) + ereg_equal_pred = ereg_weights_equal.predict(X_r_test) + assert_almost_equal(ereg_none_pred, ereg_equal_pred, decimal=2) + + +def test_predict_on_toy_problem(global_random_seed): + """Manually check predicted class labels for toy dataset.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + + X = np.array( + [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]] + ) + + y = np.array([1, 1, 1, 2, 2, 2]) + + assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + 
voting="hard", + weights=[1, 1, 1], + ) + assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + weights=[1, 1, 1], + ) + assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + + +def test_predict_proba_on_toy_problem(): + """Calculate predicted probabilities on toy dataset.""" + clf1 = LogisticRegression(random_state=123) + clf2 = RandomForestClassifier(random_state=123) + clf3 = GaussianNB() + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = np.array([1, 1, 2, 2]) + + clf1_res = np.array( + [ + [0.59790391, 0.40209609], + [0.57622162, 0.42377838], + [0.50728456, 0.49271544], + [0.40241774, 0.59758226], + ] + ) + + clf2_res = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.3, 0.7]]) + + clf3_res = np.array( + [[0.9985082, 0.0014918], [0.99845843, 0.00154157], [0.0, 1.0], [0.0, 1.0]] + ) + + t00 = (2 * clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4 + t11 = (2 * clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4 + t21 = (2 * clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4 + t31 = (2 * clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4 + + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + weights=[2, 1, 1], + ) + eclf_res = eclf.fit(X, y).predict_proba(X) + + assert_almost_equal(t00, eclf_res[0][0], decimal=1) + assert_almost_equal(t11, eclf_res[1][1], decimal=1) + assert_almost_equal(t21, eclf_res[2][1], decimal=1) + assert_almost_equal(t31, eclf_res[3][1], decimal=1) + + inner_msg = "predict_proba is not available when voting='hard'" + outer_msg = "'VotingClassifier' has no attribute 'predict_proba'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard" + ) + eclf.fit(X, y).predict_proba(X) + + assert 
isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + +def test_multilabel(): + """Check if error is raised for multilabel classification.""" + X, y = make_multilabel_classification( + n_classes=2, n_labels=1, allow_unlabeled=False, random_state=123 + ) + clf = OneVsRestClassifier(SVC(kernel="linear")) + + eclf = VotingClassifier(estimators=[("ovr", clf)], voting="hard") + + try: + eclf.fit(X, y) + except NotImplementedError: + return + + +def test_gridsearch(): + """Check GridSearch support.""" + clf1 = LogisticRegression(random_state=1) + clf2 = RandomForestClassifier(random_state=1, n_estimators=3) + clf3 = GaussianNB() + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft" + ) + + params = { + "lr__C": [1.0, 100.0], + "voting": ["soft", "hard"], + "weights": [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]], + } + + grid = GridSearchCV(estimator=eclf, param_grid=params, cv=2) + grid.fit(X_scaled, y) + + +def test_parallel_fit(global_random_seed): + """Check parallel backend of VotingClassifier on toy dataset.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = np.array([1, 1, 2, 2]) + + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=1 + ).fit(X, y) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=2 + ).fit(X, y) + + assert_array_equal(eclf1.predict(X), eclf2.predict(X)) + assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) + + +# TODO(1.7): remove warning filter when sample_weight is kwarg only +@pytest.mark.filterwarnings("ignore::FutureWarning") +def test_sample_weight(global_random_seed): + """Tests sample_weight parameter of VotingClassifier""" + 
clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = SVC(probability=True, random_state=global_random_seed) + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft" + ).fit(X_scaled, y, sample_weight=np.ones((len(y),))) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft" + ).fit(X_scaled, y) + assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled)) + assert_array_almost_equal( + eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled) + ) + sample_weight = np.random.RandomState(global_random_seed).uniform(size=(len(y),)) + eclf3 = VotingClassifier(estimators=[("lr", clf1)], voting="soft") + eclf3.fit(X_scaled, y, sample_weight) + clf1.fit(X_scaled, y, sample_weight) + assert_array_equal(eclf3.predict(X_scaled), clf1.predict(X_scaled)) + assert_array_almost_equal( + eclf3.predict_proba(X_scaled), clf1.predict_proba(X_scaled) + ) + + # check that an error is raised and indicative if sample_weight is not + # supported. + clf4 = KNeighborsClassifier() + eclf3 = VotingClassifier( + estimators=[("lr", clf1), ("svc", clf3), ("knn", clf4)], voting="soft" + ) + msg = "Underlying estimator KNeighborsClassifier does not support sample weights." 
    with pytest.raises(TypeError, match=msg):
        eclf3.fit(X_scaled, y, sample_weight)

    # Check that a TypeError unrelated to sample_weight propagates unchanged
    # (it must not be rewritten into the "does not support sample weights"
    # message asserted above).
    # NOTE(review): this calls clf.fit directly rather than going through the
    # ensemble / _fit_single_estimator — confirm this exercises the intended
    # code path.
    class ClassifierErrorFit(ClassifierMixin, BaseEstimator):
        def fit(self, X_scaled, y, sample_weight):
            raise TypeError("Error unrelated to sample_weight.")

    clf = ClassifierErrorFit()
    with pytest.raises(TypeError, match="Error unrelated to sample_weight"):
        clf.fit(X_scaled, y, sample_weight=sample_weight)


def test_sample_weight_kwargs():
    """Check that VotingClassifier passes sample_weight as kwargs"""

    class MockClassifier(ClassifierMixin, BaseEstimator):
        """Mock Classifier to check that sample_weight is received as kwargs"""

        def fit(self, X, y, *args, **sample_weight):
            # Captures keyword arguments; fit must have been called with
            # sample_weight passed by keyword for this assertion to hold.
            assert "sample_weight" in sample_weight

    clf = MockClassifier()
    eclf = VotingClassifier(estimators=[("mock", clf)], voting="soft")

    # Should not raise an error.
+ eclf.fit(X, y, sample_weight=np.ones((len(y),))) + + +def test_voting_classifier_set_params(global_random_seed): + # check equivalence in the output when setting underlying estimators + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier( + n_estimators=10, random_state=global_random_seed, max_depth=None + ) + clf3 = GaussianNB() + + eclf1 = VotingClassifier( + [("lr", clf1), ("rf", clf2)], voting="soft", weights=[1, 2] + ).fit(X_scaled, y) + eclf2 = VotingClassifier( + [("lr", clf1), ("nb", clf3)], voting="soft", weights=[1, 2] + ) + eclf2.set_params(nb=clf2).fit(X_scaled, y) + + assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled)) + assert_array_almost_equal( + eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled) + ) + assert eclf2.estimators[0][1].get_params() == clf1.get_params() + assert eclf2.estimators[1][1].get_params() == clf2.get_params() + + +def test_set_estimator_drop(): + # VotingClassifier set_params should be able to set estimators as drop + # Test predict + clf1 = LogisticRegression(random_state=123) + clf2 = RandomForestClassifier(n_estimators=10, random_state=123) + clf3 = GaussianNB() + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)], + voting="hard", + weights=[1, 0, 0.5], + ).fit(X, y) + + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)], + voting="hard", + weights=[1, 1, 0.5], + ) + eclf2.set_params(rf="drop").fit(X, y) + + assert_array_equal(eclf1.predict(X), eclf2.predict(X)) + + assert dict(eclf2.estimators)["rf"] == "drop" + assert len(eclf2.estimators_) == 2 + assert all( + isinstance(est, (LogisticRegression, GaussianNB)) for est in eclf2.estimators_ + ) + assert eclf2.get_params()["rf"] == "drop" + + eclf1.set_params(voting="soft").fit(X, y) + eclf2.set_params(voting="soft").fit(X, y) + + assert_array_equal(eclf1.predict(X), eclf2.predict(X)) + assert_array_almost_equal(eclf1.predict_proba(X), 
eclf2.predict_proba(X)) + msg = "All estimators are dropped. At least one is required" + with pytest.raises(ValueError, match=msg): + eclf2.set_params(lr="drop", rf="drop", nb="drop").fit(X, y) + + # Test soft voting transform + X1 = np.array([[1], [2]]) + y1 = np.array([1, 2]) + eclf1 = VotingClassifier( + estimators=[("rf", clf2), ("nb", clf3)], + voting="soft", + weights=[0, 0.5], + flatten_transform=False, + ).fit(X1, y1) + + eclf2 = VotingClassifier( + estimators=[("rf", clf2), ("nb", clf3)], + voting="soft", + weights=[1, 0.5], + flatten_transform=False, + ) + eclf2.set_params(rf="drop").fit(X1, y1) + assert_array_almost_equal( + eclf1.transform(X1), + np.array([[[0.7, 0.3], [0.3, 0.7]], [[1.0, 0.0], [0.0, 1.0]]]), + ) + assert_array_almost_equal(eclf2.transform(X1), np.array([[[1.0, 0.0], [0.0, 1.0]]])) + eclf1.set_params(voting="hard") + eclf2.set_params(voting="hard") + assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]])) + assert_array_equal(eclf2.transform(X1), np.array([[0], [1]])) + + +def test_estimator_weights_format(global_random_seed): + # Test estimator weights inputs as list and array + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2)], weights=[1, 2], voting="soft" + ) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2)], weights=np.array((1, 2)), voting="soft" + ) + eclf1.fit(X_scaled, y) + eclf2.fit(X_scaled, y) + assert_array_almost_equal( + eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled) + ) + + +def test_transform(global_random_seed): + """Check transform method of VotingClassifier on toy dataset.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = 
np.array([1, 1, 2, 2]) + + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft" + ).fit(X, y) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + flatten_transform=True, + ).fit(X, y) + eclf3 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + flatten_transform=False, + ).fit(X, y) + + assert_array_equal(eclf1.transform(X).shape, (4, 6)) + assert_array_equal(eclf2.transform(X).shape, (4, 6)) + assert_array_equal(eclf3.transform(X).shape, (3, 4, 2)) + assert_array_almost_equal(eclf1.transform(X), eclf2.transform(X)) + assert_array_almost_equal( + eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)), eclf2.transform(X) + ) + + +@pytest.mark.parametrize( + "X, y, voter", + [ + ( + X, + y, + VotingClassifier( + [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(n_estimators=5)), + ] + ), + ), + ( + X_r, + y_r, + VotingRegressor( + [ + ("lr", LinearRegression()), + ("rf", RandomForestRegressor(n_estimators=5)), + ] + ), + ), + ], +) +def test_none_estimator_with_weights(X, y, voter): + # check that an estimator can be set to 'drop' and passing some weight + # regression test for + # https://github.com/scikit-learn/scikit-learn/issues/13777 + voter = clone(voter) + # Scaled to solve ConvergenceWarning throw by Logistic Regression + X_scaled = StandardScaler().fit_transform(X) + voter.fit(X_scaled, y, sample_weight=np.ones(y.shape)) + voter.set_params(lr="drop") + voter.fit(X_scaled, y, sample_weight=np.ones(y.shape)) + y_pred = voter.predict(X_scaled) + assert y_pred.shape == y.shape + + +@pytest.mark.parametrize( + "est", + [ + VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("tree", DecisionTreeRegressor(random_state=0)), + ] + ), + VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("tree", DecisionTreeClassifier(random_state=0)), + ] + ), + ], + ids=["VotingRegressor", 
"VotingClassifier"], +) +def test_n_features_in(est): + X = [[1, 2], [3, 4], [5, 6]] + y = [0, 1, 2] + + assert not hasattr(est, "n_features_in_") + est.fit(X, y) + assert est.n_features_in_ == 2 + + +@pytest.mark.parametrize( + "estimator", + [ + VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("rf", RandomForestRegressor(random_state=123)), + ], + verbose=True, + ), + VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=123)), + ("rf", RandomForestClassifier(random_state=123)), + ], + verbose=True, + ), + ], +) +def test_voting_verbose(estimator, capsys): + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = np.array([1, 1, 2, 2]) + + pattern = ( + r"\[Voting\].*\(1 of 2\) Processing lr, total=.*\n" + r"\[Voting\].*\(2 of 2\) Processing rf, total=.*\n$" + ) + clone(estimator).fit(X, y) + assert re.match(pattern, capsys.readouterr()[0]) + + +def test_get_features_names_out_regressor(): + """Check get_feature_names_out output for regressor.""" + + X = [[1, 2], [3, 4], [5, 6]] + y = [0, 1, 2] + + voting = VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("tree", DecisionTreeRegressor(random_state=0)), + ("ignore", "drop"), + ] + ) + voting.fit(X, y) + + names_out = voting.get_feature_names_out() + expected_names = ["votingregressor_lr", "votingregressor_tree"] + assert_array_equal(names_out, expected_names) + + +@pytest.mark.parametrize( + "kwargs, expected_names", + [ + ( + {"voting": "soft", "flatten_transform": True}, + [ + "votingclassifier_lr0", + "votingclassifier_lr1", + "votingclassifier_lr2", + "votingclassifier_tree0", + "votingclassifier_tree1", + "votingclassifier_tree2", + ], + ), + ({"voting": "hard"}, ["votingclassifier_lr", "votingclassifier_tree"]), + ], +) +def test_get_features_names_out_classifier(kwargs, expected_names): + """Check get_feature_names_out for classifier for different settings.""" + X = [[1, 2], [3, 4], [5, 6], [1, 1.2]] + y = [0, 1, 2, 0] + + voting = 
VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("tree", DecisionTreeClassifier(random_state=0)), + ], + **kwargs, + ) + voting.fit(X, y) + X_trans = voting.transform(X) + names_out = voting.get_feature_names_out() + + assert X_trans.shape[1] == len(expected_names) + assert_array_equal(names_out, expected_names) + + +def test_get_features_names_out_classifier_error(): + """Check that error is raised when voting="soft" and flatten_transform=False.""" + X = [[1, 2], [3, 4], [5, 6]] + y = [0, 1, 2] + + voting = VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("tree", DecisionTreeClassifier(random_state=0)), + ], + voting="soft", + flatten_transform=False, + ) + voting.fit(X, y) + + msg = ( + "get_feature_names_out is not supported when `voting='soft'` and " + "`flatten_transform=False`" + ) + with pytest.raises(ValueError, match=msg): + voting.get_feature_names_out() + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.parametrize( + "Estimator, Child", + [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)], +) +def test_routing_passed_metadata_not_supported(Estimator, Child): + """Test that the right error message is raised when metadata is passed while + not supported when `enable_metadata_routing=False`.""" + + X = np.array([[0, 1], [2, 2], [4, 6]]) + y = [1, 2, 3] + + with pytest.raises( + ValueError, match="is only supported if enable_metadata_routing=True" + ): + Estimator(["clf", Child()]).fit(X, y, sample_weight=[1, 1, 1], metadata="a") + + +@pytest.mark.parametrize( + "Estimator, Child", + [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)], +) +@config_context(enable_metadata_routing=True) +def test_get_metadata_routing_without_fit(Estimator, Child): + # Test that metadata_routing() doesn't raise when called before fit. 
+ est = Estimator([("sub_est", Child())]) + est.get_metadata_routing() + + +@pytest.mark.parametrize( + "Estimator, Child", + [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)], +) +@pytest.mark.parametrize("prop", ["sample_weight", "metadata"]) +@config_context(enable_metadata_routing=True) +def test_metadata_routing_for_voting_estimators(Estimator, Child, prop): + """Test that metadata is routed correctly for Voting*.""" + X = np.array([[0, 1], [2, 2], [4, 6]]) + y = [1, 2, 3] + sample_weight, metadata = [1, 1, 1], "a" + + est = Estimator( + [ + ( + "sub_est1", + Child(registry=_Registry()).set_fit_request(**{prop: True}), + ), + ( + "sub_est2", + Child(registry=_Registry()).set_fit_request(**{prop: True}), + ), + ] + ) + + est.fit(X, y, **{prop: sample_weight if prop == "sample_weight" else metadata}) + + for estimator in est.estimators: + if prop == "sample_weight": + kwargs = {prop: sample_weight} + else: + kwargs = {prop: metadata} + # access sub-estimator in (name, est) with estimator[1] + registry = estimator[1].registry + assert len(registry) + for sub_est in registry: + check_recorded_metadata(obj=sub_est, method="fit", parent="fit", **kwargs) + + +@pytest.mark.parametrize( + "Estimator, Child", + [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)], +) +@config_context(enable_metadata_routing=True) +def test_metadata_routing_error_for_voting_estimators(Estimator, Child): + """Test that the right error is raised when metadata is not requested.""" + X = np.array([[0, 1], [2, 2], [4, 6]]) + y = [1, 2, 3] + sample_weight, metadata = [1, 1, 1], "a" + + est = Estimator([("sub_est", Child())]) + + error_message = ( + "[sample_weight, metadata] are passed but are not explicitly set as requested" + f" or not requested for {Child.__name__}.fit" + ) + + with pytest.raises(ValueError, match=re.escape(error_message)): + est.fit(X, y, sample_weight=sample_weight, metadata=metadata) + + +# End of Metadata 
Routing Tests +# ============================= diff --git a/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..55825c438d76b29b74d8108970f72e3ebaa5e745 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py @@ -0,0 +1,639 @@ +"""Testing for the boost module (sklearn.ensemble.boost).""" + +import re + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator, clone +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor +from sklearn.ensemble._weight_boosting import _samme_proba +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import GridSearchCV, train_test_split +from sklearn.svm import SVC, SVR +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils import shuffle +from sklearn.utils._mocking import NoSampleWeightWrapper +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +# Common random state +rng = np.random.RandomState(0) + +# Toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels +y_regr = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +y_t_class = ["foo", 1, 1] +y_t_regr = [-1, 1, 1] + +# Load the iris dataset and randomly permute it +iris = datasets.load_iris() +perm = rng.permutation(iris.target.size) +iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng) + +# Load the diabetes dataset and randomly permute it +diabetes = 
datasets.load_diabetes()
diabetes.data, diabetes.target = shuffle(
    diabetes.data, diabetes.target, random_state=rng
)


def test_samme_proba():
    # Test the `_samme_proba` helper function.

    # Define some example (bad) `predict_proba` output.
    probs = np.array(
        [[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]]
    )
    probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]

    # _samme_proba calls estimator.predict_proba.
    # Make a mock object so I can control what gets returned.
    class MockEstimator:
        def predict_proba(self, X):
            assert_array_equal(X.shape, probs.shape)
            return probs

    mock = MockEstimator()

    samme_proba = _samme_proba(mock, 3, np.ones_like(probs))

    # Output must keep the input's shape and contain no NaN/inf even though
    # the fake probabilities include zeros and negatives.
    assert_array_equal(samme_proba.shape, probs.shape)
    assert np.isfinite(samme_proba).all()

    # Make sure that the correct elements come out as smallest --
    # `_samme_proba` should preserve the ordering in each example.
    assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
    assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])


def test_oneclass_adaboost_proba():
    # Test predict_proba robustness for one class label input.
    # In response to issue #7501
    # https://github.com/scikit-learn/scikit-learn/issues/7501
    y_t = np.ones(len(X))
    clf = AdaBoostClassifier().fit(X, y_t)
    assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1)))


def test_classification_toy():
    # Check classification on a toy dataset.
    clf = AdaBoostClassifier(random_state=0)
    clf.fit(X, y_class)
    assert_array_equal(clf.predict(T), y_t_class)
    assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
    assert clf.predict_proba(T).shape == (len(T), 2)
    assert clf.decision_function(T).shape == (len(T),)


def test_regression_toy():
    # Check regression on a toy dataset.
+ clf = AdaBoostRegressor(random_state=0) + clf.fit(X, y_regr) + assert_array_equal(clf.predict(T), y_t_regr) + + +def test_iris(): + # Check consistency on dataset iris. + classes = np.unique(iris.target) + + clf = AdaBoostClassifier() + clf.fit(iris.data, iris.target) + + assert_array_equal(classes, clf.classes_) + proba = clf.predict_proba(iris.data) + + assert proba.shape[1] == len(classes) + assert clf.decision_function(iris.data).shape[1] == len(classes) + + score = clf.score(iris.data, iris.target) + assert score > 0.9, f"Failed with {score = }" + + # Check we used multiple estimators + assert len(clf.estimators_) > 1 + # Check for distinct random states (see issue #7408) + assert len(set(est.random_state for est in clf.estimators_)) == len(clf.estimators_) + + +@pytest.mark.parametrize("loss", ["linear", "square", "exponential"]) +def test_diabetes(loss): + # Check consistency on dataset diabetes. + reg = AdaBoostRegressor(loss=loss, random_state=0) + reg.fit(diabetes.data, diabetes.target) + score = reg.score(diabetes.data, diabetes.target) + assert score > 0.55 + + # Check we used multiple estimators + assert len(reg.estimators_) > 1 + # Check for distinct random states (see issue #7408) + assert len(set(est.random_state for est in reg.estimators_)) == len(reg.estimators_) + + +def test_staged_predict(): + # Check staged predictions. 
+ rng = np.random.RandomState(0) + iris_weights = rng.randint(10, size=iris.target.shape) + diabetes_weights = rng.randint(10, size=diabetes.target.shape) + + clf = AdaBoostClassifier(n_estimators=10) + clf.fit(iris.data, iris.target, sample_weight=iris_weights) + + predictions = clf.predict(iris.data) + staged_predictions = [p for p in clf.staged_predict(iris.data)] + proba = clf.predict_proba(iris.data) + staged_probas = [p for p in clf.staged_predict_proba(iris.data)] + score = clf.score(iris.data, iris.target, sample_weight=iris_weights) + staged_scores = [ + s for s in clf.staged_score(iris.data, iris.target, sample_weight=iris_weights) + ] + + assert len(staged_predictions) == 10 + assert_array_almost_equal(predictions, staged_predictions[-1]) + assert len(staged_probas) == 10 + assert_array_almost_equal(proba, staged_probas[-1]) + assert len(staged_scores) == 10 + assert_array_almost_equal(score, staged_scores[-1]) + + # AdaBoost regression + clf = AdaBoostRegressor(n_estimators=10, random_state=0) + clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights) + + predictions = clf.predict(diabetes.data) + staged_predictions = [p for p in clf.staged_predict(diabetes.data)] + score = clf.score(diabetes.data, diabetes.target, sample_weight=diabetes_weights) + staged_scores = [ + s + for s in clf.staged_score( + diabetes.data, diabetes.target, sample_weight=diabetes_weights + ) + ] + + assert len(staged_predictions) == 10 + assert_array_almost_equal(predictions, staged_predictions[-1]) + assert len(staged_scores) == 10 + assert_array_almost_equal(score, staged_scores[-1]) + + +def test_gridsearch(): + # Check that base trees can be grid-searched. 
+ # AdaBoost classification + boost = AdaBoostClassifier(estimator=DecisionTreeClassifier()) + parameters = { + "n_estimators": (1, 2), + "estimator__max_depth": (1, 2), + } + clf = GridSearchCV(boost, parameters) + clf.fit(iris.data, iris.target) + + # AdaBoost regression + boost = AdaBoostRegressor(estimator=DecisionTreeRegressor(), random_state=0) + parameters = {"n_estimators": (1, 2), "estimator__max_depth": (1, 2)} + clf = GridSearchCV(boost, parameters) + clf.fit(diabetes.data, diabetes.target) + + +def test_pickle(): + # Check pickability. + import pickle + + # Adaboost classifier + obj = AdaBoostClassifier() + obj.fit(iris.data, iris.target) + score = obj.score(iris.data, iris.target) + s = pickle.dumps(obj) + + obj2 = pickle.loads(s) + assert type(obj2) == obj.__class__ + score2 = obj2.score(iris.data, iris.target) + assert score == score2 + + # Adaboost regressor + obj = AdaBoostRegressor(random_state=0) + obj.fit(diabetes.data, diabetes.target) + score = obj.score(diabetes.data, diabetes.target) + s = pickle.dumps(obj) + + obj2 = pickle.loads(s) + assert type(obj2) == obj.__class__ + score2 = obj2.score(diabetes.data, diabetes.target) + assert score == score2 + + +def test_importances(): + # Check variable importances. + X, y = datasets.make_classification( + n_samples=2000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=1, + ) + + clf = AdaBoostClassifier() + + clf.fit(X, y) + importances = clf.feature_importances_ + + assert importances.shape[0] == 10 + assert (importances[:3, np.newaxis] >= importances[3:]).all() + + +def test_adaboost_classifier_sample_weight_error(): + # Test that it gives proper exception on incorrect sample weight. + clf = AdaBoostClassifier() + msg = re.escape("sample_weight.shape == (1,), expected (6,)") + with pytest.raises(ValueError, match=msg): + clf.fit(X, y_class, sample_weight=np.asarray([-1])) + + +def test_estimator(): + # Test different estimators. 
+ from sklearn.ensemble import RandomForestClassifier + + # XXX doesn't work with y_class because RF doesn't support classes_ + # Shouldn't AdaBoost run a LabelBinarizer? + clf = AdaBoostClassifier(RandomForestClassifier()) + clf.fit(X, y_regr) + + clf = AdaBoostClassifier(SVC()) + clf.fit(X, y_class) + + from sklearn.ensemble import RandomForestRegressor + + clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0) + clf.fit(X, y_regr) + + clf = AdaBoostRegressor(SVR(), random_state=0) + clf.fit(X, y_regr) + + # Check that an empty discrete ensemble fails in fit, not predict. + X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]] + y_fail = ["foo", "bar", 1, 2] + clf = AdaBoostClassifier(SVC()) + with pytest.raises(ValueError, match="worse than random"): + clf.fit(X_fail, y_fail) + + +def test_sample_weights_infinite(): + msg = "Sample weights have reached infinite values" + clf = AdaBoostClassifier(n_estimators=30, learning_rate=23.0) + with pytest.warns(UserWarning, match=msg): + clf.fit(iris.data, iris.target) + + +@pytest.mark.parametrize( + "sparse_container, expected_internal_type", + zip( + [ + *CSC_CONTAINERS, + *CSR_CONTAINERS, + *LIL_CONTAINERS, + *COO_CONTAINERS, + *DOK_CONTAINERS, + ], + CSC_CONTAINERS + 4 * CSR_CONTAINERS, + ), +) +def test_sparse_classification(sparse_container, expected_internal_type): + # Check classification with sparse input. 
+ + class CustomSVC(SVC): + """SVC variant that records the nature of the training set.""" + + def fit(self, X, y, sample_weight=None): + """Modification on fit caries data type for later verification.""" + super().fit(X, y, sample_weight=sample_weight) + self.data_type_ = type(X) + return self + + X, y = datasets.make_multilabel_classification( + n_classes=1, n_samples=15, n_features=5, random_state=42 + ) + # Flatten y to a 1d array + y = np.ravel(y) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + + # Trained on sparse format + sparse_classifier = AdaBoostClassifier( + estimator=CustomSVC(probability=True), + random_state=1, + ).fit(X_train_sparse, y_train) + + # Trained on dense format + dense_classifier = AdaBoostClassifier( + estimator=CustomSVC(probability=True), + random_state=1, + ).fit(X_train, y_train) + + # predict + sparse_clf_results = sparse_classifier.predict(X_test_sparse) + dense_clf_results = dense_classifier.predict(X_test) + assert_array_equal(sparse_clf_results, dense_clf_results) + + # decision_function + sparse_clf_results = sparse_classifier.decision_function(X_test_sparse) + dense_clf_results = dense_classifier.decision_function(X_test) + assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # predict_log_proba + sparse_clf_results = sparse_classifier.predict_log_proba(X_test_sparse) + dense_clf_results = dense_classifier.predict_log_proba(X_test) + assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # predict_proba + sparse_clf_results = sparse_classifier.predict_proba(X_test_sparse) + dense_clf_results = dense_classifier.predict_proba(X_test) + assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # score + sparse_clf_results = sparse_classifier.score(X_test_sparse, y_test) + dense_clf_results = dense_classifier.score(X_test, y_test) + 
assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # staged_decision_function + sparse_clf_results = sparse_classifier.staged_decision_function(X_test_sparse) + dense_clf_results = dense_classifier.staged_decision_function(X_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_almost_equal(sparse_clf_res, dense_clf_res) + + # staged_predict + sparse_clf_results = sparse_classifier.staged_predict(X_test_sparse) + dense_clf_results = dense_classifier.staged_predict(X_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_equal(sparse_clf_res, dense_clf_res) + + # staged_predict_proba + sparse_clf_results = sparse_classifier.staged_predict_proba(X_test_sparse) + dense_clf_results = dense_classifier.staged_predict_proba(X_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_almost_equal(sparse_clf_res, dense_clf_res) + + # staged_score + sparse_clf_results = sparse_classifier.staged_score(X_test_sparse, y_test) + dense_clf_results = dense_classifier.staged_score(X_test, y_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_equal(sparse_clf_res, dense_clf_res) + + # Verify sparsity of data is maintained during training + types = [i.data_type_ for i in sparse_classifier.estimators_] + + assert all([t == expected_internal_type for t in types]) + + +@pytest.mark.parametrize( + "sparse_container, expected_internal_type", + zip( + [ + *CSC_CONTAINERS, + *CSR_CONTAINERS, + *LIL_CONTAINERS, + *COO_CONTAINERS, + *DOK_CONTAINERS, + ], + CSC_CONTAINERS + 4 * CSR_CONTAINERS, + ), +) +def test_sparse_regression(sparse_container, expected_internal_type): + # Check regression with sparse input. 
+ + class CustomSVR(SVR): + """SVR variant that records the nature of the training set.""" + + def fit(self, X, y, sample_weight=None): + """Modification on fit caries data type for later verification.""" + super().fit(X, y, sample_weight=sample_weight) + self.data_type_ = type(X) + return self + + X, y = datasets.make_regression( + n_samples=15, n_features=50, n_targets=1, random_state=42 + ) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + + # Trained on sparse format + sparse_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit( + X_train_sparse, y_train + ) + + # Trained on dense format + dense_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit( + X_train, y_train + ) + + # predict + sparse_regr_results = sparse_regressor.predict(X_test_sparse) + dense_regr_results = dense_regressor.predict(X_test) + assert_array_almost_equal(sparse_regr_results, dense_regr_results) + + # staged_predict + sparse_regr_results = sparse_regressor.staged_predict(X_test_sparse) + dense_regr_results = dense_regressor.staged_predict(X_test) + for sparse_regr_res, dense_regr_res in zip(sparse_regr_results, dense_regr_results): + assert_array_almost_equal(sparse_regr_res, dense_regr_res) + + types = [i.data_type_ for i in sparse_regressor.estimators_] + + assert all([t == expected_internal_type for t in types]) + + +def test_sample_weight_adaboost_regressor(): + """ + AdaBoostRegressor should work without sample_weights in the base estimator + The random weighted sampling is done internally in the _boost method in + AdaBoostRegressor. 
+ """ + + class DummyEstimator(BaseEstimator): + def fit(self, X, y): + pass + + def predict(self, X): + return np.zeros(X.shape[0]) + + boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3) + boost.fit(X, y_regr) + assert len(boost.estimator_weights_) == len(boost.estimator_errors_) + + +def test_multidimensional_X(): + """ + Check that the AdaBoost estimators can work with n-dimensional + data matrix + """ + rng = np.random.RandomState(0) + + X = rng.randn(51, 3, 3) + yc = rng.choice([0, 1], 51) + yr = rng.randn(51) + + boost = AdaBoostClassifier(DummyClassifier(strategy="most_frequent")) + boost.fit(X, yc) + boost.predict(X) + boost.predict_proba(X) + + boost = AdaBoostRegressor(DummyRegressor()) + boost.fit(X, yr) + boost.predict(X) + + +def test_adaboostclassifier_without_sample_weight(): + X, y = iris.data, iris.target + estimator = NoSampleWeightWrapper(DummyClassifier()) + clf = AdaBoostClassifier(estimator=estimator) + err_msg = "{} doesn't support sample_weight".format(estimator.__class__.__name__) + with pytest.raises(ValueError, match=err_msg): + clf.fit(X, y) + + +def test_adaboostregressor_sample_weight(): + # check that giving weight will have an influence on the error computed + # for a weak learner + rng = np.random.RandomState(42) + X = np.linspace(0, 100, num=1000) + y = (0.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001) + X = X.reshape(-1, 1) + + # add an arbitrary outlier + X[-1] *= 10 + y[-1] = 10000 + + # random_state=0 ensure that the underlying bootstrap will use the outlier + regr_no_outlier = AdaBoostRegressor( + estimator=LinearRegression(), n_estimators=1, random_state=0 + ) + regr_with_weight = clone(regr_no_outlier) + regr_with_outlier = clone(regr_no_outlier) + + # fit 3 models: + # - a model containing the outlier + # - a model without the outlier + # - a model containing the outlier but with a null sample-weight + regr_with_outlier.fit(X, y) + regr_no_outlier.fit(X[:-1], y[:-1]) + sample_weight = np.ones_like(y) + 
sample_weight[-1] = 0 + regr_with_weight.fit(X, y, sample_weight=sample_weight) + + score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1]) + score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1]) + score_with_weight = regr_with_weight.score(X[:-1], y[:-1]) + + assert score_with_outlier < score_no_outlier + assert score_with_outlier < score_with_weight + assert score_no_outlier == pytest.approx(score_with_weight) + + +def test_adaboost_consistent_predict(): + # check that predict_proba and predict give consistent results + # regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/14084 + X_train, X_test, y_train, y_test = train_test_split( + *datasets.load_digits(return_X_y=True), random_state=42 + ) + model = AdaBoostClassifier(random_state=42) + model.fit(X_train, y_train) + + assert_array_equal( + np.argmax(model.predict_proba(X_test), axis=1), model.predict(X_test) + ) + + +@pytest.mark.parametrize( + "model, X, y", + [ + (AdaBoostClassifier(), iris.data, iris.target), + (AdaBoostRegressor(), diabetes.data, diabetes.target), + ], +) +def test_adaboost_negative_weight_error(model, X, y): + sample_weight = np.ones_like(y) + sample_weight[-1] = -10 + + err_msg = "Negative values in data passed to `sample_weight`" + with pytest.raises(ValueError, match=err_msg): + model.fit(X, y, sample_weight=sample_weight) + + +def test_adaboost_numerically_stable_feature_importance_with_small_weights(): + """Check that we don't create NaN feature importance with numerically + instable inputs. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20320 + """ + rng = np.random.RandomState(42) + X = rng.normal(size=(1000, 10)) + y = rng.choice([0, 1], size=1000) + sample_weight = np.ones_like(y) * 1e-263 + tree = DecisionTreeClassifier(max_depth=10, random_state=12) + ada_model = AdaBoostClassifier(estimator=tree, n_estimators=20, random_state=12) + ada_model.fit(X, y, sample_weight=sample_weight) + assert np.isnan(ada_model.feature_importances_).sum() == 0 + + +def test_adaboost_decision_function(global_random_seed): + """Check that the decision function respects the symmetric constraint for weak + learners. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/26520 + """ + n_classes = 3 + X, y = datasets.make_classification( + n_classes=n_classes, n_clusters_per_class=1, random_state=global_random_seed + ) + clf = AdaBoostClassifier(n_estimators=1, random_state=global_random_seed).fit(X, y) + + y_score = clf.decision_function(X) + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + # With a single learner, we expect to have a decision function in + # {1, - 1 / (n_classes - 1)}. + assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)} + + # We can assert the same for staged_decision_function since we have a single learner + for y_score in clf.staged_decision_function(X): + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + # With a single learner, we expect to have a decision function in + # {1, - 1 / (n_classes - 1)}. 
+ assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)} + + clf.set_params(n_estimators=5).fit(X, y) + + y_score = clf.decision_function(X) + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + for y_score in clf.staged_decision_function(X): + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + +# TODO(1.8): remove +def test_deprecated_algorithm(): + adaboost_clf = AdaBoostClassifier(n_estimators=1, algorithm="SAMME") + with pytest.warns(FutureWarning, match="The parameter 'algorithm' is deprecated"): + adaboost_clf.fit(X, y_class) diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cc9099cb356e4911015355a8b6b7ed3dbe67e0b8 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper); + +} // namespace cpu +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b0cea2784d68620290551c825d38200b964d7ed6 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<::std::optional> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false); + +} // namespace cpu +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward.h new file mode 100644 index 0000000000000000000000000000000000000000..709975e130339e22609397b6c9c6dd1ffc17dcd4 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward.h @@ -0,0 +1,47 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor +inline at::Tensor _jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, at::IntArrayRef max_lengths, double padding_value=0.0) { + return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, c10::fromIntArrayRefSlow(max_lengths), padding_value); +} +namespace symint { + template >> + at::Tensor _jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, at::IntArrayRef max_lengths, double padding_value=0.0) { + return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, c10::fromIntArrayRefSlow(max_lengths), padding_value); + } +} + +// aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor +inline at::Tensor _jagged_to_padded_dense_forward_symint(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value=0.0) { + return 
at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, max_lengths, padding_value); +} +namespace symint { + template >> + at::Tensor _jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value=0.0) { + return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, max_lengths, padding_value); + } +} + +} diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d42b44f20020efaf4a15c9c86bf855425bcecda9 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _linalg_eigvals(const at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_native.h new file mode 100644 index 0000000000000000000000000000000000000000..725bafabb037bf540e54de25a1a63267c22b858d --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple _scaled_dot_product_cudnn_attention_cuda(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & attn_bias, bool compute_log_sumexp, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false, ::std::optional scale=::std::nullopt); +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_ops.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4c887b2f31d2c67a7e163be588997b3e10167680 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _sparse_broadcast_to { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_broadcast_to"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size); +}; + +}} // namespace at::_ops diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_ops.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6040e0818252c8eeccff36b09f897fefdb33e56a --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_ops.h @@ -0,0 +1,72 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _sparse_sum { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_sum"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_sparse_sum(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API _sparse_sum_dtype { + using schema = at::Tensor (const at::Tensor &, at::ScalarType); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_sum"; + static constexpr const char* overload_name = "dtype"; + static constexpr const char* schema_str = "_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::ScalarType dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype); +}; + +struct TORCH_API _sparse_sum_dim { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_sum"; + static constexpr const char* overload_name = "dim"; + static constexpr const char* schema_str = "_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim); +}; + +struct TORCH_API _sparse_sum_dim_dtype { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::ScalarType); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows 
NVCC] + static constexpr const char* name = "aten::_sparse_sum"; + static constexpr const char* overload_name = "dim_dtype"; + static constexpr const char* schema_str = "_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype); +}; + +struct TORCH_API _sparse_sum_dim_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_sum"; + static constexpr const char* overload_name = "dim_out"; + static constexpr const char* schema_str = "_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a8a7b1659b9c9c2b1236427e2283c6f5a41899ee --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _test_autograd_multiple_dispatch_fullcoverage(const at::Tensor & self); +TORCH_API at::Tensor & 
_test_autograd_multiple_dispatch_fullcoverage_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor _test_autograd_multiple_dispatch_ntonly(const at::Tensor & self, bool b); +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..f481a7f6b7d68a40e338c2103f8d6070037d667a --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices); +} +// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices); +} + +// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) +inline ::std::tuple adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d::call(self, output_size); +} + +} diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..948c216d9d17de08e3e15660ae9357ef09e52410 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API affine_grid_generator { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::affine_grid_generator"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor"; + static at::Tensor call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners); +}; + +struct TORCH_API affine_grid_generator_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::affine_grid_generator"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/bmm.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/bmm.h new file mode 100644 index 0000000000000000000000000000000000000000..6dfc9612702cacdfef410cf0aedc90809051f482 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/bmm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::bmm(Tensor self, Tensor mat2) -> Tensor +inline at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) { + return at::_ops::bmm::call(self, mat2); +} + +// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) { + return at::_ops::bmm_out::call(self, mat2, out); +} +// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::bmm_out::call(self, mat2, out); +} + +} diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/cross_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/cross_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3ed9e02c9d589d7a0de636e7ddbabfc0c073ba53 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/cross_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor cross(const at::Tensor & self, const at::Tensor & other, ::std::optional dim=::std::nullopt); +TORCH_API at::Tensor & cross_out(const at::Tensor & self, const at::Tensor & other, ::std::optional dim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cc685f616174b4fa0133f4f7812c6689d79ec3ee --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor embedding_backward_symint(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse); +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/erfc.h 
b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/erfc.h new file mode 100644 index 0000000000000000000000000000000000000000..cacc5cb753e51ff8e566b6c6d79348ce97322ce6 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/erfc.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::erfc(Tensor self) -> Tensor +inline at::Tensor erfc(const at::Tensor & self) { + return at::_ops::erfc::call(self); +} + +// aten::erfc_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & erfc_(at::Tensor & self) { + return at::_ops::erfc_::call(self); +} + +// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::erfc_out::call(self, out); +} +// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::erfc_out::call(self, out); +} + +} diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9cdd34a375d24c3375907d638e1bfa599020a0b3 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_irfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor & fft_irfft2_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr.h new file mode 100644 index 0000000000000000000000000000000000000000..2ca39aa17a27afdb3fe660fa62542df95744532c --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fliplr(Tensor self) -> Tensor +inline at::Tensor fliplr(const at::Tensor & self) { + return at::_ops::fliplr::call(self); +} + +} diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h 
b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bf6f82acb6027a80f801cb557d25a351a87b7f50 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill.h new file mode 100644 index 0000000000000000000000000000000000000000..5b6d673dba980c06242ba2eb9706ceb0341ab4bc --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill.h @@ -0,0 +1,63 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, 
const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar::call(self, dim, index, value); +} + +// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor::call(self, dim, index, value); +} + +// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value); +} + +// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value); +} + +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out); +} +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { + return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out); +} + +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out); +} +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) { + return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out); +} + +} diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/index_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/index_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2b5629a5f920c731e3ed8cbcc703c83e630058c7 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/index_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_out : public at::meta::structured_index_Tensor { +void impl(const at::Tensor & self, at::DimVector sizes, at::DimVector strides, const at::Tensor & out); +}; +TORCH_API at::Tensor quantized_index(const at::Tensor & self, const c10::List<::std::optional> & indices); +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..67645c74f872ed505d143f35e6b67929eb133236 --- /dev/null +++ 
b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_vulkan_available(); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b73cb80ce1bd2ee8fce856b7116ce33ee4d72954 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_log : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9680e33f3f4f259b6cf3f0cd76260b0791c55cbf --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py 
from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); + +} // namespace meta +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..28ea7b94e5a8c02c59053e32f0aeb69cc42d58cc --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..633bcdcf89b8747b360e444ec1fd923bf067ae53 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); +} +namespace symint { + template >> + at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } +} + +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); +} +namespace symint { + template >> + at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input); + } +} + +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input); +} +namespace symint { + template >> + at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input); + } +} + +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input); +} +namespace symint { + template >> + at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input); + } +} + +// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor +inline at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding)); +} +namespace symint { + template >> + at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding)); + } +} + +// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) 
-> Tensor +inline at::Tensor reflection_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding); +} +namespace symint { + template >> + at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) { + return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding); + } +} + +} diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_ops.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..51ba605783b4537c457154f50577804ef014e01c --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API row_indices_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::row_indices_copy"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "row_indices_copy(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API row_indices_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::row_indices_copy"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..295e9658d8ffe3b812227696874143f853593190 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/tile_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/tile_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7dbfc94f11cd5e082202daf3584a15312b7d1a93 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/tile_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor tile_symint(const at::Tensor & self, c10::SymIntArrayRef dims); +} // namespace native +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_compositeexplicitautograd_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..771af8c02bbbf3a0da35a7f8128c1630af6c12e0 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: 
The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::vector unbind(const at::Tensor & self, int64_t dim=0); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_cuda_dispatch.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b4e694d15735e753858f120a81483940287526e --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor upsample_linear1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional scales, at::Tensor & out); +TORCH_API at::Tensor & upsample_linear1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor & upsample_linear1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional scales, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/zero_native.h b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/zero_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ac496a1e0cda39be848270d5dc325afe602e9f71 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/torch/include/ATen/ops/zero_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor zero(const at::Tensor & self); +TORCH_API at::Tensor & zero_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & zero_(at::Tensor & self); +TORCH_API at::Tensor & zero_meta_(at::Tensor & self); +TORCH_API at::Tensor & zero_nested_(at::Tensor & self); 
+TORCH_API at::Tensor & zero_sparse_(at::Tensor & self); +TORCH_API at::Tensor & zero_sparse_csr_(at::Tensor & self); +TORCH_API at::Tensor & mkldnn_zero_(at::Tensor & self); +} // namespace native +} // namespace at