diff --git a/.gitattributes b/.gitattributes index 193983612b0461e79771e4e8a79a2d9fcfef046d..adb064d63670044c89b378c67509975bf0bb3c09 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1087,3 +1087,4 @@ mgm/lib/python3.10/site-packages/sympy/physics/continuum_mechanics/__pycache__/b vila/lib/python3.10/site-packages/opencv_python.libs/libavcodec-402e4b05.so.59.37.100 filter=lfs diff=lfs merge=lfs -text mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57dca438157ac02fbbdfff66c0a5567cf623bfdf Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..403bbe9bd49f91b5d88839987b88c1bce2672361 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f718f9b7047e099d1e1ef399390cd09b200c95fa Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd235a87d4547b77ebb1a62bd86e96455f7887d1 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08401fe3d1ca1fe123b7564c2147a9e6c65a2df2 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac9ca26e68e160becf5fb202a70717f49debcd60 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b09c8b9f4908a6b131bf83e46da3287802fd966 Binary files /dev/null and 
b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29298d87ca87e3b77959ead97595b64b45f76ccc Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..010656f2fd128f1b8bae8a18cf90a7d1ef34df19 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..347552d43df8e0c6e60b27765ad2d72844afa3f9 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2561f3f01bf89725ca4c1a26eb58ce859ed473a Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13bd75d2e7890186f8a6405e78c23e0c5b027f45 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf1c15ad7a9c3d6e4dd6dccb0213843b9d1ff774 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5018f60c5a912c5463fc1a4640d8a9c6a7998f08 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baf0316fb513cd1b992fd30e72cc44e33fac7eb9 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4877fda1ac05feb217dc32d5818411c9c830f374 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86f106a541e6aa0add893f922ea8bd722e8ed17e Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b06296b4b8fdeb5566bf4235b4f34b346a38ccae Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/__init__.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63ae3038df8aed6917998d2830b913237fa8a7f9 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/__init__.py @@ -0,0 +1,29 @@ +""" +The :mod:`sklearn._loss` module includes loss function classes suitable for +fitting classification and regression tasks. +""" + +from .loss import ( + HalfSquaredError, + AbsoluteError, + PinballLoss, + HalfPoissonLoss, + HalfGammaLoss, + HalfTweedieLoss, + HalfTweedieLossIdentity, + HalfBinomialLoss, + HalfMultinomialLoss, +) + + +__all__ = [ + "HalfSquaredError", + "AbsoluteError", + "PinballLoss", + "HalfPoissonLoss", + "HalfGammaLoss", + "HalfTweedieLoss", + "HalfTweedieLossIdentity", + "HalfBinomialLoss", + "HalfMultinomialLoss", +] diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd b/mgm/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd new file mode 100644 index 0000000000000000000000000000000000000000..8ee3c8c7ed9f12a38382b86312157d3df775e111 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd @@ -0,0 +1,81 @@ +# cython: language_level=3 + +cimport numpy as cnp + +cnp.import_array() + + +# Fused types for y_true, y_pred, raw_prediction +ctypedef fused Y_DTYPE_C: + cnp.npy_float64 + cnp.npy_float32 + + +# Fused types for gradient and hessian +ctypedef fused G_DTYPE_C: + cnp.npy_float64 + cnp.npy_float32 + + +# Struct to return 2 doubles +ctypedef struct double_pair: + double val1 + double val2 + + +# C base class for loss functions +cdef class CyLossFunction: + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyHalfSquaredError(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyAbsoluteError(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyPinballLoss(CyLossFunction): + cdef readonly 
double quantile # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyHalfPoissonLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyHalfGammaLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyHalfTweedieLoss(CyLossFunction): + cdef readonly double power # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyHalfTweedieLossIdentity(CyLossFunction): + cdef readonly double power # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil + + +cdef class CyHalfBinomialLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/glm_distribution.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/glm_distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..6fbe675fef53301a770c969009b68ef72ae12de8 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/glm_distribution.py @@ -0,0 +1,373 @@ +""" +Distribution functions used in GLM +""" + +# Author: Christian Lorentzen +# License: BSD 3 clause +# +# TODO(1.3): remove file +# This is only used for backward compatibility in _GeneralizedLinearRegressor +# for the deprecated family attribute. + +from abc import ABCMeta, abstractmethod +from collections import namedtuple +import numbers + +import numpy as np +from scipy.special import xlogy + + +DistributionBoundary = namedtuple("DistributionBoundary", ("value", "inclusive")) + + +class ExponentialDispersionModel(metaclass=ABCMeta): + r"""Base class for reproductive Exponential Dispersion Models (EDM). + + The pdf of :math:`Y\sim \mathrm{EDM}(y_\textrm{pred}, \phi)` is given by + + .. math:: p(y| \theta, \phi) = c(y, \phi) + \exp\left(\frac{\theta y-A(\theta)}{\phi}\right) + = \tilde{c}(y, \phi) + \exp\left(-\frac{d(y, y_\textrm{pred})}{2\phi}\right) + + with mean :math:`\mathrm{E}[Y] = A'(\theta) = y_\textrm{pred}`, + variance :math:`\mathrm{Var}[Y] = \phi \cdot v(y_\textrm{pred})`, + unit variance :math:`v(y_\textrm{pred})` and + unit deviance :math:`d(y,y_\textrm{pred})`. + + Methods + ------- + deviance + deviance_derivative + in_y_range + unit_deviance + unit_deviance_derivative + unit_variance + + References + ---------- + https://en.wikipedia.org/wiki/Exponential_dispersion_model. 
+ """ + + def in_y_range(self, y): + """Returns ``True`` if y is in the valid range of Y~EDM. + + Parameters + ---------- + y : array of shape (n_samples,) + Target values. + """ + # Note that currently supported distributions have +inf upper bound + + if not isinstance(self._lower_bound, DistributionBoundary): + raise TypeError( + "_lower_bound attribute must be of type DistributionBoundary" + ) + + if self._lower_bound.inclusive: + return np.greater_equal(y, self._lower_bound.value) + else: + return np.greater(y, self._lower_bound.value) + + @abstractmethod + def unit_variance(self, y_pred): + r"""Compute the unit variance function. + + The unit variance :math:`v(y_\textrm{pred})` determines the variance as + a function of the mean :math:`y_\textrm{pred}` by + :math:`\mathrm{Var}[Y_i] = \phi/s_i*v(y_\textrm{pred}_i)`. + It can also be derived from the unit deviance + :math:`d(y,y_\textrm{pred})` as + + .. math:: v(y_\textrm{pred}) = \frac{2}{ + \frac{\partial^2 d(y,y_\textrm{pred})}{ + \partialy_\textrm{pred}^2}}\big|_{y=y_\textrm{pred}} + + See also :func:`variance`. + + Parameters + ---------- + y_pred : array of shape (n_samples,) + Predicted mean. + """ + + @abstractmethod + def unit_deviance(self, y, y_pred, check_input=False): + r"""Compute the unit deviance. + + The unit_deviance :math:`d(y,y_\textrm{pred})` can be defined by the + log-likelihood as + :math:`d(y,y_\textrm{pred}) = -2\phi\cdot + \left(loglike(y,y_\textrm{pred},\phi) - loglike(y,y,\phi)\right).` + + Parameters + ---------- + y : array of shape (n_samples,) + Target values. + + y_pred : array of shape (n_samples,) + Predicted mean. + + check_input : bool, default=False + If True raise an exception on invalid y or y_pred values, otherwise + they will be propagated as NaN. + Returns + ------- + deviance: array of shape (n_samples,) + Computed deviance + """ + + def unit_deviance_derivative(self, y, y_pred): + r"""Compute the derivative of the unit deviance w.r.t. y_pred. + + The derivative of the unit deviance is given by + :math:`\frac{\partial}{\partialy_\textrm{pred}}d(y,y_\textrm{pred}) + = -2\frac{y-y_\textrm{pred}}{v(y_\textrm{pred})}` + with unit variance :math:`v(y_\textrm{pred})`. + + Parameters + ---------- + y : array of shape (n_samples,) + Target values. + + y_pred : array of shape (n_samples,) + Predicted mean. + """ + return -2 * (y - y_pred) / self.unit_variance(y_pred) + + def deviance(self, y, y_pred, weights=1): + r"""Compute the deviance. + + The deviance is a weighted sum of the per sample unit deviances, + :math:`D = \sum_i s_i \cdot d(y_i, y_\textrm{pred}_i)` + with weights :math:`s_i` and unit deviance + :math:`d(y,y_\textrm{pred})`. + In terms of the log-likelihood it is :math:`D = -2\phi\cdot + \left(loglike(y,y_\textrm{pred},\frac{phi}{s}) + - loglike(y,y,\frac{phi}{s})\right)`. + + Parameters + ---------- + y : array of shape (n_samples,) + Target values. + + y_pred : array of shape (n_samples,) + Predicted mean. + + weights : {int, array of shape (n_samples,)}, default=1 + Weights or exposure to which variance is inverse proportional. + """ + return np.sum(weights * self.unit_deviance(y, y_pred)) + + def deviance_derivative(self, y, y_pred, weights=1): + r"""Compute the derivative of the deviance w.r.t. y_pred. + + It gives :math:`\frac{\partial}{\partial y_\textrm{pred}} + D(y, \y_\textrm{pred}; weights)`. + + Parameters + ---------- + y : array, shape (n_samples,) + Target values. + + y_pred : array, shape (n_samples,) + Predicted mean. 
+ + weights : {int, array of shape (n_samples,)}, default=1 + Weights or exposure to which variance is inversely proportional. + """ + return weights * self.unit_deviance_derivative(y, y_pred) + + +class TweedieDistribution(ExponentialDispersionModel): + r"""A class for the Tweedie distribution. + + A Tweedie distribution with mean :math:`y_\textrm{pred}=\mathrm{E}[Y]` + is uniquely defined by its mean-variance relationship + :math:`\mathrm{Var}[Y] \propto y_\textrm{pred}^{power}`. + + Special cases are: + + ===== ================ + Power Distribution + ===== ================ + 0 Normal + 1 Poisson + (1,2) Compound Poisson + 2 Gamma + 3 Inverse Gaussian + ===== ================ + + Parameters + ---------- + power : float, default=0 + The variance power of the `unit_variance` + :math:`v(y_\textrm{pred}) = y_\textrm{pred}^{power}`. + For ``0 < power < 1``, no distribution exists. + """ + + def __init__(self, power=0): + self.power = power + + @property + def power(self): + return self._power + + @power.setter + def power(self, power): + # We use a property with a setter, to update lower and + # upper bound when the power parameter is updated e.g. in grid + # search. + if not isinstance(power, numbers.Real): + raise TypeError( + "power must be a real number, input was {0}".format(power) + ) + + if power <= 0: + # Extreme Stable or Normal distribution + self._lower_bound = DistributionBoundary(-np.inf, inclusive=False) + elif power < 1: + raise ValueError( + "Tweedie distribution is only defined for power<=0 and power>=1." + ) + elif 1 <= power < 2: + # Poisson or Compound Poisson distribution + self._lower_bound = DistributionBoundary(0, inclusive=True) + elif power >= 2: + # Gamma, Positive Stable, Inverse Gaussian distributions + self._lower_bound = DistributionBoundary(0, inclusive=False) + else: # pragma: no cover + # this branch should be unreachable. + raise ValueError + + self._power = power + + def unit_variance(self, y_pred): + r"""Compute the unit variance of a Tweedie distribution + v(y_\textrm{pred})=y_\textrm{pred}**power. + + Parameters + ---------- + y_pred : array of shape (n_samples,) + Predicted mean. + """ + return np.power(y_pred, self.power) + + def unit_deviance(self, y, y_pred, check_input=False): + r"""Compute the unit deviance. + + The unit_deviance :math:`d(y,y_\textrm{pred})` can be defined by the + log-likelihood as + :math:`d(y,y_\textrm{pred}) = -2\phi\cdot + \left(loglike(y,y_\textrm{pred},\phi) - loglike(y,y,\phi)\right).` + + Parameters + ---------- + y : array of shape (n_samples,) + Target values. + + y_pred : array of shape (n_samples,) + Predicted mean. + + check_input : bool, default=False + If True, raise an exception on invalid y or y_pred values, otherwise + they will be propagated as NaN. + + Returns + ------- + deviance : array of shape (n_samples,) + Computed deviance + """ + p = self.power + + if check_input: + message = ( + "Mean Tweedie deviance error with power={} can only be used on ".format( + p + ) + ) + if p < 0: + # 'Extreme stable', y any real number, y_pred > 0 + if (y_pred <= 0).any(): + raise ValueError(message + "strictly positive y_pred.") + elif p == 0: + # Normal, y and y_pred can be any real number + pass + elif 0 < p < 1: + raise ValueError( + "Tweedie deviance is only defined for power<=0 and power>=1." + ) + elif 1 <= p < 2: + # Poisson and compound Poisson distribution, y >= 0, y_pred > 0 + if (y < 0).any() or (y_pred <= 0).any(): + raise ValueError( + message + "non-negative y and strictly positive y_pred." + ) + elif p >= 2: + # Gamma and Extreme stable distribution, y and y_pred > 0 + if (y <= 0).any() or (y_pred <= 0).any(): + raise ValueError(message + "strictly positive y and y_pred.") + else: # pragma: nocover + # Unreachable statement + raise ValueError + + if p < 0: + # 'Extreme stable', y any real number, y_pred > 0 + dev = 2 * ( + np.power(np.maximum(y, 0), 2 - p) / ((1 - p) * (2 - p)) + - y * np.power(y_pred, 1 - p) / (1 - p) + + np.power(y_pred, 2 - p) / (2 - p) + ) + + elif p == 0: + # Normal distribution, y and y_pred any real number + dev = (y - y_pred) ** 2 + elif p < 1: + raise ValueError( + "Tweedie deviance is only defined for power<=0 and power>=1."
+ ) + elif p == 1: + # Poisson distribution + dev = 2 * (xlogy(y, y / y_pred) - y + y_pred) + elif p == 2: + # Gamma distribution + dev = 2 * (np.log(y_pred / y) + y / y_pred - 1) + else: + dev = 2 * ( + np.power(y, 2 - p) / ((1 - p) * (2 - p)) + - y * np.power(y_pred, 1 - p) / (1 - p) + + np.power(y_pred, 2 - p) / (2 - p) + ) + return dev + + +class NormalDistribution(TweedieDistribution): + """Class for the Normal (aka Gaussian) distribution.""" + + def __init__(self): + super().__init__(power=0) + + +class PoissonDistribution(TweedieDistribution): + """Class for the scaled Poisson distribution.""" + + def __init__(self): + super().__init__(power=1) + + +class GammaDistribution(TweedieDistribution): + """Class for the Gamma distribution.""" + + def __init__(self): + super().__init__(power=2) + + +class InverseGaussianDistribution(TweedieDistribution): + """Class for the scaled Inverse Gaussian distribution.""" + + def __init__(self): + super().__init__(power=3) + + +EDM_DISTRIBUTIONS = { + "normal": NormalDistribution, + "poisson": PoissonDistribution, + "gamma": GammaDistribution, + "inverse-gaussian": InverseGaussianDistribution, +} diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/link.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/link.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb46a15ef2635ccd096c1b6a4e19f93c15f2d94 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/link.py @@ -0,0 +1,261 @@ +""" +Module contains classes for invertible (and differentiable) link functions. +""" +# Author: Christian Lorentzen + +from abc import ABC, abstractmethod +from dataclasses import dataclass + +import numpy as np +from scipy.special import expit, logit +from scipy.stats import gmean +from ..utils.extmath import softmax + + +@dataclass +class Interval: + low: float + high: float + low_inclusive: bool + high_inclusive: bool + + def __post_init__(self): + """Check that low <= high""" + if self.low > self.high: + raise ValueError( + f"One must have low <= high; got low={self.low}, high={self.high}." + ) + + def includes(self, x): + """Test whether all values of x are in interval range. + + Parameters + ---------- + x : ndarray + Array whose elements are tested to be in interval range. + + Returns + ------- + result : bool + """ + if self.low_inclusive: + low = np.greater_equal(x, self.low) + else: + low = np.greater(x, self.low) + + if not np.all(low): + return False + + if self.high_inclusive: + high = np.less_equal(x, self.high) + else: + high = np.less(x, self.high) + + # Note: np.all returns numpy.bool_ + return bool(np.all(high)) + + +def _inclusive_low_high(interval, dtype=np.float64): + """Generate values low and high to be within the interval range. + + This is used in tests only. + + Returns + ------- + low, high : tuple + The returned values low and high lie within the interval. + """ + eps = 10 * np.finfo(dtype).eps + if interval.low == -np.inf: + low = -1e10 + elif interval.low < 0: + low = interval.low * (1 - eps) + eps + else: + low = interval.low * (1 + eps) + eps + + if interval.high == np.inf: + high = 1e10 + elif interval.high < 0: + high = interval.high * (1 + eps) - eps + else: + high = interval.high * (1 - eps) - eps + + return low, high + + +class BaseLink(ABC): + """Abstract base class for differentiable, invertible link functions.
+ + Convention: + - link function g: raw_prediction = g(y_pred) + - inverse link h: y_pred = h(raw_prediction) + + For (generalized) linear models, `raw_prediction = X @ coef` is the so + called linear predictor, and `y_pred = h(raw_prediction)` is the predicted + conditional (on X) expected value of the target `y_true`. + + The methods are not implemented as staticmethods in case a link function needs + parameters. + """ + + is_multiclass = False # used for testing only + + # Usually, raw_prediction may be any real number and y_pred is an open + # interval. + # interval_raw_prediction = Interval(-np.inf, np.inf, False, False) + interval_y_pred = Interval(-np.inf, np.inf, False, False) + + @abstractmethod + def link(self, y_pred, out=None): + """Compute the link function g(y_pred). + + The link function maps (predicted) target values to raw predictions, + i.e. `g(y_pred) = raw_prediction`. + + Parameters + ---------- + y_pred : array + Predicted target values. + out : array + A location into which the result is stored. If provided, it must + have a shape that the inputs broadcast to. If not provided or None, + a freshly-allocated array is returned. + + Returns + ------- + out : array + Output array, element-wise link function. + """ + + @abstractmethod + def inverse(self, raw_prediction, out=None): + """Compute the inverse link function h(raw_prediction). + + The inverse link function maps raw predictions to predicted target + values, i.e. `h(raw_prediction) = y_pred`. + + Parameters + ---------- + raw_prediction : array + Raw prediction values (in link space). + out : array + A location into which the result is stored. If provided, it must + have a shape that the inputs broadcast to. If not provided or None, + a freshly-allocated array is returned. + + Returns + ------- + out : array + Output array, element-wise inverse link function. + """ + + +class IdentityLink(BaseLink): + """The identity link function g(x)=x.""" + + def link(self, y_pred, out=None): + if out is not None: + np.copyto(out, y_pred) + return out + else: + return y_pred + + inverse = link + + +class LogLink(BaseLink): + """The log link function g(x)=log(x).""" + + interval_y_pred = Interval(0, np.inf, False, False) + + def link(self, y_pred, out=None): + return np.log(y_pred, out=out) + + def inverse(self, raw_prediction, out=None): + return np.exp(raw_prediction, out=out) + + +class LogitLink(BaseLink): + """The logit link function g(x)=logit(x).""" + + interval_y_pred = Interval(0, 1, False, False) + + def link(self, y_pred, out=None): + return logit(y_pred, out=out) + + def inverse(self, raw_prediction, out=None): + return expit(raw_prediction, out=out) + + +class MultinomialLogit(BaseLink): + """The symmetric multinomial logit function. + + Convention: + - y_pred.shape = raw_prediction.shape = (n_samples, n_classes) + + Notes: + - The inverse link h is the softmax function. + - The sum is over the second axis, i.e. axis=1 (n_classes). + + We have to choose additional constraints in order to make + + y_pred[k] = exp(raw_pred[k]) / sum(exp(raw_pred[k]), k=0..n_classes-1) + + for n_classes classes identifiable and invertible. + We choose the symmetric side constraint where the geometric mean response + is set as reference category, see [2]: + + The symmetric multinomial logit link function for a single data point is + then defined as + + raw_prediction[k] = g(y_pred[k]) = log(y_pred[k]/gmean(y_pred)) + = log(y_pred[k]) - mean(log(y_pred)). 
+ + Note that this is equivalent to the definition in [1] and implies mean + centered raw predictions: + + sum(raw_prediction[k], k=0..n_classes-1) = 0. + + For linear models with raw_prediction = X @ coef, this corresponds to + sum(coef[k], k=0..n_classes-1) = 0, i.e. the sum over classes for every + feature is zero. + + Reference + --------- + .. [1] Friedman, Jerome; Hastie, Trevor; Tibshirani, Robert. "Additive + logistic regression: a statistical view of boosting" Ann. Statist. + 28 (2000), no. 2, 337--407. doi:10.1214/aos/1016218223. + https://projecteuclid.org/euclid.aos/1016218223 + + .. [2] Zahid, Faisal Maqbool and Gerhard Tutz. "Ridge estimation for + multinomial logit models with symmetric side constraints." + Computational Statistics 28 (2013): 1017-1034. + http://epub.ub.uni-muenchen.de/11001/1/tr067.pdf + """ + + is_multiclass = True + interval_y_pred = Interval(0, 1, False, False) + + def symmetrize_raw_prediction(self, raw_prediction): + return raw_prediction - np.mean(raw_prediction, axis=1)[:, np.newaxis] + + def link(self, y_pred, out=None): + # geometric mean as reference category + gm = gmean(y_pred, axis=1) + return np.log(y_pred / gm[:, np.newaxis], out=out) + + def inverse(self, raw_prediction, out=None): + if out is None: + return softmax(raw_prediction, copy=True) + else: + np.copyto(out, raw_prediction) + softmax(out, copy=False) + return out + + +_LINKS = { + "identity": IdentityLink, + "log": LogLink, + "logit": LogitLink, + "multinomial_logit": MultinomialLogit, +} diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/loss.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..5ab3b08973a4347d0edf2a8299f885f72e06a769 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/loss.py @@ -0,0 +1,1027 @@ +""" +This module contains loss classes suitable for fitting. + +It is not part of the public API. +Specific losses are used for regression, binary classification or multiclass +classification. +""" +# Goals: +# - Provide a common private module for loss functions/classes. +# - To be used in: +# - LogisticRegression +# - PoissonRegressor, GammaRegressor, TweedieRegressor +# - HistGradientBoostingRegressor, HistGradientBoostingClassifier +# - GradientBoostingRegressor, GradientBoostingClassifier +# - SGDRegressor, SGDClassifier +# - Replace link module of GLMs. + +import numbers +import numpy as np +from scipy.special import xlogy +from ._loss import ( + CyHalfSquaredError, + CyAbsoluteError, + CyPinballLoss, + CyHalfPoissonLoss, + CyHalfGammaLoss, + CyHalfTweedieLoss, + CyHalfTweedieLossIdentity, + CyHalfBinomialLoss, + CyHalfMultinomialLoss, +) +from .link import ( + Interval, + IdentityLink, + LogLink, + LogitLink, + MultinomialLogit, +) +from ..utils import check_scalar +from ..utils._readonly_array_wrapper import ReadonlyArrayWrapper +from ..utils.stats import _weighted_percentile + + +# Note: The shape of raw_prediction for multiclass classification is +# - GradientBoostingClassifier: (n_samples, n_classes) +# - HistGradientBoostingClassifier: (n_classes, n_samples) +# +# Note: Instead of inheritance like +# +# class BaseLoss(BaseLink, CyLossFunction): +# ... +# +# # Note: Naturally, we would inherit in the following order +# # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss) +# # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as +# # the last one. This, of course, changes the MRO.
+# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss): +# +# we use composition. This way we improve maintainability by avoiding the above +# mentioned Cython edge case and have easier to understand code (which method calls +# which code). +class BaseLoss: + """Base class for a loss function of 1-dimensional targets. + + Conventions: + + - y_true.shape = sample_weight.shape = (n_samples,) + - y_pred.shape = raw_prediction.shape = (n_samples,) + - If is_multiclass is true (multiclass classification), then + y_pred.shape = raw_prediction.shape = (n_samples, n_classes) + Note that this corresponds to the return value of decision_function. + + y_true, y_pred, sample_weight and raw_prediction must either be all float64 + or all float32. + gradient and hessian must be either both float64 or both float32. + + Note that y_pred = link.inverse(raw_prediction). + + Specific loss classes can inherit specific link classes to satisfy + BaseLink's abstractmethods. + + Parameters + ---------- + sample_weight : {None, ndarray} + If sample_weight is None, the hessian might be constant. + n_classes : {None, int} + The number of classes for classification, else None. + + Attributes + ---------- + closs : CyLossFunction + link : BaseLink + interval_y_true : Interval + Valid interval for y_true + interval_y_pred : Interval + Valid Interval for y_pred + differentiable : bool + Indicates whether or not the loss function is differentiable in + raw_prediction everywhere. + need_update_leaves_values : bool + Indicates whether decision trees in gradient boosting need to update + leaf values after having been fit to the (negative) gradients. + approx_hessian : bool + Indicates whether the hessian is approximated or exact. If + approximated, it should be larger than or equal to the exact one. + constant_hessian : bool + Indicates whether the hessian is one for this loss. + is_multiclass : bool + Indicates whether n_classes > 2 is allowed. + """ + + # For decision trees: + # This variable indicates whether the loss requires the leaves values to + # be updated once the tree has been trained. The trees are trained to + # predict a Newton-Raphson step (see grower._finalize_leaf()). But for + # some losses (e.g. least absolute deviation) we need to adjust the tree + # values to account for the "line search" of the gradient descent + # procedure. See the original paper Greedy Function Approximation: A + # Gradient Boosting Machine by Friedman + # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory. + need_update_leaves_values = False + differentiable = True + is_multiclass = False + + def __init__(self, closs, link, n_classes=None): + self.closs = closs + self.link = link + self.approx_hessian = False + self.constant_hessian = False + self.n_classes = n_classes + self.interval_y_true = Interval(-np.inf, np.inf, False, False) + self.interval_y_pred = self.link.interval_y_pred + + def in_y_true_range(self, y): + """Return True if y is in the valid range of y_true. + + Parameters + ---------- + y : ndarray + """ + return self.interval_y_true.includes(y) + + def in_y_pred_range(self, y): + """Return True if y is in the valid range of y_pred. + + Parameters + ---------- + y : ndarray + """ + return self.interval_y_pred.includes(y) + + def loss( + self, + y_true, + raw_prediction, + sample_weight=None, + loss_out=None, + n_threads=1, + ): + """Compute the pointwise loss value for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values.
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + loss_out : None or C-contiguous array of shape (n_samples,) + A location into which the result is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : array of shape (n_samples,) + Element-wise loss function. + """ + if loss_out is None: + loss_out = np.empty_like(y_true) + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + + y_true = ReadonlyArrayWrapper(y_true) + raw_prediction = ReadonlyArrayWrapper(raw_prediction) + if sample_weight is not None: + sample_weight = ReadonlyArrayWrapper(sample_weight) + return self.closs.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=loss_out, + n_threads=n_threads, + ) + + def loss_gradient( + self, + y_true, + raw_prediction, + sample_weight=None, + loss_out=None, + gradient_out=None, + n_threads=1, + ): + """Compute loss and gradient w.r.t. raw_prediction for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + loss_out : None or C-contiguous array of shape (n_samples,) + A location into which the loss is stored. If None, a new array + might be created. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the gradient is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : array of shape (n_samples,) + Element-wise loss function. + + gradient : array of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. + """ + if loss_out is None: + if gradient_out is None: + loss_out = np.empty_like(y_true) + gradient_out = np.empty_like(raw_prediction) + else: + loss_out = np.empty_like(y_true, dtype=gradient_out.dtype) + elif gradient_out is None: + gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + + y_true = ReadonlyArrayWrapper(y_true) + raw_prediction = ReadonlyArrayWrapper(raw_prediction) + if sample_weight is not None: + sample_weight = ReadonlyArrayWrapper(sample_weight) + return self.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=loss_out, + gradient_out=gradient_out, + n_threads=n_threads, + ) + + def gradient( + self, + y_true, + raw_prediction, + sample_weight=None, + gradient_out=None, + n_threads=1, + ): + """Compute gradient of loss w.r.t raw_prediction for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. 
+ raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the result is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + gradient : array of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. + """ + if gradient_out is None: + gradient_out = np.empty_like(raw_prediction) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + + y_true = ReadonlyArrayWrapper(y_true) + raw_prediction = ReadonlyArrayWrapper(raw_prediction) + if sample_weight is not None: + sample_weight = ReadonlyArrayWrapper(sample_weight) + return self.closs.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + n_threads=n_threads, + ) + + def gradient_hessian( + self, + y_true, + raw_prediction, + sample_weight=None, + gradient_out=None, + hessian_out=None, + n_threads=1, + ): + """Compute gradient and hessian of loss w.r.t raw_prediction. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the gradient is stored. If None, a new array + might be created. + hessian_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the hessian is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + gradient : arrays of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. + + hessian : arrays of shape (n_samples,) or (n_samples, n_classes) + Element-wise hessians. 
+ """ + if gradient_out is None: + if hessian_out is None: + gradient_out = np.empty_like(raw_prediction) + hessian_out = np.empty_like(raw_prediction) + else: + gradient_out = np.empty_like(hessian_out) + elif hessian_out is None: + hessian_out = np.empty_like(gradient_out) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + if hessian_out.ndim == 2 and hessian_out.shape[1] == 1: + hessian_out = hessian_out.squeeze(1) + + y_true = ReadonlyArrayWrapper(y_true) + raw_prediction = ReadonlyArrayWrapper(raw_prediction) + if sample_weight is not None: + sample_weight = ReadonlyArrayWrapper(sample_weight) + return self.closs.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + hessian_out=hessian_out, + n_threads=n_threads, + ) + + def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1): + """Compute the weighted average loss. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : float + Mean or averaged loss function. + """ + return np.average( + self.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + loss_out=None, + n_threads=n_threads, + ), + weights=sample_weight, + ) + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This can be used as initial estimates of predictions, i.e. before the + first iteration in fit. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Observed, true target values. + sample_weight : None or array of shape (n_samples,) + Sample weights. + + Returns + ------- + raw_prediction : numpy scalar or array of shape (n_classes,) + Raw predictions of an intercept-only model. + """ + # As default, take weighted average of the target over the samples + # axis=0 and then transform into link-scale (raw_prediction). + y_pred = np.average(y_true, weights=sample_weight, axis=0) + eps = 10 * np.finfo(y_pred.dtype).eps + + if self.interval_y_pred.low == -np.inf: + a_min = None + elif self.interval_y_pred.low_inclusive: + a_min = self.interval_y_pred.low + else: + a_min = self.interval_y_pred.low + eps + + if self.interval_y_pred.high == np.inf: + a_max = None + elif self.interval_y_pred.high_inclusive: + a_max = self.interval_y_pred.high + else: + a_max = self.interval_y_pred.high - eps + + if a_min is None and a_max is None: + return self.link.link(y_pred) + else: + return self.link.link(np.clip(y_pred, a_min, a_max)) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + """Calculate term dropped in loss. + + With this term added, the loss of perfect predictions is zero. + """ + return np.zeros_like(y_true) + + def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order="F"): + """Initialize arrays for gradients and hessians. + + Unless hessians are constant, arrays are initialized with undefined values. 
+ + Parameters + ---------- + n_samples : int + The number of samples, usually passed to `fit()`. + dtype : {np.float64, np.float32}, default=np.float64 + The dtype of the arrays gradient and hessian. + order : {'C', 'F'}, default='F' + Order of the arrays gradient and hessian. The default 'F' makes the arrays + contiguous along samples. + + Returns + ------- + gradient : C-contiguous array of shape (n_samples,) or array of shape \ + (n_samples, n_classes) + Empty array (allocated but not initialized) to be used as argument + gradient_out. + hessian : C-contiguous array of shape (n_samples,), array of shape + (n_samples, n_classes) or shape (1,) + Empty (allocated but not initialized) array to be used as argument + hessian_out. + If constant_hessian is True (e.g. `HalfSquaredError`), the array is + initialized to ``1``. + """ + if dtype not in (np.float32, np.float64): + raise ValueError( + "Valid options for 'dtype' are np.float32 and np.float64. " + f"Got dtype={dtype} instead." + ) + + if self.is_multiclass: + shape = (n_samples, self.n_classes) + else: + shape = (n_samples,) + gradient = np.empty(shape=shape, dtype=dtype, order=order) + + if self.constant_hessian: + # If the hessians are constant, we consider them equal to 1. + # - This is correct for HalfSquaredError + # - For AbsoluteError, hessians are actually 0, but they are + # always ignored anyway. + hessian = np.ones(shape=(1,), dtype=dtype) + else: + hessian = np.empty(shape=shape, dtype=dtype, order=order) + + return gradient, hessian + + +# Note: Naturally, we would inherit in the following order +# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss) +# But because of https://github.com/cython/cython/issues/4350 we +# set BaseLoss as the last one. This, of course, changes the MRO. +class HalfSquaredError(BaseLoss): + """Half squared error with identity link, for regression. + + Domain: + y_true and y_pred all real numbers + + Link: + y_pred = raw_prediction + + For a given sample x_i, half squared error is defined as:: + + loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2 + + The factor of 0.5 simplifies the computation of gradients and results in a + unit hessian (and is consistent with what is done in LightGBM). It is also + half the Normal distribution deviance. + """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfSquaredError(), link=IdentityLink()) + self.constant_hessian = sample_weight is None + + +class AbsoluteError(BaseLoss): + """Absolute error with identity link, for regression. + + Domain: + y_true and y_pred all real numbers + + Link: + y_pred = raw_prediction + + For a given sample x_i, the absolute error is defined as:: + + loss(x_i) = |y_true_i - raw_prediction_i| + """ + + differentiable = False + need_update_leaves_values = True + + def __init__(self, sample_weight=None): + super().__init__(closs=CyAbsoluteError(), link=IdentityLink()) + self.approx_hessian = True + self.constant_hessian = sample_weight is None + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This is the weighted median of the target, i.e. over the samples + axis=0. + """ + if sample_weight is None: + return np.median(y_true, axis=0) + else: + return _weighted_percentile(y_true, sample_weight, 50) + + +class PinballLoss(BaseLoss): + """Quantile loss aka pinball loss, for regression. 
+ + Domain: + y_true and y_pred all real numbers + quantile in (0, 1) + + Link: + y_pred = raw_prediction + + For a given sample x_i, the pinball loss is defined as:: + + loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i) + + rho_{quantile}(u) = u * (quantile - 1_{u<0}) + = -u * (1 - quantile) if u < 0 + u * quantile if u >= 0 + + Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError(). + + Additional Attributes + --------------------- + quantile : float + The quantile to be estimated. Must be in range (0, 1). + """ + + differentiable = False + need_update_leaves_values = True + + def __init__(self, sample_weight=None, quantile=0.5): + check_scalar( + quantile, + "quantile", + target_type=numbers.Real, + min_val=0, + max_val=1, + include_boundaries="neither", + ) + super().__init__( + closs=CyPinballLoss(quantile=float(quantile)), + link=IdentityLink(), + ) + self.approx_hessian = True + self.constant_hessian = sample_weight is None + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This is the weighted median of the target, i.e. over the samples + axis=0. + """ + if sample_weight is None: + return np.percentile(y_true, 100 * self.closs.quantile, axis=0) + else: + return _weighted_percentile( + y_true, sample_weight, 100 * self.closs.quantile + ) + + +class HalfPoissonLoss(BaseLoss): + """Half Poisson deviance loss with log-link, for regression. + + Domain: + y_true in non-negative real numbers + y_pred in positive real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half the Poisson deviance is defined as:: + + loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i)) + - y_true_i + exp(raw_prediction_i) + + Half the Poisson deviance is actually the negative log-likelihood up to + constant terms (not involving raw_prediction) and simplifies the + computation of the gradients. + We also skip the constant term `y_true_i * log(y_true_i) - y_true_i`. + """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfPoissonLoss(), link=LogLink()) + self.interval_y_true = Interval(0, np.inf, True, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + term = xlogy(y_true, y_true) - y_true + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfGammaLoss(BaseLoss): + """Half Gamma deviance loss with log-link, for regression. + + Domain: + y_true and y_pred in positive real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half Gamma deviance loss is defined as:: + + loss(x_i) = log(exp(raw_prediction_i)/y_true_i) + + y_true_i/exp(raw_prediction_i) - 1 + + Half the Gamma deviance is actually proportional to the negative + log-likelihood up to constant terms (not involving raw_prediction) and + simplifies the computation of the gradients. + We also skip the constant term `-log(y_true_i) - 1`. + """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfGammaLoss(), link=LogLink()) + self.interval_y_true = Interval(0, np.inf, False, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + term = -np.log(y_true) - 1 + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfTweedieLoss(BaseLoss): + """Half Tweedie deviance loss with log-link, for regression.
+ + Domain: + y_true in real numbers for power <= 0 + y_true in non-negative real numbers for 0 < power < 2 + y_true in positive real numbers for 2 <= power + y_pred in positive real numbers + power in real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half Tweedie deviance loss with p=power is defined + as:: + + loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p) + - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p) + + exp(raw_prediction_i)**(2-p) / (2-p) + + Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link, + HalfPoissonLoss and HalfGammaLoss. + + We also skip constant terms, but those are different for p=0, 1, 2. + Therefore, the loss is not continuous in `power`. + + Note furthermore that although no Tweedie distribution exists for + 0 < power < 1, it still gives a strictly consistent scoring function for + the expectation. + """ + + def __init__(self, sample_weight=None, power=1.5): + super().__init__( + closs=CyHalfTweedieLoss(power=float(power)), + link=LogLink(), + ) + if self.closs.power <= 0: + self.interval_y_true = Interval(-np.inf, np.inf, False, False) + elif self.closs.power < 2: + self.interval_y_true = Interval(0, np.inf, True, False) + else: + self.interval_y_true = Interval(0, np.inf, False, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + if self.closs.power == 0: + return HalfSquaredError().constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + elif self.closs.power == 1: + return HalfPoissonLoss().constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + elif self.closs.power == 2: + return HalfGammaLoss().constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + else: + p = self.closs.power + term = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p) + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfTweedieLossIdentity(BaseLoss): + """Half Tweedie deviance loss with identity link, for regression. + + Domain: + y_true in real numbers for power <= 0 + y_true in non-negative real numbers for 0 < power < 2 + y_true in positive real numbers for 2 <= power + y_pred in positive real numbers for power != 0 + y_pred in real numbers for power = 0 + power in real numbers + + Link: + y_pred = raw_prediction + + For a given sample x_i, half Tweedie deviance loss with p=power is defined + as:: + + loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p) + - y_true_i * raw_prediction_i**(1-p) / (1-p) + + raw_prediction_i**(2-p) / (2-p) + + Note that the minimum value of this loss is 0. + + Note furthermore that although no Tweedie distribution exists for + 0 < power < 1, it still gives a strictly consistent scoring function for + the expectation. + """ + + def __init__(self, sample_weight=None, power=1.5): + super().__init__( + closs=CyHalfTweedieLossIdentity(power=float(power)), + link=IdentityLink(), + ) + if self.closs.power <= 0: + self.interval_y_true = Interval(-np.inf, np.inf, False, False) + elif self.closs.power < 2: + self.interval_y_true = Interval(0, np.inf, True, False) + else: + self.interval_y_true = Interval(0, np.inf, False, False) + + if self.closs.power == 0: + self.interval_y_pred = Interval(-np.inf, np.inf, False, False) + else: + self.interval_y_pred = Interval(0, np.inf, False, False) + + +class HalfBinomialLoss(BaseLoss): + """Half Binomial deviance loss with logit link, for binary classification. + + This is also known as binary cross entropy, log-loss and logistic loss.
+ + Domain: + y_true in [0, 1], i.e. regression on the unit interval + y_pred in (0, 1), i.e. boundaries excluded + + Link: + y_pred = expit(raw_prediction) + + For a given sample x_i, half Binomial deviance is defined as the negative + log-likelihood of the Binomial/Bernoulli distribution and can be expressed + as:: + + loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i + + See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman, + section 4.4.1 (about logistic regression). + + Note that the formulation works for classification, y = {0, 1}, as well as + logistic regression, y = [0, 1]. + If you add `constant_to_optimal_zero` to the loss, you get half the + Bernoulli/binomial deviance. + """ + + def __init__(self, sample_weight=None): + super().__init__( + closs=CyHalfBinomialLoss(), + link=LogitLink(), + n_classes=2, + ) + self.interval_y_true = Interval(0, 1, True, True) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + # This is non-zero only if y_true is neither 0 nor 1. + term = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true) + if sample_weight is not None: + term *= sample_weight + return term + + def predict_proba(self, raw_prediction): + """Predict probabilities. + + Parameters + ---------- + raw_prediction : array of shape (n_samples,) or (n_samples, 1) + Raw prediction values (in link space). + + Returns + ------- + proba : array of shape (n_samples, 2) + Element-wise class probabilities. + """ + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype) + proba[:, 1] = self.link.inverse(raw_prediction) + proba[:, 0] = 1 - proba[:, 1] + return proba + + +class HalfMultinomialLoss(BaseLoss): + """Categorical cross-entropy loss, for multiclass classification. + + Domain: + y_true in {0, 1, 2, 3, .., n_classes - 1} + y_pred has n_classes elements, each element in (0, 1) + + Link: + y_pred = softmax(raw_prediction) + + Note: We assume y_true to be already label encoded. The inverse link is + softmax. But the full link function is the symmetric multinomial logit + function. + + For a given sample x_i, the categorical cross-entropy loss is defined as + the negative log-likelihood of the multinomial distribution; it + generalizes the binary cross-entropy to more than 2 classes:: + + loss_i = log(sum(exp(raw_pred_{i, k}), k=0..n_classes-1)) + - sum(y_true_{i, k} * raw_pred_{i, k}, k=0..n_classes-1) + + See [1]. + + Note that for the hessian, we calculate only the diagonal part in the + classes: If the full hessian for classes k and l and sample i is H_i_k_l, + we calculate H_i_k_k, i.e. k=l. + + Reference + --------- + .. [1] :arxiv:`Simon, Noah, J. Friedman and T. Hastie. + "A Blockwise Descent Algorithm for Group-penalized Multiresponse and + Multinomial Regression". + <1311.6529>` + """ + + is_multiclass = True + + def __init__(self, sample_weight=None, n_classes=3): + super().__init__( + closs=CyHalfMultinomialLoss(), + link=MultinomialLogit(), + n_classes=n_classes, + ) + self.interval_y_true = Interval(0, np.inf, True, False) + self.interval_y_pred = Interval(0, 1, False, False) + + def in_y_true_range(self, y): + """Return True if y is in the valid range of y_true.
+
+        Parameters
+        ----------
+        y : ndarray
+        """
+        return self.interval_y_true.includes(y) and np.all(y.astype(int) == y)
+
+    def fit_intercept_only(self, y_true, sample_weight=None):
+        """Compute raw_prediction of an intercept-only model.
+
+        This is the softmax of the weighted average of the target, i.e. over
+        the samples axis=0.
+        """
+        out = np.zeros(self.n_classes, dtype=y_true.dtype)
+        eps = np.finfo(y_true.dtype).eps
+        for k in range(self.n_classes):
+            out[k] = np.average(y_true == k, weights=sample_weight, axis=0)
+            out[k] = np.clip(out[k], eps, 1 - eps)
+        return self.link.link(out[None, :]).reshape(-1)
+
+    def predict_proba(self, raw_prediction):
+        """Predict probabilities.
+
+        Parameters
+        ----------
+        raw_prediction : array of shape (n_samples, n_classes)
+            Raw prediction values (in link space).
+
+        Returns
+        -------
+        proba : array of shape (n_samples, n_classes)
+            Element-wise class probabilities.
+        """
+        return self.link.inverse(raw_prediction)
+
+    def gradient_proba(
+        self,
+        y_true,
+        raw_prediction,
+        sample_weight=None,
+        gradient_out=None,
+        proba_out=None,
+        n_threads=1,
+    ):
+        """Compute gradient and class probabilities for raw_prediction.
+
+        Parameters
+        ----------
+        y_true : C-contiguous array of shape (n_samples,)
+            Observed, true target values.
+        raw_prediction : array of shape (n_samples, n_classes)
+            Raw prediction values (in link space).
+        sample_weight : None or C-contiguous array of shape (n_samples,)
+            Sample weights.
+        gradient_out : None or array of shape (n_samples, n_classes)
+            A location into which the gradient is stored. If None, a new array
+            might be created.
+        proba_out : None or array of shape (n_samples, n_classes)
+            A location into which the class probabilities are stored. If None,
+            a new array might be created.
+        n_threads : int, default=1
+            Might use openmp thread parallelism.
+
+        Returns
+        -------
+        gradient : array of shape (n_samples, n_classes)
+            Element-wise gradients.
+
+        proba : array of shape (n_samples, n_classes)
+            Element-wise class probabilities.
+ """ + if gradient_out is None: + if proba_out is None: + gradient_out = np.empty_like(raw_prediction) + proba_out = np.empty_like(raw_prediction) + else: + gradient_out = np.empty_like(proba_out) + elif proba_out is None: + proba_out = np.empty_like(gradient_out) + + y_true = ReadonlyArrayWrapper(y_true) + raw_prediction = ReadonlyArrayWrapper(raw_prediction) + if sample_weight is not None: + sample_weight = ReadonlyArrayWrapper(sample_weight) + return self.closs.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + proba_out=proba_out, + n_threads=n_threads, + ) + + +_LOSSES = { + "squared_error": HalfSquaredError, + "absolute_error": AbsoluteError, + "pinball_loss": PinballLoss, + "poisson_loss": HalfPoissonLoss, + "gamma_loss": HalfGammaLoss, + "tweedie_loss": HalfTweedieLoss, + "binomial_loss": HalfBinomialLoss, + "multinomial_loss": HalfMultinomialLoss, +} diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_glm_distribution.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_glm_distribution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7a47d5dc42914662fa996fd659c9a20f0ab5da7 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_glm_distribution.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae3258559e0c846065e253a44558b6cdc07535fa Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_glm_distribution.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_glm_distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..29d523d22adc2c59d04f59bac87499bb0b1df60d --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_glm_distribution.py @@ -0,0 +1,123 @@ +# Authors: Christian Lorentzen +# +# License: BSD 3 clause +# +# TODO(1.3): remove file +import numpy as np +from numpy.testing import ( + assert_allclose, + assert_array_equal, +) +from scipy.optimize import check_grad +import pytest + +from sklearn._loss.glm_distribution import ( + TweedieDistribution, + NormalDistribution, + PoissonDistribution, + GammaDistribution, + InverseGaussianDistribution, + DistributionBoundary, +) + + +@pytest.mark.parametrize( + "family, expected", + [ + (NormalDistribution(), [True, True, True]), + (PoissonDistribution(), [False, True, True]), + (TweedieDistribution(power=1.5), [False, True, True]), + (GammaDistribution(), [False, False, True]), + (InverseGaussianDistribution(), [False, False, True]), + (TweedieDistribution(power=4.5), [False, False, True]), + ], +) +def test_family_bounds(family, expected): + """Test the valid range of distributions at -1, 0, 1.""" + result = family.in_y_range([-1, 0, 1]) + assert_array_equal(result, expected) + + +def test_invalid_distribution_bound(): + dist = 
TweedieDistribution() + dist._lower_bound = 0 + with pytest.raises(TypeError, match="must be of type DistributionBoundary"): + dist.in_y_range([-1, 0, 1]) + + +def test_tweedie_distribution_power(): + msg = "distribution is only defined for power<=0 and power>=1" + with pytest.raises(ValueError, match=msg): + TweedieDistribution(power=0.5) + + with pytest.raises(TypeError, match="must be a real number"): + TweedieDistribution(power=1j) + + with pytest.raises(TypeError, match="must be a real number"): + dist = TweedieDistribution() + dist.power = 1j + + dist = TweedieDistribution() + assert isinstance(dist._lower_bound, DistributionBoundary) + + assert dist._lower_bound.inclusive is False + dist.power = 1 + assert dist._lower_bound.value == 0.0 + assert dist._lower_bound.inclusive is True + + +@pytest.mark.parametrize( + "family, chk_values", + [ + (NormalDistribution(), [-1.5, -0.1, 0.1, 2.5]), + (PoissonDistribution(), [0.1, 1.5]), + (GammaDistribution(), [0.1, 1.5]), + (InverseGaussianDistribution(), [0.1, 1.5]), + (TweedieDistribution(power=-2.5), [0.1, 1.5]), + (TweedieDistribution(power=-1), [0.1, 1.5]), + (TweedieDistribution(power=1.5), [0.1, 1.5]), + (TweedieDistribution(power=2.5), [0.1, 1.5]), + (TweedieDistribution(power=-4), [0.1, 1.5]), + ], +) +def test_deviance_zero(family, chk_values): + """Test deviance(y,y) = 0 for different families.""" + for x in chk_values: + assert_allclose(family.deviance(x, x), 0, atol=1e-9) + + +@pytest.mark.parametrize( + "family", + [ + NormalDistribution(), + PoissonDistribution(), + GammaDistribution(), + InverseGaussianDistribution(), + TweedieDistribution(power=-2.5), + TweedieDistribution(power=-1), + TweedieDistribution(power=1.5), + TweedieDistribution(power=2.5), + TweedieDistribution(power=-4), + ], + ids=lambda x: x.__class__.__name__, +) +def test_deviance_derivative(family, global_random_seed): + """Test deviance derivative for different families.""" + rng = np.random.RandomState(global_random_seed) + y_true = rng.rand(10) + # make data positive + y_true += np.abs(y_true.min()) + 1e-2 + + y_pred = y_true + np.fmax(rng.rand(10), 0.0) + + dev = family.deviance(y_true, y_pred) + assert isinstance(dev, float) + dev_derivative = family.deviance_derivative(y_true, y_pred) + assert dev_derivative.shape == y_pred.shape + + err = check_grad( + lambda y_pred: family.deviance(y_true, y_pred), + lambda y_pred: family.deviance_derivative(y_true, y_pred), + y_pred, + ) / np.linalg.norm(dev_derivative) + assert abs(err) < 3e-6 diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py new file mode 100644 index 0000000000000000000000000000000000000000..c083883d3d650631d8fa9959203e52f83769b432 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py @@ -0,0 +1,109 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +import pytest + +from sklearn._loss.link import ( + _LINKS, + _inclusive_low_high, + MultinomialLogit, + Interval, +) + + +LINK_FUNCTIONS = list(_LINKS.values()) + + +def test_interval_raises(): + """Test that interval with low > high raises ValueError.""" + with pytest.raises( + ValueError, match="One must have low <= high; got low=1, high=0." 
+ ): + Interval(1, 0, False, False) + + +@pytest.mark.parametrize( + "interval", + [ + Interval(0, 1, False, False), + Interval(0, 1, False, True), + Interval(0, 1, True, False), + Interval(0, 1, True, True), + Interval(-np.inf, np.inf, False, False), + Interval(-np.inf, np.inf, False, True), + Interval(-np.inf, np.inf, True, False), + Interval(-np.inf, np.inf, True, True), + Interval(-10, -1, False, False), + Interval(-10, -1, False, True), + Interval(-10, -1, True, False), + Interval(-10, -1, True, True), + ], +) +def test_is_in_range(interval): + # make sure low and high are always within the interval, used for linspace + low, high = _inclusive_low_high(interval) + + x = np.linspace(low, high, num=10) + assert interval.includes(x) + + # x contains lower bound + assert interval.includes(np.r_[x, interval.low]) == interval.low_inclusive + + # x contains upper bound + assert interval.includes(np.r_[x, interval.high]) == interval.high_inclusive + + # x contains upper and lower bound + assert interval.includes(np.r_[x, interval.low, interval.high]) == ( + interval.low_inclusive and interval.high_inclusive + ) + + +@pytest.mark.parametrize("link", LINK_FUNCTIONS) +def test_link_inverse_identity(link, global_random_seed): + # Test that link of inverse gives identity. + rng = np.random.RandomState(global_random_seed) + link = link() + n_samples, n_classes = 100, None + # The values for `raw_prediction` are limited from -20 to 20 because in the + # class `LogitLink` the term `expit(x)` comes very close to 1 for large + # positive x and therefore loses precision. + if link.is_multiclass: + n_classes = 10 + raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples, n_classes)) + if isinstance(link, MultinomialLogit): + raw_prediction = link.symmetrize_raw_prediction(raw_prediction) + else: + raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples)) + + assert_allclose(link.link(link.inverse(raw_prediction)), raw_prediction) + y_pred = link.inverse(raw_prediction) + assert_allclose(link.inverse(link.link(y_pred)), y_pred) + + +@pytest.mark.parametrize("link", LINK_FUNCTIONS) +def test_link_out_argument(link): + # Test that out argument gets assigned the result. + rng = np.random.RandomState(42) + link = link() + n_samples, n_classes = 100, None + if link.is_multiclass: + n_classes = 10 + raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples, n_classes)) + if isinstance(link, MultinomialLogit): + raw_prediction = link.symmetrize_raw_prediction(raw_prediction) + else: + # So far, the valid interval of raw_prediction is (-inf, inf) and + # we do not need to distinguish. 
+        raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples))
+
+    y_pred = link.inverse(raw_prediction, out=None)
+    out = np.empty_like(raw_prediction)
+    y_pred_2 = link.inverse(raw_prediction, out=out)
+    assert_allclose(y_pred, out)
+    assert_array_equal(out, y_pred_2)
+    assert np.shares_memory(out, y_pred_2)
+
+    out = np.empty_like(y_pred)
+    raw_prediction_2 = link.link(y_pred, out=out)
+    assert_allclose(raw_prediction, out)
+    assert_array_equal(out, raw_prediction_2)
+    assert np.shares_memory(out, raw_prediction_2)
diff --git a/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..4261b8366f64d85b166fb48d6d2c1bc9afd1b3ae
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py
@@ -0,0 +1,1161 @@
+import pickle
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+import pytest
+from pytest import approx
+from scipy.optimize import (
+    minimize,
+    minimize_scalar,
+    newton,
+    LinearConstraint,
+)
+from scipy.special import logsumexp
+
+from sklearn._loss.link import _inclusive_low_high, IdentityLink
+from sklearn._loss.loss import (
+    _LOSSES,
+    BaseLoss,
+    AbsoluteError,
+    HalfBinomialLoss,
+    HalfGammaLoss,
+    HalfMultinomialLoss,
+    HalfPoissonLoss,
+    HalfSquaredError,
+    HalfTweedieLoss,
+    HalfTweedieLossIdentity,
+    PinballLoss,
+)
+from sklearn.utils import assert_all_finite
+from sklearn.utils._testing import create_memmap_backed_data, skip_if_32bit
+
+
+ALL_LOSSES = list(_LOSSES.values())
+
+LOSS_INSTANCES = [loss() for loss in ALL_LOSSES]
+# HalfTweedieLoss(power=1.5) is already there as default
+LOSS_INSTANCES += [
+    PinballLoss(quantile=0.25),
+    HalfTweedieLoss(power=-1.5),
+    HalfTweedieLoss(power=0),
+    HalfTweedieLoss(power=1),
+    HalfTweedieLoss(power=2),
+    HalfTweedieLoss(power=3.0),
+    HalfTweedieLossIdentity(power=0),
+    HalfTweedieLossIdentity(power=1),
+    HalfTweedieLossIdentity(power=2),
+    HalfTweedieLossIdentity(power=3.0),
+]
+
+
+def loss_instance_name(param):
+    if isinstance(param, BaseLoss):
+        loss = param
+        name = loss.__class__.__name__
+        if hasattr(loss, "quantile"):
+            name += f"(quantile={loss.closs.quantile})"
+        elif hasattr(loss, "power"):
+            name += f"(power={loss.closs.power})"
+        return name
+    else:
+        return str(param)
+
+
+def random_y_true_raw_prediction(
+    loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42
+):
+    """Randomly generate y_true and raw_prediction in valid range."""
+    rng = np.random.RandomState(seed)
+    if loss.is_multiclass:
+        raw_prediction = np.empty((n_samples, loss.n_classes))
+        raw_prediction.flat[:] = rng.uniform(
+            low=raw_bound[0],
+            high=raw_bound[1],
+            size=n_samples * loss.n_classes,
+        )
+        y_true = np.arange(n_samples).astype(float) % loss.n_classes
+    else:
+        # If link is identity, we must respect the interval of y_pred:
+        if isinstance(loss.link, IdentityLink):
+            low, high = _inclusive_low_high(loss.interval_y_pred)
+            low = np.amax([low, raw_bound[0]])
+            high = np.amin([high, raw_bound[1]])
+            raw_bound = (low, high)
+        raw_prediction = rng.uniform(
+            low=raw_bound[0], high=raw_bound[1], size=n_samples
+        )
+        # generate a y_true in valid range
+        low, high = _inclusive_low_high(loss.interval_y_true)
+        low = max(low, y_bound[0])
+        high = min(high, y_bound[1])
+        y_true = rng.uniform(low, high, size=n_samples)
+        # set some values at special boundaries
+        if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive:
+            y_true[:: (n_samples // 3)] = 0
+        if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive:
+            y_true[1 :: (n_samples // 3)] = 1
+
+    return y_true, raw_prediction
+
+
+def numerical_derivative(func, x, eps):
+    """Helper function for numerical (first) derivatives."""
+    # For numerical derivatives, see
+    # https://en.wikipedia.org/wiki/Numerical_differentiation
+    # https://en.wikipedia.org/wiki/Finite_difference_coefficient
+    # We use central finite differences of accuracy 4.
+    h = np.full_like(x, fill_value=eps)
+    f_minus_2h = func(x - 2 * h)
+    f_minus_1h = func(x - h)
+    f_plus_1h = func(x + h)
+    f_plus_2h = func(x + 2 * h)
+    return (-f_plus_2h + 8 * f_plus_1h - 8 * f_minus_1h + f_minus_2h) / (12.0 * eps)
+
+
+@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
+def test_loss_boundary(loss):
+    """Test interval ranges of y_true and y_pred in losses."""
+    # make sure low and high are always within the interval, used for linspace
+    if loss.is_multiclass:
+        y_true = np.linspace(0, 9, num=10)
+    else:
+        low, high = _inclusive_low_high(loss.interval_y_true)
+        y_true = np.linspace(low, high, num=10)
+
+    # add boundaries if they are included
+    if loss.interval_y_true.low_inclusive:
+        y_true = np.r_[y_true, loss.interval_y_true.low]
+    if loss.interval_y_true.high_inclusive:
+        y_true = np.r_[y_true, loss.interval_y_true.high]
+
+    assert loss.in_y_true_range(y_true)
+
+    n = y_true.shape[0]
+    low, high = _inclusive_low_high(loss.interval_y_pred)
+    if loss.is_multiclass:
+        y_pred = np.empty((n, 3))
+        y_pred[:, 0] = np.linspace(low, high, num=n)
+        y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0])
+        y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0])
+    else:
+        y_pred = np.linspace(low, high, num=n)
+
+    assert loss.in_y_pred_range(y_pred)
+
+    # calculating losses should not fail
+    raw_prediction = loss.link.link(y_pred)
+    loss.loss(y_true=y_true, raw_prediction=raw_prediction)
+
+
+# Fixture to test valid value ranges.
+Y_COMMON_PARAMS = [
+    # (loss, [y success], [y fail])
+    (HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
+    (AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
+    (PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
+    (HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
+    (HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
+    (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]),
+    (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]),
+    (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
+    (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
+    (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
+    (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]),
+    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]),
+    (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
+    (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
+    (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
+    (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]),
+    (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]),
+]
+# y_pred and y_true do not always have the same domain (valid value range).
+# Hence, we define extra sets of parameters for each of them.
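+# As an illustration of why these extra fixtures are needed (an editorial
+# sketch, not part of the fixtures themselves): HalfPoissonLoss accepts an
+# observed count of zero but not a predicted mean of zero, because the
+# Poisson deviance contains log(y_pred):
+#
+#     loss = HalfPoissonLoss()
+#     loss.in_y_true_range(np.array([0.0]))  # True: y_true may touch 0
+#     loss.in_y_pred_range(np.array([0.0]))  # False: y_pred must be > 0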
+Y_TRUE_PARAMS = [ # type: ignore + # (loss, [y success], [y fail]) + (HalfPoissonLoss(), [0], []), + (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []), + (HalfTweedieLoss(power=0), [-100, 0], []), + (HalfTweedieLoss(power=1.5), [0], []), + (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []), + (HalfTweedieLossIdentity(power=0), [-100, 0], []), + (HalfTweedieLossIdentity(power=1.5), [0], []), + (HalfBinomialLoss(), [0, 1], []), + (HalfMultinomialLoss(), [0.0, 1.0, 2], []), +] +Y_PRED_PARAMS = [ + # (loss, [y success], [y fail]) + (HalfPoissonLoss(), [], [0]), + (HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]), + (HalfTweedieLoss(power=0), [], [-3, -0.1, 0]), + (HalfTweedieLoss(power=1.5), [], [0]), + (HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]), + (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []), + (HalfTweedieLossIdentity(power=1.5), [], [0]), + (HalfBinomialLoss(), [], [0, 1]), + (HalfMultinomialLoss(), [0.1, 0.5], [0, 1]), +] + + +@pytest.mark.parametrize( + "loss, y_true_success, y_true_fail", Y_COMMON_PARAMS + Y_TRUE_PARAMS +) +def test_loss_boundary_y_true(loss, y_true_success, y_true_fail): + """Test boundaries of y_true for loss functions.""" + for y in y_true_success: + assert loss.in_y_true_range(np.array([y])) + for y in y_true_fail: + assert not loss.in_y_true_range(np.array([y])) + + +@pytest.mark.parametrize( + "loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS # type: ignore +) +def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail): + """Test boundaries of y_pred for loss functions.""" + for y in y_pred_success: + assert loss.in_y_pred_range(np.array([y])) + for y in y_pred_fail: + assert not loss.in_y_pred_range(np.array([y])) + + +@pytest.mark.parametrize( + "loss, y_true, raw_prediction, loss_true", + [ + (HalfSquaredError(), 1.0, 5.0, 8), + (AbsoluteError(), 1.0, 5.0, 4), + (PinballLoss(quantile=0.5), 1.0, 5.0, 2), + (PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25)), + (PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25), + (HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4)), + (HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4), + (HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2), + (HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2)), + (HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2), + (HalfTweedieLossIdentity(power=3), 2.0, 4.0, -1 / 4 + 1 / 4**2 + 1 / 2 / 2), + (HalfBinomialLoss(), 0.25, np.log(4), np.log(5) - 0.25 * np.log(4)), + ( + HalfMultinomialLoss(n_classes=3), + 0.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.2, + ), + ( + HalfMultinomialLoss(n_classes=3), + 1.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.5, + ), + ( + HalfMultinomialLoss(n_classes=3), + 2.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.3, + ), + ], + ids=loss_instance_name, +) +def test_loss_on_specific_values(loss, y_true, raw_prediction, loss_true): + """Test losses at specific values.""" + assert loss( + y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]) + ) == approx(loss_true, rel=1e-11, abs=1e-12) + + +@pytest.mark.parametrize("loss", ALL_LOSSES) +@pytest.mark.parametrize("readonly_memmap", [False, True]) +@pytest.mark.parametrize("dtype_in", [np.float32, np.float64]) +@pytest.mark.parametrize("dtype_out", [np.float32, np.float64]) +@pytest.mark.parametrize("sample_weight", [None, 1]) +@pytest.mark.parametrize("out1", [None, 1]) +@pytest.mark.parametrize("out2", [None, 1]) +@pytest.mark.parametrize("n_threads", [1, 2]) +def test_loss_dtype( + loss, 
readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads +): + """Test acceptance of dtypes, readonly and writeable arrays in loss functions. + + Check that loss accepts if all input arrays are either all float32 or all + float64, and all output arrays are either all float32 or all float64. + + Also check that input arrays can be readonly, e.g. memory mapped. + """ + loss = loss() + # generate a y_true and raw_prediction in valid range + n_samples = 5 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + y_true = y_true.astype(dtype_in) + raw_prediction = raw_prediction.astype(dtype_in) + + if sample_weight is not None: + sample_weight = np.array([2.0] * n_samples, dtype=dtype_in) + if out1 is not None: + out1 = np.empty_like(y_true, dtype=dtype_out) + if out2 is not None: + out2 = np.empty_like(raw_prediction, dtype=dtype_out) + + if readonly_memmap: + y_true = create_memmap_backed_data(y_true, aligned=True) + raw_prediction = create_memmap_backed_data(raw_prediction, aligned=True) + if sample_weight is not None: + sample_weight = create_memmap_backed_data(sample_weight, aligned=True) + + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out1, + n_threads=n_threads, + ) + loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out2, + n_threads=n_threads, + ) + loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out1, + gradient_out=out2, + n_threads=n_threads, + ) + if out1 is not None and loss.is_multiclass: + out1 = np.empty_like(raw_prediction, dtype=dtype_out) + loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out1, + hessian_out=out2, + n_threads=n_threads, + ) + loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight) + loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight) + loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight) + if hasattr(loss, "predict_proba"): + loss.predict_proba(raw_prediction=raw_prediction) + if hasattr(loss, "gradient_proba"): + loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out1, + proba_out=out2, + n_threads=n_threads, + ) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_same_as_C_functions(loss, sample_weight): + """Test that Python and Cython functions return same results.""" + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + out_l1 = np.empty_like(y_true) + out_l2 = np.empty_like(y_true) + out_g1 = np.empty_like(raw_prediction) + out_g2 = np.empty_like(raw_prediction) + out_h1 = np.empty_like(raw_prediction) + out_h2 = np.empty_like(raw_prediction) + assert_allclose( + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + ), + loss.closs.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + ), + ) + assert_allclose( + loss.gradient( + y_true=y_true, + 
raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + ), + loss.closs.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g2, + ), + ) + loss.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + gradient_out=out_g1, + ) + loss.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + gradient_out=out_g2, + ) + assert_allclose(out_l1, out_l2) + assert_allclose(out_g1, out_g2) + loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + hessian_out=out_h1, + ) + loss.closs.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g2, + hessian_out=out_h2, + ) + assert_allclose(out_g1, out_g2) + assert_allclose(out_h1, out_h2) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed): + """Test that loss and gradient are the same across different functions. + + Also test that output arguments contain correct results. + """ + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=global_random_seed, + ) + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + out_l1 = np.empty_like(y_true) + out_l2 = np.empty_like(y_true) + out_g1 = np.empty_like(raw_prediction) + out_g2 = np.empty_like(raw_prediction) + out_g3 = np.empty_like(raw_prediction) + out_h3 = np.empty_like(raw_prediction) + + l1 = loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + ) + g1 = loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + ) + l2, g2 = loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + gradient_out=out_g2, + ) + g3, h3 = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g3, + hessian_out=out_h3, + ) + assert_allclose(l1, l2) + assert_array_equal(l1, out_l1) + assert np.shares_memory(l1, out_l1) + assert_array_equal(l2, out_l2) + assert np.shares_memory(l2, out_l2) + assert_allclose(g1, g2) + assert_allclose(g1, g3) + assert_array_equal(g1, out_g1) + assert np.shares_memory(g1, out_g1) + assert_array_equal(g2, out_g2) + assert np.shares_memory(g2, out_g2) + assert_array_equal(g3, out_g3) + assert np.shares_memory(g3, out_g3) + + if hasattr(loss, "gradient_proba"): + assert loss.is_multiclass # only for HalfMultinomialLoss + out_g4 = np.empty_like(raw_prediction) + out_proba = np.empty_like(raw_prediction) + g4, proba = loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g4, + proba_out=out_proba, + ) + assert_allclose(g1, out_g4) + assert_allclose(g1, g4) + assert_allclose(proba, out_proba) + assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", ["ones", "random"]) +def test_sample_weight_multiplies(loss, sample_weight, 
global_random_seed):
+    """Test sample weights in loss, gradients and hessians.
+
+    Make sure that passing sample weights to loss, gradient and hessian
+    computation methods is equivalent to multiplying by the weights.
+    """
+    n_samples = 100
+    y_true, raw_prediction = random_y_true_raw_prediction(
+        loss=loss,
+        n_samples=n_samples,
+        y_bound=(-100, 100),
+        raw_bound=(-5, 5),
+        seed=global_random_seed,
+    )
+
+    if sample_weight == "ones":
+        sample_weight = np.ones(shape=n_samples, dtype=np.float64)
+    else:
+        rng = np.random.RandomState(global_random_seed)
+        sample_weight = rng.normal(size=n_samples).astype(np.float64)
+
+    assert_allclose(
+        loss.loss(
+            y_true=y_true,
+            raw_prediction=raw_prediction,
+            sample_weight=sample_weight,
+        ),
+        sample_weight
+        * loss.loss(
+            y_true=y_true,
+            raw_prediction=raw_prediction,
+            sample_weight=None,
+        ),
+    )
+
+    losses, gradient = loss.loss_gradient(
+        y_true=y_true,
+        raw_prediction=raw_prediction,
+        sample_weight=None,
+    )
+    losses_sw, gradient_sw = loss.loss_gradient(
+        y_true=y_true,
+        raw_prediction=raw_prediction,
+        sample_weight=sample_weight,
+    )
+    assert_allclose(losses * sample_weight, losses_sw)
+    if not loss.is_multiclass:
+        assert_allclose(gradient * sample_weight, gradient_sw)
+    else:
+        assert_allclose(gradient * sample_weight[:, None], gradient_sw)
+
+    gradient, hessian = loss.gradient_hessian(
+        y_true=y_true,
+        raw_prediction=raw_prediction,
+        sample_weight=None,
+    )
+    gradient_sw, hessian_sw = loss.gradient_hessian(
+        y_true=y_true,
+        raw_prediction=raw_prediction,
+        sample_weight=sample_weight,
+    )
+    if not loss.is_multiclass:
+        assert_allclose(gradient * sample_weight, gradient_sw)
+        assert_allclose(hessian * sample_weight, hessian_sw)
+    else:
+        assert_allclose(gradient * sample_weight[:, None], gradient_sw)
+        assert_allclose(hessian * sample_weight[:, None], hessian_sw)
+
+
+@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
+def test_graceful_squeezing(loss):
+    """Test that reshaped raw_prediction gives same results."""
+    y_true, raw_prediction = random_y_true_raw_prediction(
+        loss=loss,
+        n_samples=20,
+        y_bound=(-100, 100),
+        raw_bound=(-10, 10),
+        seed=42,
+    )
+
+    if raw_prediction.ndim == 1:
+        raw_prediction_2d = raw_prediction[:, None]
+        assert_allclose(
+            loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.loss(y_true=y_true, raw_prediction=raw_prediction),
+        )
+        assert_allclose(
+            loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction),
+        )
+        assert_allclose(
+            loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.gradient(y_true=y_true, raw_prediction=raw_prediction),
+        )
+        assert_allclose(
+            loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction),
+        )
+
+
+@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
+@pytest.mark.parametrize("sample_weight", [None, "range"])
+def test_loss_of_perfect_prediction(loss, sample_weight):
+    """Test value of perfect predictions.
+
+    Loss of y_pred = y_true plus constant_to_optimal_zero should sum up to
+    zero.
+    """
+    if not loss.is_multiclass:
+        # Use small values such that exp(value) is not nan.
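+        # Editorial note on the bound (an assumption about intent): with the
+        # log link, y_true = exp(raw_prediction), so |raw_prediction| <= 10
+        # keeps exp() well inside float64 range. The identity checked below
+        # is, elementwise,
+        #     loss(y_true, link(y_true)) + constant_to_optimal_zero(y_true) == 0.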
+ raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10]) + # If link is identity, we must respect the interval of y_pred: + if isinstance(loss.link, IdentityLink): + eps = 1e-10 + low = loss.interval_y_pred.low + if not loss.interval_y_pred.low_inclusive: + low = low + eps + high = loss.interval_y_pred.high + if not loss.interval_y_pred.high_inclusive: + high = high - eps + raw_prediction = np.clip(raw_prediction, low, high) + y_true = loss.link.inverse(raw_prediction) + else: + # HalfMultinomialLoss + y_true = np.arange(loss.n_classes).astype(float) + # raw_prediction with entries -exp(10), but +exp(10) on the diagonal + # this is close enough to np.inf which would produce nan + raw_prediction = np.full( + shape=(loss.n_classes, loss.n_classes), + fill_value=-np.exp(10), + dtype=float, + ) + raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10) + + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + loss_value = loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + constant_term = loss.constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + # Comparing loss_value + constant_term to zero would result in large + # round-off errors. + assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed): + """Test gradients and hessians with numerical derivatives. + + Gradient should equal the numerical derivatives of the loss function. + Hessians should equal the numerical derivatives of gradients. + """ + n_samples = 20 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=global_random_seed, + ) + + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + g, h = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + + assert g.shape == raw_prediction.shape + assert h.shape == raw_prediction.shape + + if not loss.is_multiclass: + + def loss_func(x): + return loss.loss( + y_true=y_true, + raw_prediction=x, + sample_weight=sample_weight, + ) + + g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6) + assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10) + + def grad_func(x): + return loss.gradient( + y_true=y_true, + raw_prediction=x, + sample_weight=sample_weight, + ) + + h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6) + if loss.approx_hessian: + # TODO: What could we test if loss.approx_hessian? + pass + else: + assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10) + else: + # For multiclass loss, we should only change the predictions of the + # class for which the derivative is taken for, e.g. offset[:, k] = eps + # for class k. + # As a softmax is computed, offsetting the whole array by a constant + # would have no effect on the probabilities, and thus on the loss. 
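+        # A small numeric illustration of that shift invariance (editorial
+        # sketch, not executed here): with x = np.array([0.0, 1.0, 2.0]),
+        # softmax(x) and softmax(x + 5.0) agree up to float rounding, since
+        # softmax(x)_k = exp(x_k) / sum_j exp(x_j) and the constant factor
+        # exp(5.0) cancels between numerator and denominator.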
+ for k in range(loss.n_classes): + + def loss_func(x): + raw = raw_prediction.copy() + raw[:, k] = x + return loss.loss( + y_true=y_true, + raw_prediction=raw, + sample_weight=sample_weight, + ) + + g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5) + assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10) + + def grad_func(x): + raw = raw_prediction.copy() + raw[:, k] = x + return loss.gradient( + y_true=y_true, + raw_prediction=raw, + sample_weight=sample_weight, + )[:, k] + + h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6) + if loss.approx_hessian: + # TODO: What could we test if loss.approx_hessian? + pass + else: + assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10) + + +@pytest.mark.parametrize( + "loss, x0, y_true", + [ + ("squared_error", -2.0, 42), + ("squared_error", 117.0, 1.05), + ("squared_error", 0.0, 0.0), + # The argmin of binomial_loss for y_true=0 and y_true=1 is resp. + # -inf and +inf due to logit, cf. "complete separation". Therefore, we + # use 0 < y_true < 1. + ("binomial_loss", 0.3, 0.1), + ("binomial_loss", -12, 0.2), + ("binomial_loss", 30, 0.9), + ("poisson_loss", 12.0, 1.0), + ("poisson_loss", 0.0, 2.0), + ("poisson_loss", -22.0, 10.0), + ], +) +@skip_if_32bit +def test_derivatives(loss, x0, y_true): + """Test that gradients are zero at the minimum of the loss. + + We check this on a single value/sample using Halley's method with the + first and second order derivatives computed by the Loss instance. + Note that methods of Loss instances operate on arrays while the newton + root finder expects a scalar or a one-element array for this purpose. + """ + loss = _LOSSES[loss](sample_weight=None) + y_true = np.array([y_true], dtype=np.float64) + x0 = np.array([x0], dtype=np.float64) + + def func(x: np.ndarray) -> np.ndarray: + """Compute loss plus constant term. + + The constant term is such that the minimum function value is zero, + which is required by the Newton method. + """ + return loss.loss( + y_true=y_true, raw_prediction=x + ) + loss.constant_to_optimal_zero(y_true=y_true) + + def fprime(x: np.ndarray) -> np.ndarray: + return loss.gradient(y_true=y_true, raw_prediction=x) + + def fprime2(x: np.ndarray) -> np.ndarray: + return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1] + + optimum = newton( + func, + x0=x0, + fprime=fprime, + fprime2=fprime2, + maxiter=100, + tol=5e-8, + ) + + # Need to ravel arrays because assert_allclose requires matching + # dimensions. + y_true = y_true.ravel() + optimum = optimum.ravel() + assert_allclose(loss.link.inverse(optimum), y_true) + assert_allclose(func(optimum), 0, atol=1e-14) + assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_intercept_only(loss, sample_weight): + """Test that fit_intercept_only returns the argmin of the loss. + + Also test that the gradient is zero at the minimum. 
+ """ + n_samples = 50 + if not loss.is_multiclass: + y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples)) + else: + y_true = np.arange(n_samples).astype(np.float64) % loss.n_classes + y_true[::5] = 0 # exceedance of class 0 + + if sample_weight == "range": + sample_weight = np.linspace(0.1, 2, num=n_samples) + + a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight) + + # find minimum by optimization + def fun(x): + if not loss.is_multiclass: + raw_prediction = np.full(shape=(n_samples), fill_value=x) + else: + raw_prediction = np.ascontiguousarray( + np.broadcast_to(x, shape=(n_samples, loss.n_classes)) + ) + return loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + + if not loss.is_multiclass: + opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100}) + grad = loss.gradient( + y_true=y_true, + raw_prediction=np.full_like(y_true, a), + sample_weight=sample_weight, + ) + assert a.shape == tuple() # scalar + assert a.dtype == y_true.dtype + assert_all_finite(a) + a == approx(opt.x, rel=1e-7) + grad.sum() == approx(0, abs=1e-12) + else: + # The constraint corresponds to sum(raw_prediction) = 0. Without it, we would + # need to apply loss.symmetrize_raw_prediction to opt.x before comparing. + opt = minimize( + fun, + np.zeros((loss.n_classes)), + tol=1e-13, + options={"maxiter": 100}, + method="SLSQP", + constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0), + ) + grad = loss.gradient( + y_true=y_true, + raw_prediction=np.tile(a, (n_samples, 1)), + sample_weight=sample_weight, + ) + assert a.dtype == y_true.dtype + assert_all_finite(a) + assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12) + assert_allclose(grad.sum(axis=0), 0, atol=1e-12) + + +@pytest.mark.parametrize( + "loss, func, random_dist", + [ + (HalfSquaredError(), np.mean, "normal"), + (AbsoluteError(), np.median, "normal"), + (PinballLoss(quantile=0.25), lambda x: np.percentile(x, q=25), "normal"), + (HalfPoissonLoss(), np.mean, "poisson"), + (HalfGammaLoss(), np.mean, "exponential"), + (HalfTweedieLoss(), np.mean, "exponential"), + (HalfBinomialLoss(), np.mean, "binomial"), + ], +) +def test_specific_fit_intercept_only(loss, func, random_dist, global_random_seed): + """Test that fit_intercept_only returns the correct functional. + + We test the functional for specific, meaningful distributions, e.g. + squared error estimates the expectation of a probability distribution. + """ + rng = np.random.RandomState(global_random_seed) + if random_dist == "binomial": + y_train = rng.binomial(1, 0.5, size=100) + else: + y_train = getattr(rng, random_dist)(size=100) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + # Make sure baseline prediction is the expected functional=func, e.g. mean + # or median. 
+    assert_all_finite(baseline_prediction)
+    assert baseline_prediction == approx(loss.link.link(func(y_train)))
+    assert loss.link.inverse(baseline_prediction) == approx(func(y_train))
+    if isinstance(loss.link, IdentityLink):
+        assert_allclose(loss.link.inverse(baseline_prediction), baseline_prediction)
+
+    # Test baseline at boundary
+    if loss.interval_y_true.low_inclusive:
+        y_train.fill(loss.interval_y_true.low)
+        baseline_prediction = loss.fit_intercept_only(y_true=y_train)
+        assert_all_finite(baseline_prediction)
+    if loss.interval_y_true.high_inclusive:
+        y_train.fill(loss.interval_y_true.high)
+        baseline_prediction = loss.fit_intercept_only(y_true=y_train)
+        assert_all_finite(baseline_prediction)
+
+
+def test_multinomial_loss_fit_intercept_only():
+    """Test that fit_intercept_only returns the mean functional for CCE."""
+    rng = np.random.RandomState(0)
+    n_classes = 4
+    loss = HalfMultinomialLoss(n_classes=n_classes)
+    # Same logic as test_specific_fit_intercept_only. Here inverse link
+    # function = softmax and link function = log - symmetry term.
+    y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64)
+    baseline_prediction = loss.fit_intercept_only(y_true=y_train)
+    assert baseline_prediction.shape == (n_classes,)
+    p = np.zeros(n_classes, dtype=y_train.dtype)
+    for k in range(n_classes):
+        p[k] = (y_train == k).mean()
+    assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p)))
+    assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :]))
+
+    for y_train in (np.zeros(shape=10), np.ones(shape=10)):
+        y_train = y_train.astype(np.float64)
+        baseline_prediction = loss.fit_intercept_only(y_true=y_train)
+        assert baseline_prediction.dtype == y_train.dtype
+        assert_all_finite(baseline_prediction)
+
+
+def test_binomial_and_multinomial_loss(global_random_seed):
+    """Test that multinomial loss with n_classes = 2 is the same as binomial loss."""
+    rng = np.random.RandomState(global_random_seed)
+    n_samples = 20
+    binom = HalfBinomialLoss()
+    multinom = HalfMultinomialLoss(n_classes=2)
+    y_train = rng.randint(0, 2, size=n_samples).astype(np.float64)
+    raw_prediction = rng.normal(size=n_samples)
+    raw_multinom = np.empty((n_samples, 2))
+    raw_multinom[:, 0] = -0.5 * raw_prediction
+    raw_multinom[:, 1] = 0.5 * raw_prediction
+    assert_allclose(
+        binom.loss(y_true=y_train, raw_prediction=raw_prediction),
+        multinom.loss(y_true=y_train, raw_prediction=raw_multinom),
+    )
+
+
+@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
+def test_predict_proba(loss, global_random_seed):
+    """Test that predict_proba and gradient_proba work as expected."""
+    n_samples = 20
+    y_true, raw_prediction = random_y_true_raw_prediction(
+        loss=loss,
+        n_samples=n_samples,
+        y_bound=(-100, 100),
+        raw_bound=(-5, 5),
+        seed=global_random_seed,
+    )
+
+    if hasattr(loss, "predict_proba"):
+        proba = loss.predict_proba(raw_prediction)
+        assert proba.shape == (n_samples, loss.n_classes)
+        assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
+
+    if hasattr(loss, "gradient_proba"):
+        for grad, proba in (
+            (None, None),
+            (None, np.empty_like(raw_prediction)),
+            (np.empty_like(raw_prediction), None),
+            (np.empty_like(raw_prediction), np.empty_like(raw_prediction)),
+        ):
+            grad, proba = loss.gradient_proba(
+                y_true=y_true,
+                raw_prediction=raw_prediction,
+                sample_weight=None,
+                gradient_out=grad,
+                proba_out=proba,
+            )
+            assert proba.shape == (n_samples, loss.n_classes)
+            assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
+            assert_allclose(
+                grad,
+                loss.gradient(
+                    y_true=y_true,
+                    raw_prediction=raw_prediction,
+                    sample_weight=None,
+                    gradient_out=None,
+                ),
+            )
+
+
+@pytest.mark.parametrize("loss", ALL_LOSSES)
+@pytest.mark.parametrize("sample_weight", [None, "range"])
+@pytest.mark.parametrize("dtype", (np.float32, np.float64))
+@pytest.mark.parametrize("order", ("C", "F"))
+def test_init_gradient_and_hessians(loss, sample_weight, dtype, order):
+    """Test that init_gradient_and_hessian works as expected.
+
+    Passing sample_weight to a loss correctly influences the constant_hessian
+    attribute, and consequently the shape of the hessian array.
+    """
+    n_samples = 5
+    if sample_weight == "range":
+        sample_weight = np.ones(n_samples)
+    loss = loss(sample_weight=sample_weight)
+    gradient, hessian = loss.init_gradient_and_hessian(
+        n_samples=n_samples,
+        dtype=dtype,
+        order=order,
+    )
+    if loss.constant_hessian:
+        assert gradient.shape == (n_samples,)
+        assert hessian.shape == (1,)
+    elif loss.is_multiclass:
+        assert gradient.shape == (n_samples, loss.n_classes)
+        assert hessian.shape == (n_samples, loss.n_classes)
+    else:
+        assert gradient.shape == (n_samples,)
+        assert hessian.shape == (n_samples,)
+
+    assert gradient.dtype == dtype
+    assert hessian.dtype == dtype
+
+    if order == "C":
+        assert gradient.flags.c_contiguous
+        assert hessian.flags.c_contiguous
+    else:
+        assert gradient.flags.f_contiguous
+        assert hessian.flags.f_contiguous
+
+
+@pytest.mark.parametrize("loss", ALL_LOSSES)
+@pytest.mark.parametrize(
+    "params, err_msg",
+    [
+        (
+            {"dtype": np.int64},
+            f"Valid options for 'dtype' are .* Got dtype={np.int64} instead.",
+        ),
+    ],
+)
+def test_init_gradient_and_hessian_raises(loss, params, err_msg):
+    """Test that init_gradient_and_hessian raises errors for invalid input."""
+    loss = loss()
+    with pytest.raises((ValueError, TypeError), match=err_msg):
+        gradient, hessian = loss.init_gradient_and_hessian(n_samples=5, **params)
+
+
+@pytest.mark.parametrize(
+    "loss, params, err_type, err_msg",
+    [
+        (
+            PinballLoss,
+            {"quantile": None},
+            TypeError,
+            "quantile must be an instance of float, not NoneType.",
+        ),
+        (
+            PinballLoss,
+            {"quantile": 0},
+            ValueError,
+            "quantile == 0, must be > 0.",
+        ),
+        (PinballLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."),
+    ],
+)
+def test_loss_init_parameter_validation(loss, params, err_type, err_msg):
+    """Test that loss raises errors for invalid input."""
+    with pytest.raises(err_type, match=err_msg):
+        loss(**params)
+
+
+@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
+def test_loss_pickle(loss):
+    """Test that losses can be pickled."""
+    n_samples = 20
+    y_true, raw_prediction = random_y_true_raw_prediction(
+        loss=loss,
+        n_samples=n_samples,
+        y_bound=(-100, 100),
+        raw_bound=(-5, 5),
+        seed=42,
+    )
+    pickled_loss = pickle.dumps(loss)
+    unpickled_loss = pickle.loads(pickled_loss)
+    assert loss(y_true=y_true, raw_prediction=raw_prediction) == approx(
+        unpickled_loss(y_true=y_true, raw_prediction=raw_prediction)
+    )
+
+
+@pytest.mark.parametrize("p", [-1.5, 0, 1, 1.5, 2, 3])
+def test_tweedie_log_identity_consistency(p):
+    """Test for identical losses when only the link function is different."""
+    half_tweedie_log = HalfTweedieLoss(power=p)
+    half_tweedie_identity = HalfTweedieLossIdentity(power=p)
+    n_samples = 10
+    y_true, raw_prediction = random_y_true_raw_prediction(
+        loss=half_tweedie_log, n_samples=n_samples, seed=42
+    )
+    y_pred = half_tweedie_log.link.inverse(raw_prediction)  # exp(raw_prediction)
+
+    # Let's compare the loss values, up to some constant term that is dropped
+    # in HalfTweedieLoss but not in HalfTweedieLossIdentity.
+    loss_log = half_tweedie_log.loss(
+        y_true=y_true, raw_prediction=raw_prediction
+    ) + half_tweedie_log.constant_to_optimal_zero(y_true)
+    loss_identity = half_tweedie_identity.loss(
+        y_true=y_true, raw_prediction=y_pred
+    ) + half_tweedie_identity.constant_to_optimal_zero(y_true)
+    # Note that HalfTweedieLoss ignores different constant terms than
+    # HalfTweedieLossIdentity. Constant terms mean terms not depending on
+    # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
+    # give the same values.
+    assert_allclose(loss_log, loss_identity)
+
+    # For gradients and hessians, the constant terms do not matter. We have, however,
+    # to account for the chain rule, i.e. with x=raw_prediction
+    #     gradient_log(x) = d/dx loss_log(x)
+    #                     = d/dx loss_identity(exp(x))
+    #                     = exp(x) * gradient_identity(exp(x))
+    # Similarly,
+    #     hessian_log(x) = exp(x) * gradient_identity(exp(x))
+    #                    + exp(x)**2 * hessian_identity(exp(x))
+    gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
+        y_true=y_true, raw_prediction=raw_prediction
+    )
+    gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
+        y_true=y_true, raw_prediction=y_pred
+    )
+    assert_allclose(gradient_log, y_pred * gradient_identity)
+    assert_allclose(
+        hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
+    )
diff --git a/mgm/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py b/mgm/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf3456791e6604eee40bd07aa6a13ccf5bcf6102
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py
@@ -0,0 +1,1089 @@
+"""
+The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
+"""
+
+# Author: Edouard Duchesnay
+# License: BSD 3 clause
+
+from numbers import Integral, Real
+
+import warnings
+from abc import ABCMeta, abstractmethod
+
+import numpy as np
+from scipy.linalg import svd
+
+from ..base import BaseEstimator, RegressorMixin, TransformerMixin
+from ..base import MultiOutputMixin
+from ..base import ClassNamePrefixFeaturesOutMixin
+from ..utils import check_array, check_consistent_length
+from ..utils.fixes import sp_version
+from ..utils.fixes import parse_version
+from ..utils.extmath import svd_flip
+from ..utils.validation import check_is_fitted, FLOAT_DTYPES
+from ..utils._param_validation import Interval, StrOptions
+from ..exceptions import ConvergenceWarning
+
+__all__ = ["PLSCanonical", "PLSRegression", "PLSSVD"]
+
+
+if sp_version >= parse_version("1.7"):
+    # Starting in scipy 1.7 pinv2 was deprecated in favor of pinv.
+    # pinv now uses the svd to compute the pseudo-inverse.
+    from scipy.linalg import pinv as pinv2
+else:
+    from scipy.linalg import pinv2
+
+
+def _pinv2_old(a):
+    # Uses the previous scipy pinv2 behavior, which was updated in:
+    # https://github.com/scipy/scipy/pull/10067
+    # We cannot set `cond` or `rcond` for pinv2 in scipy >= 1.3 to keep the
+    # same behavior of pinv2 for scipy < 1.3, because the condition used to
+    # determine the rank is dependent on the output of svd.
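+    # Editorial sketch of the computation below: for a = U @ diag(s) @ Vh,
+    # the pseudo-inverse is Vh[:rank].conj().T @ diag(1 / s[:rank]) @
+    # U[:, :rank].conj().T, where `rank` counts the singular values above
+    # the cutoff `cond`. The in-place `u /= s[:rank]` folds the diagonal
+    # scaling into U before the final product.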
+    u, s, vh = svd(a, full_matrices=False, check_finite=False)
+
+    t = u.dtype.char.lower()
+    factor = {"f": 1e3, "d": 1e6}
+    cond = np.max(s) * factor[t] * np.finfo(t).eps
+    rank = np.sum(s > cond)
+
+    u = u[:, :rank]
+    u /= s[:rank]
+    return np.transpose(np.conjugate(np.dot(u, vh[:rank])))
+
+
+def _get_first_singular_vectors_power_method(
+    X, Y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False
+):
+    """Return the first left and right singular vectors of X'Y.
+
+    Provides an alternative to the svd(X'Y) and uses the power method instead.
+    With norm_y_weights set to True and in mode A, this corresponds to the
+    algorithm in section 11.3 of Wegelin's review, except that it starts at
+    the "update saliences" part.
+    """
+
+    eps = np.finfo(X.dtype).eps
+    try:
+        y_score = next(col for col in Y.T if np.any(np.abs(col) > eps))
+    except StopIteration as e:
+        raise StopIteration("Y residual is constant") from e
+
+    x_weights_old = 100  # init to big value for first convergence check
+
+    if mode == "B":
+        # Precompute pseudo inverse matrices
+        # Basically: X_pinv = (X.T X)^-1 X.T
+        # Which requires inverting a (n_features, n_features) matrix.
+        # As a result, and as detailed in Wegelin's review, CCA (i.e. mode
+        # B) will be unstable if n_features > n_samples or n_targets >
+        # n_samples
+        X_pinv, Y_pinv = _pinv2_old(X), _pinv2_old(Y)
+
+    for i in range(max_iter):
+        if mode == "B":
+            x_weights = np.dot(X_pinv, y_score)
+        else:
+            x_weights = np.dot(X.T, y_score) / np.dot(y_score, y_score)
+
+        x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps
+        x_score = np.dot(X, x_weights)
+
+        if mode == "B":
+            y_weights = np.dot(Y_pinv, x_score)
+        else:
+            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
+
+        if norm_y_weights:
+            y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps
+
+        y_score = np.dot(Y, y_weights) / (np.dot(y_weights, y_weights) + eps)
+
+        x_weights_diff = x_weights - x_weights_old
+        if np.dot(x_weights_diff, x_weights_diff) < tol or Y.shape[1] == 1:
+            break
+        x_weights_old = x_weights
+
+    n_iter = i + 1
+    if n_iter == max_iter:
+        warnings.warn("Maximum number of iterations reached", ConvergenceWarning)
+
+    return x_weights, y_weights, n_iter
+
+
+def _get_first_singular_vectors_svd(X, Y):
+    """Return the first left and right singular vectors of X'Y.
+
+    Here the whole SVD is computed.
+    """
+    C = np.dot(X.T, Y)
+    U, _, Vt = svd(C, full_matrices=False)
+    return U[:, 0], Vt[0, :]
+
+
+def _center_scale_xy(X, Y, scale=True):
+    """Center X, Y and scale them if the scale parameter is True.
+
+    Returns
+    -------
+    X, Y, x_mean, y_mean, x_std, y_std
+    """
+    # center
+    x_mean = X.mean(axis=0)
+    X -= x_mean
+    y_mean = Y.mean(axis=0)
+    Y -= y_mean
+    # scale
+    if scale:
+        x_std = X.std(axis=0, ddof=1)
+        x_std[x_std == 0.0] = 1.0
+        X /= x_std
+        y_std = Y.std(axis=0, ddof=1)
+        y_std[y_std == 0.0] = 1.0
+        Y /= y_std
+    else:
+        x_std = np.ones(X.shape[1])
+        y_std = np.ones(Y.shape[1])
+    return X, Y, x_mean, y_mean, x_std, y_std
+
+
+def _svd_flip_1d(u, v):
+    """Same as svd_flip but works on 1d arrays, and is inplace"""
+    # svd_flip would force us to convert to 2d array and would also return 2d
+    # arrays. We don't want that.
+    biggest_abs_val_idx = np.argmax(np.abs(u))
+    sign = np.sign(u[biggest_abs_val_idx])
+    u *= sign
+    v *= sign
+
+
+class _PLS(
+    ClassNamePrefixFeaturesOutMixin,
+    TransformerMixin,
+    RegressorMixin,
+    MultiOutputMixin,
+    BaseEstimator,
+    metaclass=ABCMeta,
+):
+    """Partial Least Squares (PLS)
+
+    This class implements the generic PLS algorithm.
+ + Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods, + with emphasis on the two-block case + https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "scale": ["boolean"], + "deflation_mode": [StrOptions({"regression", "canonical"})], + "mode": [StrOptions({"A", "B"})], + "algorithm": [StrOptions({"svd", "nipals"})], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "copy": ["boolean"], + } + + @abstractmethod + def __init__( + self, + n_components=2, + *, + scale=True, + deflation_mode="regression", + mode="A", + algorithm="nipals", + max_iter=500, + tol=1e-06, + copy=True, + ): + self.n_components = n_components + self.deflation_mode = deflation_mode + self.mode = mode + self.scale = scale + self.algorithm = algorithm + self.max_iter = max_iter + self.tol = tol + self.copy = copy + + def fit(self, X, Y): + """Fit model to data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of predictors. + + Y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target vectors, where `n_samples` is the number of samples and + `n_targets` is the number of response variables. + + Returns + ------- + self : object + Fitted model. + """ + self._validate_params() + + check_consistent_length(X, Y) + X = self._validate_data( + X, dtype=np.float64, copy=self.copy, ensure_min_samples=2 + ) + Y = check_array( + Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False + ) + if Y.ndim == 1: + Y = Y.reshape(-1, 1) + + n = X.shape[0] + p = X.shape[1] + q = Y.shape[1] + + n_components = self.n_components + # With PLSRegression n_components is bounded by the rank of (X.T X) see + # Wegelin page 25. With CCA and PLSCanonical, n_components is bounded + # by the rank of X and the rank of Y: see Wegelin page 12 + rank_upper_bound = p if self.deflation_mode == "regression" else min(n, p, q) + if n_components > rank_upper_bound: + raise ValueError( + f"`n_components` upper bound is {rank_upper_bound}. " + f"Got {n_components} instead. Reduce `n_components`." + ) + + self._norm_y_weights = self.deflation_mode == "canonical" # 1.1 + norm_y_weights = self._norm_y_weights + + # Scale (in place) + Xk, Yk, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy( + X, Y, self.scale + ) + + self.x_weights_ = np.zeros((p, n_components)) # U + self.y_weights_ = np.zeros((q, n_components)) # V + self._x_scores = np.zeros((n, n_components)) # Xi + self._y_scores = np.zeros((n, n_components)) # Omega + self.x_loadings_ = np.zeros((p, n_components)) # Gamma + self.y_loadings_ = np.zeros((q, n_components)) # Delta + self.n_iter_ = [] + + # This whole thing corresponds to the algorithm in section 4.1 of the + # review from Wegelin. See above for a notation mapping from code to + # paper. + Y_eps = np.finfo(Yk.dtype).eps + for k in range(n_components): + # Find first left and right singular vectors of the X.T.dot(Y) + # cross-covariance matrix. 
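+            # Editorial note: this singular-vector pair is the weight pair
+            # (x_weights, y_weights) maximizing the sample covariance
+            # x_weights.T @ Xk.T @ Yk @ y_weights under unit-norm
+            # constraints; "nipals" approximates it by power iteration,
+            # while "svd" computes it exactly from the SVD of Xk.T @ Yk.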
+ if self.algorithm == "nipals": + # Replace columns that are all close to zero with zeros + Yk_mask = np.all(np.abs(Yk) < 10 * Y_eps, axis=0) + Yk[:, Yk_mask] = 0.0 + + try: + ( + x_weights, + y_weights, + n_iter_, + ) = _get_first_singular_vectors_power_method( + Xk, + Yk, + mode=self.mode, + max_iter=self.max_iter, + tol=self.tol, + norm_y_weights=norm_y_weights, + ) + except StopIteration as e: + if str(e) != "Y residual is constant": + raise + warnings.warn(f"Y residual is constant at iteration {k}") + break + + self.n_iter_.append(n_iter_) + + elif self.algorithm == "svd": + x_weights, y_weights = _get_first_singular_vectors_svd(Xk, Yk) + + # inplace sign flip for consistency across solvers and archs + _svd_flip_1d(x_weights, y_weights) + + # compute scores, i.e. the projections of X and Y + x_scores = np.dot(Xk, x_weights) + if norm_y_weights: + y_ss = 1 + else: + y_ss = np.dot(y_weights, y_weights) + y_scores = np.dot(Yk, y_weights) / y_ss + + # Deflation: subtract rank-one approx to obtain Xk+1 and Yk+1 + x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores) + Xk -= np.outer(x_scores, x_loadings) + + if self.deflation_mode == "canonical": + # regress Yk on y_score + y_loadings = np.dot(y_scores, Yk) / np.dot(y_scores, y_scores) + Yk -= np.outer(y_scores, y_loadings) + if self.deflation_mode == "regression": + # regress Yk on x_score + y_loadings = np.dot(x_scores, Yk) / np.dot(x_scores, x_scores) + Yk -= np.outer(x_scores, y_loadings) + + self.x_weights_[:, k] = x_weights + self.y_weights_[:, k] = y_weights + self._x_scores[:, k] = x_scores + self._y_scores[:, k] = y_scores + self.x_loadings_[:, k] = x_loadings + self.y_loadings_[:, k] = y_loadings + + # X was approximated as Xi . Gamma.T + X_(R+1) + # Xi . Gamma.T is a sum of n_components rank-1 matrices. X_(R+1) is + # whatever is left to fully reconstruct X, and can be 0 if X is of rank + # n_components. + # Similarly, Y was approximated as Omega . Delta.T + Y_(R+1) + + # Compute transformation matrices (rotations_). See User Guide. + self.x_rotations_ = np.dot( + self.x_weights_, + pinv2(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False), + ) + self.y_rotations_ = np.dot( + self.y_weights_, + pinv2(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False), + ) + # TODO(1.3): change `self._coef_` to `self.coef_` + self._coef_ = np.dot(self.x_rotations_, self.y_loadings_.T) + self._coef_ = (self._coef_ * self._y_std).T + self.intercept_ = self._y_mean + self._n_features_out = self.x_rotations_.shape[1] + return self + + def transform(self, X, Y=None, copy=True): + """Apply the dimension reduction. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Samples to transform. + + Y : array-like of shape (n_samples, n_targets), default=None + Target vectors. + + copy : bool, default=True + Whether to copy `X` and `Y`, or perform in-place normalization. + + Returns + ------- + x_scores, y_scores : array-like or tuple of array-like + Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise. 
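+
+        Notes
+        -----
+        As a rough sketch of what is computed (using private attributes that
+        may change without notice): `x_scores` equals
+        ``((X - self._x_mean) / self._x_std) @ self.x_rotations_``.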
+ """ + check_is_fitted(self) + X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False) + # Normalize + X -= self._x_mean + X /= self._x_std + # Apply rotation + x_scores = np.dot(X, self.x_rotations_) + if Y is not None: + Y = check_array( + Y, input_name="Y", ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES + ) + if Y.ndim == 1: + Y = Y.reshape(-1, 1) + Y -= self._y_mean + Y /= self._y_std + y_scores = np.dot(Y, self.y_rotations_) + return x_scores, y_scores + + return x_scores + + def inverse_transform(self, X, Y=None): + """Transform data back to its original space. + + Parameters + ---------- + X : array-like of shape (n_samples, n_components) + New data, where `n_samples` is the number of samples + and `n_components` is the number of pls components. + + Y : array-like of shape (n_samples, n_components) + New target, where `n_samples` is the number of samples + and `n_components` is the number of pls components. + + Returns + ------- + X_reconstructed : ndarray of shape (n_samples, n_features) + Return the reconstructed `X` data. + + Y_reconstructed : ndarray of shape (n_samples, n_targets) + Return the reconstructed `X` target. Only returned when `Y` is given. + + Notes + ----- + This transformation will only be exact if `n_components=n_features`. + """ + check_is_fitted(self) + X = check_array(X, input_name="X", dtype=FLOAT_DTYPES) + # From pls space to original space + X_reconstructed = np.matmul(X, self.x_loadings_.T) + # Denormalize + X_reconstructed *= self._x_std + X_reconstructed += self._x_mean + + if Y is not None: + Y = check_array(Y, input_name="Y", dtype=FLOAT_DTYPES) + # From pls space to original space + Y_reconstructed = np.matmul(Y, self.y_loadings_.T) + # Denormalize + Y_reconstructed *= self._y_std + Y_reconstructed += self._y_mean + return X_reconstructed, Y_reconstructed + + return X_reconstructed + + def predict(self, X, copy=True): + """Predict targets of given samples. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Samples. + + copy : bool, default=True + Whether to copy `X` and `Y`, or perform in-place normalization. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets) + Returns predicted values. + + Notes + ----- + This call requires the estimation of a matrix of shape + `(n_features, n_targets)`, which may be an issue in high dimensional + space. + """ + check_is_fitted(self) + X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False) + # Normalize + X -= self._x_mean + X /= self._x_std + # TODO(1.3): change `self._coef_` to `self.coef_` + Ypred = X @ self._coef_.T + return Ypred + self.intercept_ + + def fit_transform(self, X, y=None): + """Learn and apply the dimension reduction on the train data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of predictors. + + y : array-like of shape (n_samples, n_targets), default=None + Target vectors, where `n_samples` is the number of samples and + `n_targets` is the number of response variables. + + Returns + ------- + self : ndarray of shape (n_samples, n_components) + Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise. 
+ """ + return self.fit(X, y).transform(X, y) + + @property + def coef_(self): + """The coefficients of the linear model.""" + # TODO(1.3): remove and change `self._coef_` to `self.coef_` + # remove catch warnings from `_get_feature_importances` + # delete self._coef_no_warning + # update the docstring of `coef_` and `intercept_` attribute + if hasattr(self, "_coef_") and getattr(self, "_coef_warning", True): + warnings.warn( + "The attribute `coef_` will be transposed in version 1.3 to be " + "consistent with other linear models in scikit-learn. Currently, " + "`coef_` has a shape of (n_features, n_targets) and in the future it " + "will have a shape of (n_targets, n_features).", + FutureWarning, + ) + # Only warn the first time + self._coef_warning = False + + return self._coef_.T + + def _more_tags(self): + return {"poor_score": True, "requires_y": False} + + +class PLSRegression(_PLS): + """PLS regression. + + PLSRegression is also known as PLS2 or PLS1, depending on the number of + targets. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.8 + + Parameters + ---------- + n_components : int, default=2 + Number of components to keep. Should be in `[1, min(n_samples, + n_features, n_targets)]`. + + scale : bool, default=True + Whether to scale `X` and `Y`. + + max_iter : int, default=500 + The maximum number of iterations of the power method when + `algorithm='nipals'`. Ignored otherwise. + + tol : float, default=1e-06 + The tolerance used as convergence criteria in the power method: the + algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less + than `tol`, where `u` corresponds to the left singular vector. + + copy : bool, default=True + Whether to copy `X` and `Y` in :term:`fit` before applying centering, + and potentially scaling. If `False`, these operations will be done + inplace, modifying both arrays. + + Attributes + ---------- + x_weights_ : ndarray of shape (n_features, n_components) + The left singular vectors of the cross-covariance matrices of each + iteration. + + y_weights_ : ndarray of shape (n_targets, n_components) + The right singular vectors of the cross-covariance matrices of each + iteration. + + x_loadings_ : ndarray of shape (n_features, n_components) + The loadings of `X`. + + y_loadings_ : ndarray of shape (n_targets, n_components) + The loadings of `Y`. + + x_scores_ : ndarray of shape (n_samples, n_components) + The transformed training samples. + + y_scores_ : ndarray of shape (n_samples, n_components) + The transformed training targets. + + x_rotations_ : ndarray of shape (n_features, n_components) + The projection matrix used to transform `X`. + + y_rotations_ : ndarray of shape (n_features, n_components) + The projection matrix used to transform `Y`. + + coef_ : ndarray of shape (n_features, n_targets) + The coefficients of the linear model such that `Y` is approximated as + `Y = X @ coef_ + intercept_`. + + intercept_ : ndarray of shape (n_targets,) + The intercepts of the linear model such that `Y` is approximated as + `Y = X @ coef_ + intercept_`. + + .. versionadded:: 1.1 + + n_iter_ : list of shape (n_components,) + Number of iterations of the power method, for each + component. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. 
versionadded:: 1.0 + + See Also + -------- + PLSCanonical : Partial Least Squares transformer and regressor. + + Examples + -------- + >>> from sklearn.cross_decomposition import PLSRegression + >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] + >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] + >>> pls2 = PLSRegression(n_components=2) + >>> pls2.fit(X, Y) + PLSRegression() + >>> Y_pred = pls2.predict(X) + """ + + _parameter_constraints: dict = {**_PLS._parameter_constraints} + for param in ("deflation_mode", "mode", "algorithm"): + _parameter_constraints.pop(param) + + # This implementation provides the same results that 3 PLS packages + # provided in the R language (R-project): + # - "mixOmics" with function pls(X, Y, mode = "regression") + # - "plspm " with function plsreg2(X, Y) + # - "pls" with function oscorespls.fit(X, Y) + + def __init__( + self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True + ): + super().__init__( + n_components=n_components, + scale=scale, + deflation_mode="regression", + mode="A", + algorithm="nipals", + max_iter=max_iter, + tol=tol, + copy=copy, + ) + + def fit(self, X, Y): + """Fit model to data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of predictors. + + Y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target vectors, where `n_samples` is the number of samples and + `n_targets` is the number of response variables. + + Returns + ------- + self : object + Fitted model. + """ + super().fit(X, Y) + # expose the fitted attributes `x_scores_` and `y_scores_` + self.x_scores_ = self._x_scores + self.y_scores_ = self._y_scores + return self + + +class PLSCanonical(_PLS): + """Partial Least Squares transformer and regressor. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.8 + + Parameters + ---------- + n_components : int, default=2 + Number of components to keep. Should be in `[1, min(n_samples, + n_features, n_targets)]`. + + scale : bool, default=True + Whether to scale `X` and `Y`. + + algorithm : {'nipals', 'svd'}, default='nipals' + The algorithm used to estimate the first singular vectors of the + cross-covariance matrix. 'nipals' uses the power method while 'svd' + will compute the whole SVD. + + max_iter : int, default=500 + The maximum number of iterations of the power method when + `algorithm='nipals'`. Ignored otherwise. + + tol : float, default=1e-06 + The tolerance used as convergence criteria in the power method: the + algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less + than `tol`, where `u` corresponds to the left singular vector. + + copy : bool, default=True + Whether to copy `X` and `Y` in fit before applying centering, and + potentially scaling. If False, these operations will be done inplace, + modifying both arrays. + + Attributes + ---------- + x_weights_ : ndarray of shape (n_features, n_components) + The left singular vectors of the cross-covariance matrices of each + iteration. + + y_weights_ : ndarray of shape (n_targets, n_components) + The right singular vectors of the cross-covariance matrices of each + iteration. + + x_loadings_ : ndarray of shape (n_features, n_components) + The loadings of `X`. + + y_loadings_ : ndarray of shape (n_targets, n_components) + The loadings of `Y`. + + x_rotations_ : ndarray of shape (n_features, n_components) + The projection matrix used to transform `X`. 
+ + y_rotations_ : ndarray of shape (n_features, n_components) + The projection matrix used to transform `Y`. + + coef_ : ndarray of shape (n_features, n_targets) + The coefficients of the linear model such that `Y` is approximated as + `Y = X @ coef_ + intercept_`. + + intercept_ : ndarray of shape (n_targets,) + The intercepts of the linear model such that `Y` is approximated as + `Y = X @ coef_ + intercept_`. + + .. versionadded:: 1.1 + + n_iter_ : list of shape (n_components,) + Number of iterations of the power method, for each + component. Empty if `algorithm='svd'`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + CCA : Canonical Correlation Analysis. + PLSSVD : Partial Least Square SVD. + + Examples + -------- + >>> from sklearn.cross_decomposition import PLSCanonical + >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] + >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] + >>> plsca = PLSCanonical(n_components=2) + >>> plsca.fit(X, Y) + PLSCanonical() + >>> X_c, Y_c = plsca.transform(X, Y) + """ + + _parameter_constraints: dict = {**_PLS._parameter_constraints} + for param in ("deflation_mode", "mode"): + _parameter_constraints.pop(param) + + # This implementation provides the same results that the "plspm" package + # provided in the R language (R-project), using the function plsca(X, Y). + # Results are equal or collinear with the function + # ``pls(..., mode = "canonical")`` of the "mixOmics" package. The + # difference relies in the fact that mixOmics implementation does not + # exactly implement the Wold algorithm since it does not normalize + # y_weights to one. + + def __init__( + self, + n_components=2, + *, + scale=True, + algorithm="nipals", + max_iter=500, + tol=1e-06, + copy=True, + ): + super().__init__( + n_components=n_components, + scale=scale, + deflation_mode="canonical", + mode="A", + algorithm=algorithm, + max_iter=max_iter, + tol=tol, + copy=copy, + ) + + +class CCA(_PLS): + """Canonical Correlation Analysis, also known as "Mode B" PLS. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=2 + Number of components to keep. Should be in `[1, min(n_samples, + n_features, n_targets)]`. + + scale : bool, default=True + Whether to scale `X` and `Y`. + + max_iter : int, default=500 + The maximum number of iterations of the power method. + + tol : float, default=1e-06 + The tolerance used as convergence criteria in the power method: the + algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less + than `tol`, where `u` corresponds to the left singular vector. + + copy : bool, default=True + Whether to copy `X` and `Y` in fit before applying centering, and + potentially scaling. If False, these operations will be done inplace, + modifying both arrays. + + Attributes + ---------- + x_weights_ : ndarray of shape (n_features, n_components) + The left singular vectors of the cross-covariance matrices of each + iteration. + + y_weights_ : ndarray of shape (n_targets, n_components) + The right singular vectors of the cross-covariance matrices of each + iteration. + + x_loadings_ : ndarray of shape (n_features, n_components) + The loadings of `X`. + + y_loadings_ : ndarray of shape (n_targets, n_components) + The loadings of `Y`. 
+ + x_rotations_ : ndarray of shape (n_features, n_components) + The projection matrix used to transform `X`. + + y_rotations_ : ndarray of shape (n_features, n_components) + The projection matrix used to transform `Y`. + + coef_ : ndarray of shape (n_features, n_targets) + The coefficients of the linear model such that `Y` is approximated as + `Y = X @ coef_ + intercept_`. + + intercept_ : ndarray of shape (n_targets,) + The intercepts of the linear model such that `Y` is approximated as + `Y = X @ coef_ + intercept_`. + + .. versionadded:: 1.1 + + n_iter_ : list of shape (n_components,) + Number of iterations of the power method, for each + component. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PLSCanonical : Partial Least Squares transformer and regressor. + PLSSVD : Partial Least Square SVD. + + Examples + -------- + >>> from sklearn.cross_decomposition import CCA + >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]] + >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] + >>> cca = CCA(n_components=1) + >>> cca.fit(X, Y) + CCA(n_components=1) + >>> X_c, Y_c = cca.transform(X, Y) + """ + + _parameter_constraints: dict = {**_PLS._parameter_constraints} + for param in ("deflation_mode", "mode", "algorithm"): + _parameter_constraints.pop(param) + + def __init__( + self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True + ): + super().__init__( + n_components=n_components, + scale=scale, + deflation_mode="canonical", + mode="B", + algorithm="nipals", + max_iter=max_iter, + tol=tol, + copy=copy, + ) + + +class PLSSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Partial Least Square SVD. + + This transformer simply performs a SVD on the cross-covariance matrix + `X'Y`. It is able to project both the training data `X` and the targets + `Y`. The training data `X` is projected on the left singular vectors, while + the targets are projected on the right singular vectors. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.8 + + Parameters + ---------- + n_components : int, default=2 + The number of components to keep. Should be in `[1, + min(n_samples, n_features, n_targets)]`. + + scale : bool, default=True + Whether to scale `X` and `Y`. + + copy : bool, default=True + Whether to copy `X` and `Y` in fit before applying centering, and + potentially scaling. If `False`, these operations will be done inplace, + modifying both arrays. + + Attributes + ---------- + x_weights_ : ndarray of shape (n_features, n_components) + The left singular vectors of the SVD of the cross-covariance matrix. + Used to project `X` in :meth:`transform`. + + y_weights_ : ndarray of (n_targets, n_components) + The right singular vectors of the SVD of the cross-covariance matrix. + Used to project `X` in :meth:`transform`. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + PLSCanonical : Partial Least Squares transformer and regressor. + CCA : Canonical Correlation Analysis. 
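+
+    Notes
+    -----
+    Unlike :class:`PLSCanonical` and :class:`CCA`, no deflation is performed
+    here: all components come from a single SVD of ``X.T @ Y``. As an
+    illustrative (not guaranteed) consequence, with `n_components=1` the
+    fitted `x_weights_` should match the first component of
+    :class:`PLSCanonical` up to sign.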
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.cross_decomposition import PLSSVD
+    >>> X = np.array([[0., 0., 1.],
+    ...               [1., 0., 0.],
+    ...               [2., 2., 2.],
+    ...               [2., 5., 4.]])
+    >>> Y = np.array([[0.1, -0.2],
+    ...               [0.9, 1.1],
+    ...               [6.2, 5.9],
+    ...               [11.9, 12.3]])
+    >>> pls = PLSSVD(n_components=2).fit(X, Y)
+    >>> X_c, Y_c = pls.transform(X, Y)
+    >>> X_c.shape, Y_c.shape
+    ((4, 2), (4, 2))
+    """
+
+    _parameter_constraints: dict = {
+        "n_components": [Interval(Integral, 1, None, closed="left")],
+        "scale": ["boolean"],
+        "copy": ["boolean"],
+    }
+
+    def __init__(self, n_components=2, *, scale=True, copy=True):
+        self.n_components = n_components
+        self.scale = scale
+        self.copy = copy
+
+    def fit(self, X, Y):
+        """Fit model to data.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Training samples.
+
+        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
+            Targets.
+
+        Returns
+        -------
+        self : object
+            Fitted estimator.
+        """
+        self._validate_params()
+
+        check_consistent_length(X, Y)
+        X = self._validate_data(
+            X, dtype=np.float64, copy=self.copy, ensure_min_samples=2
+        )
+        Y = check_array(
+            Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False
+        )
+        if Y.ndim == 1:
+            Y = Y.reshape(-1, 1)
+
+        # We'll compute the SVD of the cross-covariance matrix X.T.dot(Y).
+        # Its rank is at most min(n_samples, n_features, n_targets), so
+        # n_components cannot be bigger than that.
+        n_components = self.n_components
+        rank_upper_bound = min(X.shape[0], X.shape[1], Y.shape[1])
+        if n_components > rank_upper_bound:
+            raise ValueError(
+                f"`n_components` upper bound is {rank_upper_bound}. "
+                f"Got {n_components} instead. Reduce `n_components`."
+            )
+
+        X, Y, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
+            X, Y, self.scale
+        )
+
+        # Compute SVD of cross-covariance matrix
+        C = np.dot(X.T, Y)
+        U, s, Vt = svd(C, full_matrices=False)
+        U = U[:, :n_components]
+        Vt = Vt[:n_components]
+        U, Vt = svd_flip(U, Vt)
+        V = Vt.T
+
+        self.x_weights_ = U
+        self.y_weights_ = V
+        self._n_features_out = self.x_weights_.shape[1]
+        return self
+
+    def transform(self, X, Y=None):
+        """
+        Apply the dimensionality reduction.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Samples to be transformed.
+
+        Y : array-like of shape (n_samples,) or (n_samples, n_targets), \
+                default=None
+            Targets.
+
+        Returns
+        -------
+        x_scores : array-like or tuple of array-like
+            `(X_transformed, Y_transformed)` if `Y` is given,
+            `X_transformed` otherwise.
+        """
+        check_is_fitted(self)
+        X = self._validate_data(X, dtype=np.float64, reset=False)
+        Xr = (X - self._x_mean) / self._x_std
+        x_scores = np.dot(Xr, self.x_weights_)
+        if Y is not None:
+            Y = check_array(Y, input_name="Y", ensure_2d=False, dtype=np.float64)
+            if Y.ndim == 1:
+                Y = Y.reshape(-1, 1)
+            Yr = (Y - self._y_mean) / self._y_std
+            y_scores = np.dot(Yr, self.y_weights_)
+            return x_scores, y_scores
+        return x_scores
+
+    def fit_transform(self, X, y=None):
+        """Learn and apply the dimensionality reduction.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Training samples.
+
+        y : array-like of shape (n_samples,) or (n_samples, n_targets), \
+                default=None
+            Targets.
+
+        Returns
+        -------
+        out : array-like or tuple of array-like
+            `(X_transformed, Y_transformed)` if `y` is given,
+            `X_transformed` otherwise.
+ """ + return self.fit(X, y).transform(X, y) diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57cfbbaefc6f4e17ea93889ac811499a201bf970 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_arff.py b/mgm/lib/python3.10/site-packages/sklearn/externals/_arff.py new file mode 100644 index 0000000000000000000000000000000000000000..7c9d51d0702ff5cbe70b80d405747e37a5e6cb1d --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/externals/_arff.py @@ -0,0 +1,1107 @@ +# ============================================================================= +# Federal University of Rio Grande do Sul (UFRGS) +# Connectionist Artificial Intelligence Laboratory (LIAC) +# Renato de Pontes Pereira - rppereira@inf.ufrgs.br +# ============================================================================= +# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# ============================================================================= + +''' +The liac-arff module implements functions to read and write ARFF files in +Python. It was created in the Connectionist Artificial Intelligence Laboratory +(LIAC), which takes place at the Federal University of Rio Grande do Sul +(UFRGS), in Brazil. + +ARFF (Attribute-Relation File Format) is an file format specially created for +describe datasets which are commonly used for machine learning experiments and +software. This file format was created to be used in Weka, the best +representative software for machine learning automated experiments. + +An ARFF file can be divided into two sections: header and data. The Header +describes the metadata of the dataset, including a general description of the +dataset, its name and its attributes. 
+The source below is an example of the header section of an XOR dataset::
+
+    %
+    % XOR Dataset
+    %
+    % Created by Renato Pereira
+    %            rppereira@inf.ufrgs.br
+    %            http://inf.ufrgs.br/~rppereira
+    %
+    %
+    @RELATION XOR
+
+    @ATTRIBUTE input1 REAL
+    @ATTRIBUTE input2 REAL
+    @ATTRIBUTE y REAL
+
+The Data section of an ARFF file describes the observations of the dataset,
+which in the case of the XOR dataset are::
+
+    @DATA
+    0.0,0.0,0.0
+    0.0,1.0,1.0
+    1.0,0.0,1.0
+    1.0,1.0,0.0
+    %
+    %
+    %
+
+Notice that several lines start with a ``%`` symbol, denoting a comment;
+lines beginning with ``%`` are therefore ignored, except for the description
+part at the beginning of the file. The declarations ``@RELATION``,
+``@ATTRIBUTE``, and ``@DATA`` are all case insensitive and obligatory.
+
+For more information and details about the ARFF file description, consult
+http://www.cs.waikato.ac.nz/~ml/weka/arff.html
+
+
+ARFF Files in Python
+~~~~~~~~~~~~~~~~~~~~
+
+This module uses built-in Python objects to represent a deserialized ARFF
+file. A dictionary is used as the container of the data and metadata of the
+ARFF file, and has the following keys:
+
+- **description**: (OPTIONAL) a string with the description of the dataset.
+- **relation**: (OBLIGATORY) a string with the name of the dataset.
+- **attributes**: (OBLIGATORY) a list of attributes with the following
+  template::
+
+      (attribute_name, attribute_type)
+
+  the attribute_name is a string, and attribute_type must be a string
+  or a list of strings.
+- **data**: (OBLIGATORY) a list of data instances. Each data instance must be
+  a list with values, depending on the attributes.
+
+The above keys must be written exactly as described, i.e., the keys are
+case sensitive. The attribute type ``attribute_type`` must be one of these
+strings (they are not case sensitive): ``NUMERIC``, ``INTEGER``, ``REAL`` or
+``STRING``. For nominal attributes, the ``attribute_type`` must be a list of
+strings.
+ +In this format, the XOR dataset presented above can be represented as a python +object as:: + + xor_dataset = { + 'description': 'XOR Dataset', + 'relation': 'XOR', + 'attributes': [ + ('input1', 'REAL'), + ('input2', 'REAL'), + ('y', 'REAL'), + ], + 'data': [ + [0.0, 0.0, 0.0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0] + ] + } + + +Features +~~~~~~~~ + +This module provides several features, including: + +- Read and write ARFF files using python built-in structures, such dictionaries + and lists; +- Supports `scipy.sparse.coo `_ + and lists of dictionaries as used by SVMLight +- Supports the following attribute types: NUMERIC, REAL, INTEGER, STRING, and + NOMINAL; +- Has an interface similar to other built-in modules such as ``json``, or + ``zipfile``; +- Supports read and write the descriptions of files; +- Supports missing values and names with spaces; +- Supports unicode values and names; +- Fully compatible with Python 2.7+, Python 3.5+, pypy and pypy3; +- Under `MIT License `_ + +''' +__author__ = 'Renato de Pontes Pereira, Matthias Feurer, Joel Nothman' +__author_email__ = ('renato.ppontes@gmail.com, ' + 'feurerm@informatik.uni-freiburg.de, ' + 'joel.nothman@gmail.com') +__version__ = '2.4.0' + +import re +import csv +from typing import TYPE_CHECKING +from typing import Optional, List, Dict, Any, Iterator, Union, Tuple + +# CONSTANTS =================================================================== +_SIMPLE_TYPES = ['NUMERIC', 'REAL', 'INTEGER', 'STRING'] + +_TK_DESCRIPTION = '%' +_TK_COMMENT = '%' +_TK_RELATION = '@RELATION' +_TK_ATTRIBUTE = '@ATTRIBUTE' +_TK_DATA = '@DATA' + +_RE_RELATION = re.compile(r'^([^\{\}%,\s]*|\".*\"|\'.*\')$', re.UNICODE) +_RE_ATTRIBUTE = re.compile(r'^(\".*\"|\'.*\'|[^\{\}%,\s]*)\s+(.+)$', re.UNICODE) +_RE_QUOTE_CHARS = re.compile(r'["\'\\\s%,\000-\031]', re.UNICODE) +_RE_ESCAPE_CHARS = re.compile(r'(?=["\'\\%])|[\n\r\t\000-\031]') +_RE_SPARSE_LINE = re.compile(r'^\s*\{.*\}\s*$', re.UNICODE) +_RE_NONTRIVIAL_DATA = re.compile('["\'{}\\s]', re.UNICODE) + +ArffDenseDataType = Iterator[List] +ArffSparseDataType = Tuple[List, ...] + + +if TYPE_CHECKING: + # typing_extensions is available when mypy is installed + from typing_extensions import TypedDict + + class ArffContainerType(TypedDict): + description: str + relation: str + attributes: List + data: Union[ArffDenseDataType, ArffSparseDataType] + +else: + ArffContainerType = Dict[str, Any] + + +def _build_re_values(): + quoted_re = r''' + " # open quote followed by zero or more of: + (?: + (?= len(conversors): + raise BadDataFormat(row) + # XXX: int 0 is used for implicit values, not '0' + values = [values[i] if i in values else 0 for i in + range(len(conversors))] + else: + if len(values) != len(conversors): + raise BadDataFormat(row) + + yield self._decode_values(values, conversors) + + @staticmethod + def _decode_values(values, conversors): + try: + values = [None if value is None else conversor(value) + for conversor, value + in zip(conversors, values)] + except ValueError as exc: + if 'float: ' in str(exc): + raise BadNumericalValue() + return values + + def encode_data(self, data, attributes): + '''(INTERNAL) Encodes a line of data. + + Data instances follow the csv format, i.e, attribute values are + delimited by commas. After converted from csv. + + :param data: a list of values. + :param attributes: a list of attributes. Used to check if data is valid. + :return: a string with the encoded data line. 
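+
+        For example (illustrative), an instance ``[1.0, None, 'a']`` with
+        three attributes is yielded as the line ``1.0,?,a`` (this method is
+        a generator, producing one encoded line per instance).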
+ ''' + current_row = 0 + + for inst in data: + if len(inst) != len(attributes): + raise BadObject( + 'Instance %d has %d attributes, expected %d' % + (current_row, len(inst), len(attributes)) + ) + + new_data = [] + for value in inst: + if value is None or value == '' or value != value: + s = '?' + else: + s = encode_string(str(value)) + new_data.append(s) + + current_row += 1 + yield ','.join(new_data) + + +class _DataListMixin: + """Mixin to return a list from decode_rows instead of a generator""" + def decode_rows(self, stream, conversors): + return list(super().decode_rows(stream, conversors)) + + +class Data(_DataListMixin, DenseGeneratorData): + pass + + +class COOData: + def decode_rows(self, stream, conversors): + data, rows, cols = [], [], [] + for i, row in enumerate(stream): + values = _parse_values(row) + if not isinstance(values, dict): + raise BadLayout() + if not values: + continue + row_cols, values = zip(*sorted(values.items())) + try: + values = [value if value is None else conversors[key](value) + for key, value in zip(row_cols, values)] + except ValueError as exc: + if 'float: ' in str(exc): + raise BadNumericalValue() + raise + except IndexError: + # conversor out of range + raise BadDataFormat(row) + + data.extend(values) + rows.extend([i] * len(values)) + cols.extend(row_cols) + + return data, rows, cols + + def encode_data(self, data, attributes): + num_attributes = len(attributes) + new_data = [] + current_row = 0 + + row = data.row + col = data.col + data = data.data + + # Check if the rows are sorted + if not all(row[i] <= row[i + 1] for i in range(len(row) - 1)): + raise ValueError("liac-arff can only output COO matrices with " + "sorted rows.") + + for v, col, row in zip(data, col, row): + if row > current_row: + # Add empty rows if necessary + while current_row < row: + yield " ".join(["{", ','.join(new_data), "}"]) + new_data = [] + current_row += 1 + + if col >= num_attributes: + raise BadObject( + 'Instance %d has at least %d attributes, expected %d' % + (current_row, col + 1, num_attributes) + ) + + if v is None or v == '' or v != v: + s = '?' + else: + s = encode_string(str(v)) + new_data.append("%d %s" % (col, s)) + + yield " ".join(["{", ','.join(new_data), "}"]) + +class LODGeneratorData: + def decode_rows(self, stream, conversors): + for row in stream: + values = _parse_values(row) + + if not isinstance(values, dict): + raise BadLayout() + try: + yield {key: None if value is None else conversors[key](value) + for key, value in values.items()} + except ValueError as exc: + if 'float: ' in str(exc): + raise BadNumericalValue() + raise + except IndexError: + # conversor out of range + raise BadDataFormat(row) + + def encode_data(self, data, attributes): + current_row = 0 + + num_attributes = len(attributes) + for row in data: + new_data = [] + + if len(row) > 0 and max(row) >= num_attributes: + raise BadObject( + 'Instance %d has %d attributes, expected %d' % + (current_row, max(row) + 1, num_attributes) + ) + + for col in sorted(row): + v = row[col] + if v is None or v == '' or v != v: + s = '?' 
+                else:
+                    s = encode_string(str(v))
+                new_data.append("%d %s" % (col, s))
+
+            current_row += 1
+            yield " ".join(["{", ','.join(new_data), "}"])
+
+class LODData(_DataListMixin, LODGeneratorData):
+    pass
+
+
+def _get_data_object_for_decoding(matrix_type):
+    if matrix_type == DENSE:
+        return Data()
+    elif matrix_type == COO:
+        return COOData()
+    elif matrix_type == LOD:
+        return LODData()
+    elif matrix_type == DENSE_GEN:
+        return DenseGeneratorData()
+    elif matrix_type == LOD_GEN:
+        return LODGeneratorData()
+    else:
+        raise ValueError("Matrix type %s not supported." % str(matrix_type))
+
+def _get_data_object_for_encoding(matrix):
+    # Probably a scipy.sparse
+    if hasattr(matrix, 'format'):
+        if matrix.format == 'coo':
+            return COOData()
+        else:
+            raise ValueError('Cannot guess matrix format!')
+    elif isinstance(matrix[0], dict):
+        return LODData()
+    else:
+        return Data()
+
+# =============================================================================
+
+# ADVANCED INTERFACE ==========================================================
+class ArffDecoder:
+    '''An ARFF decoder.'''
+
+    def __init__(self):
+        '''Constructor.'''
+        self._conversors = []
+        self._current_line = 0
+
+    def _decode_comment(self, s):
+        '''(INTERNAL) Decodes a comment line.
+
+        Comments are single line strings starting, obligatorily, with the ``%``
+        character, and can have any symbol, including whitespaces or special
+        characters.
+
+        This method must receive a normalized string, i.e., a string without
+        padding, including the "\r\n" characters.
+
+        :param s: a normalized string.
+        :return: a string with the decoded comment.
+        '''
+        res = re.sub(r'^\%( )?', '', s)
+        return res
+
+    def _decode_relation(self, s):
+        '''(INTERNAL) Decodes a relation line.
+
+        The relation declaration is a line with the format ``@RELATION
+        <relation-name>``, where ``relation-name`` is a string. The string must
+        start with an alphabetic character and must be quoted if the name
+        includes spaces, otherwise this method will raise a
+        `BadRelationFormat` exception.
+
+        This method must receive a normalized string, i.e., a string without
+        padding, including the "\r\n" characters.
+
+        :param s: a normalized string.
+        :return: a string with the decoded relation name.
+        '''
+        _, v = s.split(' ', 1)
+        v = v.strip()
+
+        if not _RE_RELATION.match(v):
+            raise BadRelationFormat()
+
+        res = str(v.strip('"\''))
+        return res
+
+    def _decode_attribute(self, s):
+        '''(INTERNAL) Decodes an attribute line.
+
+        The attribute is the most complex declaration in an arff file. All
+        attributes must follow the template::
+
+            @attribute <attribute-name> <datatype>
+
+        where ``attribute-name`` is a string, quoted if the name contains any
+        whitespace, and ``datatype`` can be:
+
+        - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
+        - Strings as ``STRING``.
+        - Dates (NOT IMPLEMENTED).
+        - Nominal attributes with format:
+
+          {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
+
+        The nominal names follow the rules for the attribute names, i.e., they
+        must be quoted if the name contains whitespaces.
+
+        This method must receive a normalized string, i.e., a string without
+        padding, including the "\r\n" characters.
+
+        :param s: a normalized string.
+        :return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
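+
+        For example (illustrative), ``@ATTRIBUTE species {setosa, virginica}``
+        decodes to ``('species', ['setosa', 'virginica'])``, while
+        ``@ATTRIBUTE sepal_length REAL`` decodes to
+        ``('sepal_length', 'REAL')``.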
+ ''' + _, v = s.split(' ', 1) + v = v.strip() + + # Verify the general structure of declaration + m = _RE_ATTRIBUTE.match(v) + if not m: + raise BadAttributeFormat() + + # Extracts the raw name and type + name, type_ = m.groups() + + # Extracts the final name + name = str(name.strip('"\'')) + + # Extracts the final type + if type_[:1] == "{" and type_[-1:] == "}": + try: + type_ = _parse_values(type_.strip('{} ')) + except Exception: + raise BadAttributeType() + if isinstance(type_, dict): + raise BadAttributeType() + + else: + # If not nominal, verify the type name + type_ = str(type_).upper() + if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']: + raise BadAttributeType() + + return (name, type_) + + def _decode(self, s, encode_nominal=False, matrix_type=DENSE): + '''Do the job the ``encode``.''' + + # Make sure this method is idempotent + self._current_line = 0 + + # If string, convert to a list of lines + if isinstance(s, str): + s = s.strip('\r\n ').replace('\r\n', '\n').split('\n') + + # Create the return object + obj: ArffContainerType = { + 'description': '', + 'relation': '', + 'attributes': [], + 'data': [] + } + attribute_names = {} + + # Create the data helper object + data = _get_data_object_for_decoding(matrix_type) + + # Read all lines + STATE = _TK_DESCRIPTION + s = iter(s) + for row in s: + self._current_line += 1 + # Ignore empty lines + row = row.strip(' \r\n') + if not row: continue + + u_row = row.upper() + + # DESCRIPTION ----------------------------------------------------- + if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION: + obj['description'] += self._decode_comment(row) + '\n' + # ----------------------------------------------------------------- + + # RELATION -------------------------------------------------------- + elif u_row.startswith(_TK_RELATION): + if STATE != _TK_DESCRIPTION: + raise BadLayout() + + STATE = _TK_RELATION + obj['relation'] = self._decode_relation(row) + # ----------------------------------------------------------------- + + # ATTRIBUTE ------------------------------------------------------- + elif u_row.startswith(_TK_ATTRIBUTE): + if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE: + raise BadLayout() + + STATE = _TK_ATTRIBUTE + + attr = self._decode_attribute(row) + if attr[0] in attribute_names: + raise BadAttributeName(attr[0], attribute_names[attr[0]]) + else: + attribute_names[attr[0]] = self._current_line + obj['attributes'].append(attr) + + if isinstance(attr[1], (list, tuple)): + if encode_nominal: + conversor = EncodedNominalConversor(attr[1]) + else: + conversor = NominalConversor(attr[1]) + else: + CONVERSOR_MAP = {'STRING': str, + 'INTEGER': lambda x: int(float(x)), + 'NUMERIC': float, + 'REAL': float} + conversor = CONVERSOR_MAP[attr[1]] + + self._conversors.append(conversor) + # ----------------------------------------------------------------- + + # DATA ------------------------------------------------------------ + elif u_row.startswith(_TK_DATA): + if STATE != _TK_ATTRIBUTE: + raise BadLayout() + + break + # ----------------------------------------------------------------- + + # COMMENT --------------------------------------------------------- + elif u_row.startswith(_TK_COMMENT): + pass + # ----------------------------------------------------------------- + else: + # Never found @DATA + raise BadLayout() + + def stream(): + for row in s: + self._current_line += 1 + row = row.strip() + # Ignore empty lines and comment lines. 
+                if row and not row.startswith(_TK_COMMENT):
+                    yield row
+
+        # Alter the data object
+        obj['data'] = data.decode_rows(stream(), self._conversors)
+        if obj['description'].endswith('\n'):
+            obj['description'] = obj['description'][:-1]
+
+        return obj
+
+    def decode(self, s, encode_nominal=False, return_type=DENSE):
+        '''Returns the Python representation of a given ARFF file.
+
+        When a file object is passed as an argument, this method reads lines
+        iteratively, avoiding loading unnecessary information into memory.
+
+        :param s: a string or file object with the ARFF file.
+        :param encode_nominal: boolean, if True perform a label encoding
+            while reading the .arff file.
+        :param return_type: determines the data structure used to store the
+            dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
+            `arff.DENSE_GEN` or `arff.LOD_GEN`.
+            Consult the sections on `working with sparse data`_ and `loading
+            progressively`_.
+        '''
+        try:
+            return self._decode(s, encode_nominal=encode_nominal,
+                                matrix_type=return_type)
+        except ArffException as e:
+            e.line = self._current_line
+            raise e
+
+
+class ArffEncoder:
+    '''An ARFF encoder.'''
+
+    def _encode_comment(self, s=''):
+        '''(INTERNAL) Encodes a comment line.
+
+        Comments are single line strings starting, obligatorily, with the ``%``
+        character, and can have any symbol, including whitespaces or special
+        characters.
+
+        If ``s`` is None, this method will simply return an empty comment.
+
+        :param s: (OPTIONAL) string.
+        :return: a string with the encoded comment line.
+        '''
+        if s:
+            return '%s %s'%(_TK_COMMENT, s)
+        else:
+            return '%s' % _TK_COMMENT
+
+    def _encode_relation(self, name):
+        '''(INTERNAL) Encodes a relation line.
+
+        The relation declaration is a line with the format ``@RELATION
+        <relation-name>``, where ``relation-name`` is a string.
+
+        :param name: a string.
+        :return: a string with the encoded relation declaration.
+        '''
+        for char in ' %{},':
+            if char in name:
+                name = '"%s"'%name
+                break
+
+        return '%s %s'%(_TK_RELATION, name)
+
+    def _encode_attribute(self, name, type_):
+        '''(INTERNAL) Encodes an attribute line.
+
+        The attribute follows the template::
+
+            @attribute <attribute-name> <datatype>
+
+        where ``attribute-name`` is a string, and ``datatype`` can be:
+
+        - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
+        - Strings as ``STRING``.
+        - Dates (NOT IMPLEMENTED).
+        - Nominal attributes with format:
+
+          {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
+
+        This method must receive the name of the attribute and its type; if
+        the attribute type is nominal, ``type`` must be a list of values.
+
+        :param name: a string.
+        :param type_: a string or a list of strings.
+        :return: a string with the encoded attribute declaration.
+        '''
+        for char in ' %{},':
+            if char in name:
+                name = '"%s"'%name
+                break
+
+        if isinstance(type_, (tuple, list)):
+            type_tmp = ['%s' % encode_string(type_k) for type_k in type_]
+            type_ = '{%s}'%(', '.join(type_tmp))
+
+        return '%s %s %s'%(_TK_ATTRIBUTE, name, type_)
+
+    def encode(self, obj):
+        '''Encodes a given object to an ARFF file.
+
+        :param obj: the object containing the ARFF information.
+        :return: the ARFF file as a string.
+        '''
+        data = [row for row in self.iter_encode(obj)]
+
+        return '\n'.join(data)
+
+    def iter_encode(self, obj):
+        '''The iterative version of `arff.ArffEncoder.encode`.
+
+        This iteratively encodes a given object and returns, one by one, the
+        lines of the ARFF file.
+
+        :param obj: the object containing the ARFF information.
+        :return: (yields) the ARFF file as strings.
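+
+        A minimal usage sketch (``obj`` being an ARFF dictionary as described
+        in the module docstring, ``fp`` a writable file object)::
+
+            for line in ArffEncoder().iter_encode(obj):
+                fp.write(line + '\n')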
+ ''' + # DESCRIPTION + if obj.get('description', None): + for row in obj['description'].split('\n'): + yield self._encode_comment(row) + + # RELATION + if not obj.get('relation'): + raise BadObject('Relation name not found or with invalid value.') + + yield self._encode_relation(obj['relation']) + yield '' + + # ATTRIBUTES + if not obj.get('attributes'): + raise BadObject('Attributes not found.') + + attribute_names = set() + for attr in obj['attributes']: + # Verify for bad object format + if not isinstance(attr, (tuple, list)) or \ + len(attr) != 2 or \ + not isinstance(attr[0], str): + raise BadObject('Invalid attribute declaration "%s"'%str(attr)) + + if isinstance(attr[1], str): + # Verify for invalid types + if attr[1] not in _SIMPLE_TYPES: + raise BadObject('Invalid attribute type "%s"'%str(attr)) + + # Verify for bad object format + elif not isinstance(attr[1], (tuple, list)): + raise BadObject('Invalid attribute type "%s"'%str(attr)) + + # Verify attribute name is not used twice + if attr[0] in attribute_names: + raise BadObject('Trying to use attribute name "%s" for the ' + 'second time.' % str(attr[0])) + else: + attribute_names.add(attr[0]) + + yield self._encode_attribute(attr[0], attr[1]) + yield '' + attributes = obj['attributes'] + + # DATA + yield _TK_DATA + if 'data' in obj: + data = _get_data_object_for_encoding(obj.get('data')) + yield from data.encode_data(obj.get('data'), attributes) + + yield '' + +# ============================================================================= + +# BASIC INTERFACE ============================================================= +def load(fp, encode_nominal=False, return_type=DENSE): + '''Load a file-like object containing the ARFF document and convert it into + a Python object. + + :param fp: a file-like object. + :param encode_nominal: boolean, if True perform a label encoding + while reading the .arff file. + :param return_type: determines the data structure used to store the + dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`, + `arff.DENSE_GEN` or `arff.LOD_GEN`. + Consult the sections on `working with sparse data`_ and `loading + progressively`_. + :return: a dictionary. + ''' + decoder = ArffDecoder() + return decoder.decode(fp, encode_nominal=encode_nominal, + return_type=return_type) + +def loads(s, encode_nominal=False, return_type=DENSE): + '''Convert a string instance containing the ARFF document into a Python + object. + + :param s: a string object. + :param encode_nominal: boolean, if True perform a label encoding + while reading the .arff file. + :param return_type: determines the data structure used to store the + dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`, + `arff.DENSE_GEN` or `arff.LOD_GEN`. + Consult the sections on `working with sparse data`_ and `loading + progressively`_. + :return: a dictionary. + ''' + decoder = ArffDecoder() + return decoder.decode(s, encode_nominal=encode_nominal, + return_type=return_type) + +def dump(obj, fp): + '''Serialize an object representing the ARFF document to a given file-like + object. + + :param obj: a dictionary. + :param fp: a file-like object. + ''' + encoder = ArffEncoder() + generator = encoder.iter_encode(obj) + + last_row = next(generator) + for row in generator: + fp.write(last_row + '\n') + last_row = row + fp.write(last_row) + + return fp + +def dumps(obj): + '''Serialize an object representing the ARFF document, returning a string. + + :param obj: a dictionary. + :return: a string with the ARFF document. 
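+
+    A round-trip sketch (illustrative, ``obj`` as above)::
+
+        assert loads(dumps(obj))['relation'] == obj['relation']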
+ ''' + encoder = ArffEncoder() + return encoder.encode(obj) +# ============================================================================= diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_lobpcg.py b/mgm/lib/python3.10/site-packages/sklearn/externals/_lobpcg.py new file mode 100644 index 0000000000000000000000000000000000000000..398800679d295b2e6b6b5f477e574e590c2dca28 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/sklearn/externals/_lobpcg.py @@ -0,0 +1,991 @@ +""" +scikit-learn copy of scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py v1.10 +to be deleted after scipy 1.4 becomes a dependency in scikit-lean +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). + +References +---------- +.. [1] A. V. Knyazev (2001), + Toward the Optimal Preconditioned Eigensolver: Locally Optimal + Block Preconditioned Conjugate Gradient Method. + SIAM Journal on Scientific Computing 23, no. 2, + pp. 517-541. :doi:`10.1137/S1064827500366124` + +.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007), + Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) + in hypre and PETSc. :arxiv:`0705.2626` + +.. [3] A. V. Knyazev's C and MATLAB implementations: + https://github.com/lobpcg/blopex +""" +import inspect +import warnings +import numpy as np +from scipy.linalg import (inv, eigh, cho_factor, cho_solve, + cholesky, LinAlgError) +from scipy.sparse.linalg import LinearOperator +from scipy.sparse import isspmatrix +from numpy import block as bmat + +__all__ = ["lobpcg"] + + +def _report_nonhermitian(M, name): + """ + Report if `M` is not a Hermitian matrix given its type. + """ + from scipy.linalg import norm + + md = M - M.T.conj() + nmd = norm(md, 1) + tol = 10 * np.finfo(M.dtype).eps + tol = max(tol, tol * norm(M, 1)) + if nmd > tol: + warnings.warn( + f"Matrix {name} of the type {M.dtype} is not Hermitian: " + f"condition: {nmd} < {tol} fails.", + UserWarning, stacklevel=4 + ) + +def _as2d(ar): + """ + If the input array is 2D return it, if it is 1D, append a dimension, + making it a column vector. + """ + if ar.ndim == 2: + return ar + else: # Assume 1! + aux = np.array(ar, copy=False) + aux.shape = (ar.shape[0], 1) + return aux + + +def _makeMatMat(m): + if m is None: + return None + elif callable(m): + return lambda v: m(v) + else: + return lambda v: m @ v + + +def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY): + """Changes blockVectorV in place.""" + YBV = np.dot(blockVectorBY.T.conj(), blockVectorV) + tmp = cho_solve(factYBY, YBV) + blockVectorV -= np.dot(blockVectorY, tmp) + + +def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, + verbosityLevel=0): + """in-place B-orthonormalize the given block vector using Cholesky.""" + normalization = blockVectorV.max(axis=0) + np.finfo(blockVectorV.dtype).eps + blockVectorV = blockVectorV / normalization + if blockVectorBV is None: + if B is not None: + try: + blockVectorBV = B(blockVectorV) + except Exception as e: + if verbosityLevel: + warnings.warn( + f"Secondary MatMul call failed with error\n" + f"{e}\n", + UserWarning, stacklevel=3 + ) + return None, None, None, normalization + if blockVectorBV.shape != blockVectorV.shape: + raise ValueError( + f"The shape {blockVectorV.shape} " + f"of the orthogonalized matrix not preserved\n" + f"and changed to {blockVectorBV.shape} " + f"after multiplying by the secondary matrix.\n" + ) + else: + blockVectorBV = blockVectorV # Shared data!!! 
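+            # When B is None, BV simply aliases V (no copy); any in-place
+            # modification of one block would be visible through the other.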
+ else: + blockVectorBV = blockVectorBV / normalization + VBV = blockVectorV.T.conj() @ blockVectorBV + try: + # VBV is a Cholesky factor from now on... + VBV = cholesky(VBV, overwrite_a=True) + VBV = inv(VBV, overwrite_a=True) + blockVectorV = blockVectorV @ VBV + # blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T + if B is not None: + blockVectorBV = blockVectorBV @ VBV + # blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T + return blockVectorV, blockVectorBV, VBV, normalization + except LinAlgError: + if verbosityLevel: + warnings.warn( + "Cholesky has failed.", + UserWarning, stacklevel=3 + ) + return None, None, None, normalization + + +def _get_indx(_lambda, num, largest): + """Get `num` indices into `_lambda` depending on `largest` option.""" + ii = np.argsort(_lambda) + if largest: + ii = ii[:-num - 1:-1] + else: + ii = ii[:num] + + return ii + + +def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel): + if verbosityLevel: + _report_nonhermitian(gramA, "gramA") + _report_nonhermitian(gramB, "gramB") + + +def lobpcg( + A, + X, + B=None, + M=None, + Y=None, + tol=None, + maxiter=None, + largest=True, + verbosityLevel=0, + retLambdaHistory=False, + retResidualNormsHistory=False, + restartControl=20, +): + """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). + + LOBPCG is a preconditioned eigensolver for large symmetric positive + definite (SPD) generalized eigenproblems. + + Parameters + ---------- + A : {sparse matrix, dense matrix, LinearOperator, callable object} + The symmetric linear operator of the problem, usually a + sparse matrix. Often called the "stiffness matrix". + X : ndarray, float32 or float64 + Initial approximation to the ``k`` eigenvectors (non-sparse). If `A` + has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``. + B : {dense matrix, sparse matrix, LinearOperator, callable object} + Optional. + The right hand side operator in a generalized eigenproblem. + By default, ``B = Identity``. Often called the "mass matrix". + M : {dense matrix, sparse matrix, LinearOperator, callable object} + Optional. + Preconditioner to `A`; by default ``M = Identity``. + `M` should approximate the inverse of `A`. + Y : ndarray, float32 or float64, optional. + An n-by-sizeY matrix of constraints (non-sparse), sizeY < n. + The iterations will be performed in the B-orthogonal complement + of the column-space of Y. Y must be full rank. + tol : scalar, optional. + Solver tolerance (stopping criterion). + The default is ``tol=n*sqrt(eps)``. + maxiter : int, optional. + Maximum number of iterations. The default is ``maxiter=20``. + largest : bool, optional. + When True, solve for the largest eigenvalues, otherwise the smallest. + verbosityLevel : int, optional + Controls solver output. The default is ``verbosityLevel=0``. + retLambdaHistory : bool, optional. + Whether to return eigenvalue history. Default is False. + retResidualNormsHistory : bool, optional. + Whether to return history of residual norms. Default is False. + restartControl : int, optional. + Iterations restart if the residuals jump up 2**restartControl times + compared to the smallest ones recorded in retResidualNormsHistory. + The default is ``restartControl=20``, making the restarts rare for + backward compatibility. + + Returns + ------- + w : ndarray + Array of ``k`` eigenvalues. + v : ndarray + An array of ``k`` eigenvectors. `v` has the same shape as `X`. + lambdas : ndarray, optional + The eigenvalue history, if `retLambdaHistory` is True. 
+    rnorms : ndarray, optional
+        The history of residual norms, if `retResidualNormsHistory` is True.
+
+    Notes
+    -----
+    The iterative loop in lobpcg runs maxit=maxiter (or 20 if maxit=None)
+    iterations at most and finishes earlier if the tolerance is met.
+    Breaking backward compatibility with the previous version, lobpcg
+    now returns the block of iterative vectors with the best accuracy rather
+    than the last one iterated, as a cure for possible divergence.
+
+    The size of the iteration history output equals the number of the best
+    (limited by maxit) iterations plus 3 (initial, final, and postprocessing).
+
+    If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
+    the return tuple has the following format
+    ``(lambda, V, lambda history, residual norms history)``.
+
+    In the following ``n`` denotes the matrix size and ``k`` the number
+    of required eigenvalues (smallest or largest).
+
+    The LOBPCG code internally solves eigenproblems of the size ``3k`` on every
+    iteration by calling the dense eigensolver `eigh`, so if ``k`` is not
+    small enough compared to ``n``, it makes no sense to call the LOBPCG code.
+    Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely
+    break internally, so the code calls the standard function `eigh` instead.
+    It is not that ``n`` should be large for the LOBPCG to work, but rather the
+    ratio ``n / k`` should be large. If you call LOBPCG with ``k=1``
+    and ``n=10``, it works though ``n`` is small. The method is intended
+    for extremely large ``n / k``.
+
+    The convergence speed depends basically on two factors:
+
+    1. Relative separation of the eigenvalues being sought from the rest
+       of the eigenvalues. One can vary ``k`` to improve the absolute
+       separation and use proper preconditioning to shrink the spectral spread.
+       For example, a rod vibration test problem (under tests
+       directory) is ill-conditioned for large ``n``, so convergence will be
+       slow, unless efficient preconditioning is used. For this specific
+       problem, a good simple preconditioner function would be a linear solve
+       for `A`, which is easy to code since `A` is tridiagonal.
+
+    2. Quality of the initial approximations `X` to the eigenvectors being
+       sought. Randomly distributed around the origin vectors work well if no
+       better choice is known.
+
+    References
+    ----------
+    .. [1] A. V. Knyazev (2001),
+           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
+           Block Preconditioned Conjugate Gradient Method.
+           SIAM Journal on Scientific Computing 23, no. 2,
+           pp. 517-541. :doi:`10.1137/S1064827500366124`
+
+    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
+           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
+           (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
+
+    .. [3] A. V. Knyazev's C and MATLAB implementations:
+           https://github.com/lobpcg/blopex
+
+    Examples
+    --------
+    Solve ``A x = lambda x`` with constraints and preconditioning.
+
+    >>> import numpy as np
+    >>> from scipy.sparse import spdiags, issparse
+    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
+
+    The square matrix size:
+
+    >>> n = 100
+    >>> vals = np.arange(1, n + 1)
+
+    The first mandatory input parameter, in this test
+    a sparse 2D array representing the square matrix
+    of the eigenvalue problem to solve:
+
+    >>> A = spdiags(vals, 0, n, n)
+    >>> A.toarray()
+    array([[  1,   0,   0, ...,   0,   0,   0],
+           [  0,   2,   0, ...,   0,   0,   0],
+           [  0,   0,   3, ...,   0,   0,   0],
+           ...,
+           [  0,   0,   0, ...,  98,   0,   0],
+           [  0,   0,   0, ...,   0,  99,   0],
+           [  0,   0,   0, ...,   0,   0, 100]])
+
+    Initial guess for eigenvectors, should have linearly independent
+    columns. The second mandatory input parameter, a 2D array with the
+    row dimension determining the number of requested eigenvalues.
+    If no initial approximations are available, randomly oriented vectors
+    commonly work best, e.g., with components normally distributed
+    around zero or uniformly distributed on the interval [-1, 1].
+
+    >>> rng = np.random.default_rng()
+    >>> X = rng.normal(size=(n, 3))
+
+    Constraints - an optional input parameter is a 2D array comprising
+    column vectors that the eigenvectors must be orthogonal to:
+
+    >>> Y = np.eye(n, 3)
+
+    The preconditioner is the inverse of A in this example:
+
+    >>> invA = spdiags([1./vals], 0, n, n)
+
+    The preconditioner must be defined by a function:
+
+    >>> def precond(x):
+    ...     return invA @ x
+
+    The argument x of the preconditioner function is a matrix inside `lobpcg`,
+    thus the use of matrix-matrix product ``@``.
+
+    The preconditioner function is passed to lobpcg as a `LinearOperator`:
+
+    >>> M = LinearOperator(matvec=precond, matmat=precond,
+    ...                    shape=(n, n), dtype=np.float64)
+
+    Let us now solve the eigenvalue problem for the matrix A:
+
+    >>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
+    >>> eigenvalues
+    array([4., 5., 6.])
+
+    Note that the vectors passed in Y are the eigenvectors of the 3 smallest
+    eigenvalues. The results returned are orthogonal to those.
+    """
+    blockVectorX = X
+    bestblockVectorX = blockVectorX
+    blockVectorY = Y
+    residualTolerance = tol
+    if maxiter is None:
+        maxiter = 20
+
+    bestIterationNumber = maxiter
+
+    sizeY = 0
+    if blockVectorY is not None:
+        if len(blockVectorY.shape) != 2:
+            warnings.warn(
+                f"Expected rank-2 array for argument Y, instead got "
+                f"{len(blockVectorY.shape)}, "
+                f"so ignoring it and using no constraints.",
+                UserWarning, stacklevel=2
+            )
+            blockVectorY = None
+        else:
+            sizeY = blockVectorY.shape[1]
+
+    # Block size.
+    if blockVectorX is None:
+        raise ValueError("The mandatory initial matrix X cannot be None")
+    if len(blockVectorX.shape) != 2:
+        raise ValueError("expected rank-2 array for argument X")
+
+    n, sizeX = blockVectorX.shape
+
+    # Data type of iterates, determined by X, must be inexact
+    if not np.issubdtype(blockVectorX.dtype, np.inexact):
+        warnings.warn(
+            f"Data type for argument X is {blockVectorX.dtype}, "
+            f"which is not inexact, so it is cast to np.float32.",
+            UserWarning, stacklevel=2
+        )
+        blockVectorX = np.asarray(blockVectorX, dtype=np.float32)
+
+    if retLambdaHistory:
+        lambdaHistory = np.zeros((maxiter + 3, sizeX),
+                                 dtype=blockVectorX.dtype)
+    if retResidualNormsHistory:
+        residualNormsHistory = np.zeros((maxiter + 3, sizeX),
+                                        dtype=blockVectorX.dtype)
+
+    if verbosityLevel:
+        aux = "Solving "
+        if B is None:
+            aux += "standard"
+        else:
+            aux += "generalized"
+        aux += " eigenvalue problem with"
+        if M is None:
+            aux += "out"
+        aux += " preconditioning\n\n"
+        aux += "matrix size %d\n" % n
+        aux += "block size %d\n\n" % sizeX
+        if blockVectorY is None:
+            aux += "No constraints\n\n"
+        else:
+            if sizeY > 1:
+                aux += "%d constraints\n\n" % sizeY
+            else:
+                aux += "%d constraint\n\n" % sizeY
+        print(aux)
+
+    if (n - sizeY) < (5 * sizeX):
+        warnings.warn(
+            f"The problem size {n} minus the constraints size {sizeY} "
+            f"is too small relative to the block size {sizeX}. "
+            f"Using a dense eigensolver instead of LOBPCG iterations. "
+            f"No output of the history of the iterations.",
+            UserWarning, stacklevel=2
+        )
+
+        sizeX = min(sizeX, n)
+
+        if blockVectorY is not None:
+            raise NotImplementedError(
+                "The dense eigensolver does not support constraints."
+            )
+
+        # Define the closed range of indices of eigenvalues to return.
+        if largest:
+            eigvals = (n - sizeX, n - 1)
+        else:
+            eigvals = (0, sizeX - 1)
+
+        try:
+            if isinstance(A, LinearOperator):
+                A = A(np.eye(n, dtype=int))
+            elif callable(A):
+                A = A(np.eye(n, dtype=int))
+                if A.shape != (n, n):
+                    raise ValueError(
+                        f"The shape {A.shape} of the primary matrix\n"
+                        f"defined by a callable object is wrong.\n"
+                    )
+            elif isspmatrix(A):
+                A = A.toarray()
+            else:
+                A = np.asarray(A)
+        except Exception as e:
+            raise Exception(
+                f"Primary MatMul call failed with error\n"
+                f"{e}\n")
+
+        if B is not None:
+            try:
+                if isinstance(B, LinearOperator):
+                    B = B(np.eye(n, dtype=int))
+                elif callable(B):
+                    B = B(np.eye(n, dtype=int))
+                    if B.shape != (n, n):
+                        raise ValueError(
+                            f"The shape {B.shape} of the secondary matrix\n"
+                            f"defined by a callable object is wrong.\n"
+                        )
+                elif isspmatrix(B):
+                    B = B.toarray()
+                else:
+                    B = np.asarray(B)
+            except Exception as e:
+                raise Exception(
+                    f"Secondary MatMul call failed with error\n"
+                    f"{e}\n")
+
+        try:
+            if "subset_by_index" in inspect.signature(eigh).parameters:
+                # scipy >= 1.5
+                additional_params = {"subset_by_index": eigvals}
+            else:
+                # deprecated in scipy == 1.10
+                additional_params = {"eigvals": eigvals}
+            vals, vecs = eigh(A,
+                              B,
+                              check_finite=False,
+                              **additional_params)
+            if largest:
+                # Reverse order to be compatible with eigs() in 'LM' mode.
+                vals = vals[::-1]
+                vecs = vecs[:, ::-1]
+
+            return vals, vecs
+        except Exception as e:
+            raise Exception(
+                f"Dense eigensolver failed with error\n"
+                f"{e}\n"
+            )
+
+    if (residualTolerance is None) or (residualTolerance <= 0.0):
+        residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n
+
+    A = _makeMatMat(A)
+    B = _makeMatMat(B)
+    M = _makeMatMat(M)
+
+    # Apply constraints to X.
+    if blockVectorY is not None:
+
+        if B is not None:
+            blockVectorBY = B(blockVectorY)
+            if blockVectorBY.shape != blockVectorY.shape:
+                raise ValueError(
+                    f"The shape {blockVectorY.shape} "
+                    f"of the constraint not preserved\n"
+                    f"and changed to {blockVectorBY.shape} "
+                    f"after multiplying by the secondary matrix.\n"
+                )
+        else:
+            blockVectorBY = blockVectorY
+
+        # gramYBY is a dense array.
+        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
+        try:
+            # gramYBY is a Cholesky factor from now on...
+            gramYBY = cho_factor(gramYBY)
+        except LinAlgError as e:
+            raise ValueError("Linearly dependent constraints") from e
+
+        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
+
+    ##
+    # B-orthonormalize X.
+    blockVectorX, blockVectorBX, _, _ = _b_orthonormalize(
+        B, blockVectorX, verbosityLevel=verbosityLevel)
+    if blockVectorX is None:
+        raise ValueError("Linearly dependent initial approximations")
+
+    ##
+    # Compute the initial Ritz vectors: solve the eigenproblem.
+    blockVectorAX = A(blockVectorX)
+    if blockVectorAX.shape != blockVectorX.shape:
+        raise ValueError(
+            f"The shape {blockVectorX.shape} "
+            f"of the initial approximations not preserved\n"
+            f"and changed to {blockVectorAX.shape} "
+            f"after multiplying by the primary matrix.\n"
+        )
+
+    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
+
+    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
+    ii = _get_indx(_lambda, sizeX, largest)
+    _lambda = _lambda[ii]
+    if retLambdaHistory:
+        lambdaHistory[0, :] = _lambda
+
+    eigBlockVector = np.asarray(eigBlockVector[:, ii])
+    blockVectorX = np.dot(blockVectorX, eigBlockVector)
+    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
+    if B is not None:
+        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
+
+    ##
+    # Active index set.
+    activeMask = np.ones((sizeX,), dtype=bool)
+
+    ##
+    # Main iteration loop.
+
+    blockVectorP = None  # set during iteration
+    blockVectorAP = None
+    blockVectorBP = None
+
+    smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max)
+
+    iterationNumber = -1
+    restart = True
+    forcedRestart = False
+    explicitGramFlag = False
+    while iterationNumber < maxiter:
+        iterationNumber += 1
+
+        if B is not None:
+            aux = blockVectorBX * _lambda[np.newaxis, :]
+        else:
+            aux = blockVectorX * _lambda[np.newaxis, :]
+
+        blockVectorR = blockVectorAX - aux
+
+        aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
+        residualNorms = np.sqrt(np.abs(aux))
+        if retResidualNormsHistory:
+            residualNormsHistory[iterationNumber, :] = residualNorms
+        residualNorm = np.sum(np.abs(residualNorms)) / sizeX
+
+        if residualNorm < smallestResidualNorm:
+            smallestResidualNorm = residualNorm
+            bestIterationNumber = iterationNumber
+            bestblockVectorX = blockVectorX
+        elif residualNorm > 2**restartControl * smallestResidualNorm:
+            forcedRestart = True
+            blockVectorAX = A(blockVectorX)
+            if blockVectorAX.shape != blockVectorX.shape:
+                raise ValueError(
+                    f"The shape {blockVectorX.shape} "
+                    f"of the restarted iterate not preserved\n"
+                    f"and changed to {blockVectorAX.shape} "
+                    f"after multiplying by the primary matrix.\n"
+                )
+            if B is not None:
+                blockVectorBX = B(blockVectorX)
+                if blockVectorBX.shape != blockVectorX.shape:
+                    raise ValueError(
+                        f"The shape {blockVectorX.shape} "
+                        f"of the restarted iterate not preserved\n"
+                        f"and changed to {blockVectorBX.shape} "
+                        f"after multiplying by the secondary matrix.\n"
+                    )
+
+        ii = np.where(residualNorms > residualTolerance, True, False)
+        activeMask = activeMask & ii
+        currentBlockSize = activeMask.sum()
+
+        if verbosityLevel:
+            print(f"iteration {iterationNumber}")
+            print(f"current block size: {currentBlockSize}")
+            print(f"eigenvalue(s):\n{_lambda}")
+            print(f"residual norm(s):\n{residualNorms}")
+
+        if currentBlockSize == 0:
+            break
+
+        activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
+
+        if iterationNumber > 0:
+            activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
+            activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
+            if B is not None:
+                activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
+
+        if M is not None:
+            # Apply preconditioner T to the active residuals.
+            activeBlockVectorR = M(activeBlockVectorR)
+
+        ##
+        # Apply constraints to the preconditioned residuals.
+        if blockVectorY is not None:
+            _applyConstraints(activeBlockVectorR,
+                              gramYBY,
+                              blockVectorBY,
+                              blockVectorY)
+
+        ##
+        # B-orthogonalize the preconditioned residuals to X.
+        if B is not None:
+            activeBlockVectorR = activeBlockVectorR - (
+                blockVectorX @
+                (blockVectorBX.T.conj() @ activeBlockVectorR)
+            )
+        else:
+            activeBlockVectorR = activeBlockVectorR - (
+                blockVectorX @
+                (blockVectorX.T.conj() @ activeBlockVectorR)
+            )
+
+        ##
+        # B-orthonormalize the preconditioned residuals.
+        aux = _b_orthonormalize(
+            B, activeBlockVectorR, verbosityLevel=verbosityLevel)
+        activeBlockVectorR, activeBlockVectorBR, _, _ = aux
+
+        if activeBlockVectorR is None:
+            warnings.warn(
+                f"Failed at iteration {iterationNumber} with accuracies "
+                f"{residualNorms}\n not reaching the requested "
+                f"tolerance {residualTolerance}.",
+                UserWarning, stacklevel=2
+            )
+            break
+        activeBlockVectorAR = A(activeBlockVectorR)
+
+        if iterationNumber > 0:
+            if B is not None:
+                aux = _b_orthonormalize(
+                    B, activeBlockVectorP, activeBlockVectorBP,
+                    verbosityLevel=verbosityLevel
+                )
+                activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
+            else:
+                aux = _b_orthonormalize(B, activeBlockVectorP,
+                                        verbosityLevel=verbosityLevel)
+                activeBlockVectorP, _, invR, normal = aux
+            # Function _b_orthonormalize returns None if Cholesky fails
+            if activeBlockVectorP is not None:
+                activeBlockVectorAP = activeBlockVectorAP / normal
+                activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
+                restart = forcedRestart
+            else:
+                restart = True
+
+        ##
+        # Perform the Rayleigh Ritz Procedure:
+        # Compute symmetric Gram matrices:
+
+        if activeBlockVectorAR.dtype == "float32":
+            myeps = 1
+        else:
+            myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps)
+
+        if residualNorms.max() > myeps and not explicitGramFlag:
+            explicitGramFlag = False
+        else:
+            # Once explicitGramFlag, forever explicitGramFlag.
+            explicitGramFlag = True
+
+        # Shared memory assignments to simplify the code
+        if B is None:
+            blockVectorBX = blockVectorX
+            activeBlockVectorBR = activeBlockVectorR
+            if not restart:
+                activeBlockVectorBP = activeBlockVectorP
+
+        # Common submatrices:
+        gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
+        gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
+
+        gramDtype = activeBlockVectorAR.dtype
+        if explicitGramFlag:
+            gramRAR = (gramRAR + gramRAR.T.conj()) / 2
+            gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
+            gramXAX = (gramXAX + gramXAX.T.conj()) / 2
+            gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
+            gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
+            gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
+        else:
+            gramXAX = np.diag(_lambda).astype(gramDtype)
+            gramXBX = np.eye(sizeX, dtype=gramDtype)
+            gramRBR = np.eye(currentBlockSize, dtype=gramDtype)
+            gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype)
+
+        if not restart:
+            gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
+            gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
+            gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
+            gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
+            gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
+            if explicitGramFlag:
+                gramPAP = (gramPAP + gramPAP.T.conj()) / 2
+                gramPBP = np.dot(activeBlockVectorP.T.conj(),
+                                 activeBlockVectorBP)
+            else:
+                gramPBP = np.eye(currentBlockSize, dtype=gramDtype)
+
+            gramA = bmat(
+                [
+                    [gramXAX, gramXAR, gramXAP],
+                    [gramXAR.T.conj(), gramRAR, gramRAP],
+                    [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
+                ]
+            )
+            gramB = bmat(
+                [
+                    [gramXBX, gramXBR, gramXBP],
+                    [gramXBR.T.conj(), gramRBR, gramRBP],
+                    [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
+                ]
+            )
+
+            _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
+
+            try:
+                _lambda, eigBlockVector = eigh(gramA,
+                                               gramB,
+                                               check_finite=False)
+            except LinAlgError as e:
+                # raise ValueError("eigh failed in lobpcg iterations") from e
+                if verbosityLevel:
+                    warnings.warn(
+                        f"eigh failed at iteration {iterationNumber} \n"
+                        f"with error {e} causing a restart.\n",
+                        UserWarning, stacklevel=2
+                    )
+                # try again after dropping the direction vectors P from RR
+                restart = True
+
+        if restart:
+            gramA = bmat([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
+            gramB = bmat([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])
+
+            _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
+
+            try:
+                _lambda, eigBlockVector = eigh(gramA,
+                                               gramB,
+                                               check_finite=False)
+            except LinAlgError as e:
+                # raise ValueError("eigh failed in lobpcg iterations") from e
+                warnings.warn(
+                    f"eigh failed at iteration {iterationNumber} with error\n"
+                    f"{e}\n",
+                    UserWarning, stacklevel=2
+                )
+                break
+
+        ii = _get_indx(_lambda, sizeX, largest)
+        _lambda = _lambda[ii]
+        eigBlockVector = eigBlockVector[:, ii]
+        if retLambdaHistory:
+            lambdaHistory[iterationNumber + 1, :] = _lambda
+
+        # Compute Ritz vectors.
+        if B is not None:
+            if not restart:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:
+                                                 sizeX + currentBlockSize]
+                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                pp += np.dot(activeBlockVectorP, eigBlockVectorP)
+
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
+
+                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
+                bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
+            else:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
+
+            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
+            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
+            blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
+
+            blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
+
+        else:
+            if not restart:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:
+                                                 sizeX + currentBlockSize]
+                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                pp += np.dot(activeBlockVectorP, eigBlockVectorP)
+
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
+            else:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+
+            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
+            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
+
+            blockVectorP, blockVectorAP = pp, app
+
+        if B is not None:
+            aux = blockVectorBX * _lambda[np.newaxis, :]
+        else:
+            aux = blockVectorX * _lambda[np.newaxis, :]
+
+        blockVectorR = blockVectorAX - aux
+
+        aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
+        residualNorms = np.sqrt(np.abs(aux))
+        # Use old lambda in case of early loop exit.
+        if retLambdaHistory:
+            lambdaHistory[iterationNumber + 1, :] = _lambda
+        if retResidualNormsHistory:
+            residualNormsHistory[iterationNumber + 1, :] = residualNorms
+        residualNorm = np.sum(np.abs(residualNorms)) / sizeX
+        if residualNorm < smallestResidualNorm:
+            smallestResidualNorm = residualNorm
+            bestIterationNumber = iterationNumber + 1
+            bestblockVectorX = blockVectorX
+
+    if np.max(np.abs(residualNorms)) > residualTolerance:
+        warnings.warn(
+            f"Exited at iteration {iterationNumber} with accuracies \n"
+            f"{residualNorms}\n"
+            f"not reaching the requested tolerance {residualTolerance}.\n"
+            f"Use iteration {bestIterationNumber} instead with accuracy \n"
+            f"{smallestResidualNorm}.\n",
+            UserWarning, stacklevel=2
+        )
+
+    if verbosityLevel:
+        print(f"Final iterative eigenvalue(s):\n{_lambda}")
+        print(f"Final iterative residual norm(s):\n{residualNorms}")
+
+    blockVectorX = bestblockVectorX
+    # Making eigenvectors "exactly" satisfy the blockVectorY constraints
+    if blockVectorY is not None:
+        _applyConstraints(blockVectorX,
+                          gramYBY,
+                          blockVectorBY,
+                          blockVectorY)
+
+    # Making eigenvectors "exactly" orthonormalized by final "exact" RR
+    blockVectorAX = A(blockVectorX)
+    if blockVectorAX.shape != blockVectorX.shape:
+        raise ValueError(
+            f"The shape {blockVectorX.shape} "
+            f"of the postprocessing iterate not preserved\n"
+            f"and changed to {blockVectorAX.shape} "
+            f"after multiplying by the primary matrix.\n"
+        )
+    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
+
+    blockVectorBX = blockVectorX
+    if B is not None:
+        blockVectorBX = B(blockVectorX)
+        if blockVectorBX.shape != blockVectorX.shape:
+            raise ValueError(
+                f"The shape {blockVectorX.shape} "
+                f"of the postprocessing iterate not preserved\n"
+                f"and changed to {blockVectorBX.shape} "
+                f"after multiplying by the secondary matrix.\n"
+            )
+
+    gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
+    _handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel)
+    gramXAX = (gramXAX + gramXAX.T.conj()) / 2
+    gramXBX = (gramXBX + gramXBX.T.conj()) / 2
+    try:
+        _lambda, eigBlockVector = eigh(gramXAX,
+                                       gramXBX,
+                                       check_finite=False)
+    except LinAlgError as e:
+        raise ValueError("eigh has failed in lobpcg postprocessing") from e
+
+    ii = _get_indx(_lambda, sizeX, largest)
+    _lambda = _lambda[ii]
+    eigBlockVector = np.asarray(eigBlockVector[:, ii])
+
+    blockVectorX = np.dot(blockVectorX, eigBlockVector)
+    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
+
+    if B is not None:
+        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
+        aux = blockVectorBX * _lambda[np.newaxis, :]
+    else:
+        aux = blockVectorX * _lambda[np.newaxis, :]
+
+    blockVectorR = blockVectorAX - aux
+
+    aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
+    residualNorms = np.sqrt(np.abs(aux))
+
+    if retLambdaHistory:
+        lambdaHistory[bestIterationNumber + 1, :] = _lambda
+    if retResidualNormsHistory:
+        residualNormsHistory[bestIterationNumber + 1, :] = residualNorms
+
+    if retLambdaHistory:
+        lambdaHistory = lambdaHistory[
+            : bestIterationNumber + 2, :]
+    if retResidualNormsHistory:
+        residualNormsHistory = residualNormsHistory[
+            : bestIterationNumber + 2, :]
+
+    if np.max(np.abs(residualNorms)) > residualTolerance:
+        warnings.warn(
+            f"Exited postprocessing with accuracies \n"
+            f"{residualNorms}\n"
+            f"not reaching the requested tolerance {residualTolerance}.",
+            UserWarning, stacklevel=2
+        )
+
+    if verbosityLevel:
+        print(f"Final postprocessing eigenvalue(s):\n{_lambda}")
+        print(f"Final residual norm(s):\n{residualNorms}")
+
+    if retLambdaHistory:
+        lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0])
+        lambdaHistory = [np.squeeze(i) for i in lambdaHistory]
+    if retResidualNormsHistory:
+        residualNormsHistory = np.vsplit(residualNormsHistory,
+                                         np.shape(residualNormsHistory)[0])
+        residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory]
+
+    if retLambdaHistory:
+        if retResidualNormsHistory:
+            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
+        else:
+            return _lambda, blockVectorX, lambdaHistory
+    else:
+        if retResidualNormsHistory:
+            return _lambda, blockVectorX, residualNormsHistory
+        else:
+            return _lambda, blockVectorX
diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7fd50fa7531aa544b8672e8acb971b9f864ed9a
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50fdb1ae43da8caea797dfb0ae102476f0760901
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..279b741139f0e4276f836a788b52a8f357f3c2d1
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py
new file mode 100644
index 0000000000000000000000000000000000000000..837e3a7946d70355b46606d20a4b6c0f038b0815
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py
@@ -0,0 +1,90 @@
+"""Vendored from
+https://github.com/pypa/packaging/blob/main/packaging/_structures.py
+"""
+# Copyright (c) Donald Stufft and individual contributors.
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class InfinityType:
+    def __repr__(self) -> str:
+        return "Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return False
+
+    def __le__(self, other: object) -> bool:
+        return False
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other: object) -> bool:
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other: object) -> bool:
+        return True
+
+    def __ge__(self, other: object) -> bool:
+        return True
+
+    def __neg__(self: object) -> "NegativeInfinityType":
+        return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType:
+    def __repr__(self) -> str:
+        return "-Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return True
+
+    def __le__(self, other: object) -> bool:
+        return True
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other: object) -> bool:
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other: object) -> bool:
+        return False
+
+    def __ge__(self, other: object) -> bool:
+        return False
+
+    def __neg__(self: object) -> InfinityType:
+        return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
diff --git a/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f1e5b833699c38690679515bb788820de4168b0
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py
@@ -0,0 +1,535 @@
+"""Vendored from
+https://github.com/pypa/packaging/blob/main/packaging/version.py
+"""
+# Copyright (c) Donald Stufft and individual contributors.
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import collections
+import itertools
+import re
+import warnings
+from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+SubLocalType = Union[InfiniteTypes, int, str]
+LocalType = Union[
+    NegativeInfinityType,
+    Tuple[
+        Union[
+            SubLocalType,
+            Tuple[SubLocalType, str],
+            Tuple[NegativeInfinityType, SubLocalType],
+        ],
+        ...,
+    ],
+]
+CmpKey = Tuple[
+    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
+LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+VersionComparisonMethod = Callable[
+    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+]
+
+_Version = collections.namedtuple(
+    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version: str) -> Union["LegacyVersion", "Version"]:
+    """Parse the given version from a string to an appropriate class.
+
+    Parameters
+    ----------
+    version : str
+        Version in a string format, e.g. "0.9.1" or "1.2.dev0".
+
+    Returns
+    -------
+    version : :class:`Version` object or a :class:`LegacyVersion` object
+        The returned class depends on whether the given version is a valid
+        PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion:
+    _key: Union[CmpKey, LegacyCmpKey]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+class LegacyVersion(_BaseVersion):
+    def __init__(self, version: str) -> None:
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+        warnings.warn(
+            "Creating a LegacyVersion has been deprecated and will be "
+            "removed in the next major release",
+            DeprecationWarning,
+        )
+
+    def __str__(self) -> str:
+        return self._version
+
+    def __repr__(self) -> str:
+        return f"<LegacyVersion('{self}')>"
+
+    @property
+    def public(self) -> str:
+        return self._version
+
+    @property
+    def base_version(self) -> str:
+        return self._version
+
+    @property
+    def epoch(self) -> int:
+        return -1
+
+    @property
+    def release(self) -> None:
+        return None
+
+    @property
+    def pre(self) -> None:
+        return None
+
+    @property
+    def post(self) -> None:
+        return None
+
+    @property
+    def dev(self) -> None:
+        return None
+
+    @property
+    def local(self) -> None:
+        return None
+
+    @property
+    def is_prerelease(self) -> bool:
+        return False
+
+    @property
+    def is_postrelease(self) -> bool:
+        return False
+
+    @property
+    def is_devrelease(self) -> bool:
+        return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+    "pre": "c",
+    "preview": "c",
+    "-": "final-",
+    "rc": "c",
+    "dev": "@",
+}
+
+
+def _parse_version_parts(s: str) -> Iterator[str]:
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively put the LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version of setuptools prior
+    # to its adoption of the packaging library.
+    parts: List[str] = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+
+    return epoch, tuple(parts)
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
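+# NOTE: an illustrative sketch, not part of the vendored upstream source,
+# showing how VERSION_PATTERN splits a version string into the named groups
+# consumed by Version.__init__ below:
+#
+#     >>> import re
+#     >>> regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$",
+#     ...                    re.VERBOSE | re.IGNORECASE)
+#     >>> m = regex.search("1!2.0.3rc1.post2.dev4+ubuntu.5")
+#     >>> m.group("epoch"), m.group("release"), m.group("pre_l"), m.group("pre_n")
+#     ('1', '2.0.3', 'rc', '1')
+#     >>> m.group("post_l"), m.group("dev_n"), m.group("local")
+#     ('post', '4', 'ubuntu.5')
+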
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
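+
+
+# NOTE: a brief usage sketch added for illustration; not part of the
+# upstream file. Values follow the PEP 440 semantics implemented above:
+#
+#     >>> v = Version("1.2.3rc1+local.7")
+#     >>> v.release, v.pre, v.local
+#     ((1, 2, 3), ('rc', 1), 'local.7')
+#     >>> v.public, v.base_version, v.is_prerelease
+#     ('1.2.3rc1', '1.2.3', True)
+#     >>> Version("1.2.3rc1") < Version("1.2.3")
+#     True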
+
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
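+
+
+# NOTE: illustrative examples of the normalization above; added for
+# clarity, not part of the upstream file:
+#
+#     >>> _parse_letter_version("alpha", None)  # implicit 0, spelling normalized
+#     ('a', 0)
+#     >>> _parse_letter_version("RC", "2")      # case normalized
+#     ('rc', 2)
+#     >>> _parse_letter_version(None, "1")      # implicit post release, e.g. 1.0-1
+#     ('post', 1)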
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
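+
+
+# NOTE: an added illustration, not upstream code: ".", "-" and "_" are
+# interchangeable separators and purely numeric parts become integers:
+#
+#     >>> _parse_local_version("Foo-1_bar.3")
+#     ('foo', 1, 'bar', 3)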
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order, make it a tuple, and use that for our
+    # sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
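+
+
+# NOTE: an added sketch, not part of the upstream file, of the total order
+# the comparison key above induces (via the Version class):
+#
+#     >>> versions = ["1.0.post1", "1.0", "1.0a1", "1.0.dev0", "1.0+local"]
+#     >>> [str(v) for v in sorted(Version(s) for s in versions)]
+#     ['1.0.dev0', '1.0a1', '1.0', '1.0+local', '1.0.post1']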
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/__init__.py b/mgm/lib/python3.10/site-packages/sklearn/impute/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e305bc2a657dc042d63dfd42fb8aa9734365ccbf
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/__init__.py
@@ -0,0 +1,24 @@
+"""Transformers for missing value imputation"""
+import typing
+
+from ._base import MissingIndicator, SimpleImputer
+from ._knn import KNNImputer
+
+if typing.TYPE_CHECKING:
+    # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
+    # TODO: remove this check once the estimator is no longer experimental.
+    from ._iterative import IterativeImputer  # noqa
+
+__all__ = ["MissingIndicator", "SimpleImputer", "KNNImputer"]
+
+
+# TODO: remove this check once the estimator is no longer experimental.
+def __getattr__(name):
+    if name == "IterativeImputer":
+        raise ImportError(
+            f"{name} is experimental and the API might change without any "
+            "deprecation cycle. To use it, you need to explicitly import "
+            "enable_iterative_imputer:\n"
+            "from sklearn.experimental import enable_iterative_imputer"
+        )
+    raise AttributeError(f"module {__name__} has no attribute {name}")
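+
+
+# NOTE: an added usage sketch of the guard above, mirroring the error
+# message; the enabling import must run before the estimator import:
+#
+#     >>> from sklearn.experimental import enable_iterative_imputer  # noqa
+#     >>> from sklearn.impute import IterativeImputer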
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..505155045de9ab88509fd689d51761361d9f0777
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9f548afcd4274849bedea62a48ff0e216d3f19d
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..738643f6810994d453199c9831c861c86eca89be
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d23b1b854bdb65ee48b28d845fa6783d6fbad5ae
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/_base.py b/mgm/lib/python3.10/site-packages/sklearn/impute/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab92e839718dfe72d934ff2b2f1570cd6cf220cc
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/_base.py
@@ -0,0 +1,1071 @@
+# Authors: Nicolas Tresegnie 
+#          Sergey Feldman 
+# License: BSD 3 clause
+
+import numbers
+import warnings
+from collections import Counter
+
+import numpy as np
+import numpy.ma as ma
+from scipy import sparse as sp
+
+from ..base import BaseEstimator, TransformerMixin
+from ..utils._param_validation import StrOptions, Hidden
+from ..utils.fixes import _mode
+from ..utils.sparsefuncs import _get_median
+from ..utils.validation import check_is_fitted
+from ..utils.validation import FLOAT_DTYPES
+from ..utils.validation import _check_feature_names_in
+from ..utils._mask import _get_mask
+from ..utils import _is_pandas_na
+from ..utils import is_scalar_nan
+
+
+def _check_inputs_dtype(X, missing_values):
+    if _is_pandas_na(missing_values):
+        # Allow using `pd.NA` as missing values to impute numerical arrays.
+        return
+    if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
+        raise ValueError(
+            "'X' and 'missing_values' types are expected to be"
+            " both numerical. Got X.dtype={} and "
+            " type(missing_values)={}.".format(X.dtype, type(missing_values))
+        )
+
+
+def _most_frequent(array, extra_value, n_repeat):
+    """Compute the most frequent value in a 1d array extended with
+    [extra_value] * n_repeat, where extra_value is assumed to be not part
+    of the array."""
+    # Compute the most frequent value in array only
+    if array.size > 0:
+        if array.dtype == object:
+            # scipy.stats.mode is slow with object dtype array.
+            # Python Counter is more efficient
+            counter = Counter(array)
+            most_frequent_count = counter.most_common(1)[0][1]
+            # tie breaking similarly to scipy.stats.mode
+            most_frequent_value = min(
+                value
+                for value, count in counter.items()
+                if count == most_frequent_count
+            )
+        else:
+            mode = _mode(array)
+            most_frequent_value = mode[0][0]
+            most_frequent_count = mode[1][0]
+    else:
+        most_frequent_value = 0
+        most_frequent_count = 0
+
+    # Compare to array + [extra_value] * n_repeat
+    if most_frequent_count == 0 and n_repeat == 0:
+        return np.nan
+    elif most_frequent_count < n_repeat:
+        return extra_value
+    elif most_frequent_count > n_repeat:
+        return most_frequent_value
+    elif most_frequent_count == n_repeat:
+        # tie breaking similarly to scipy.stats.mode
+        return min(most_frequent_value, extra_value)
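+
+
+# NOTE: added examples of the tie-breaking described above (ties resolve
+# to the smallest value); illustration only, not upstream code:
+#
+#     >>> import numpy as np
+#     >>> _most_frequent(np.array([2, 2, 3, 3]), extra_value=5, n_repeat=1)
+#     2
+#     >>> _most_frequent(np.array([2, 2]), extra_value=1, n_repeat=2)
+#     1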
+
+
+class _BaseImputer(TransformerMixin, BaseEstimator):
+    """Base class for all imputers.
+
+    It adds automatically support for `add_indicator`.
+    """
+
+    _parameter_constraints: dict = {
+        "missing_values": ["missing_values"],
+        "add_indicator": ["boolean"],
+        "keep_empty_features": ["boolean"],
+    }
+
+    def __init__(
+        self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False
+    ):
+        self.missing_values = missing_values
+        self.add_indicator = add_indicator
+        self.keep_empty_features = keep_empty_features
+
+    def _fit_indicator(self, X):
+        """Fit a MissingIndicator."""
+        if self.add_indicator:
+            self.indicator_ = MissingIndicator(
+                missing_values=self.missing_values, error_on_new=False
+            )
+            self.indicator_._fit(X, precomputed=True)
+        else:
+            self.indicator_ = None
+
+    def _transform_indicator(self, X):
+        """Compute the indicator mask.'
+
+        Note that X must be the original data as passed to the imputer before
+        any imputation, since imputation may be done inplace in some cases.
+        """
+        if self.add_indicator:
+            if not hasattr(self, "indicator_"):
+                raise ValueError(
+                    "Make sure to call _fit_indicator before _transform_indicator"
+                )
+            return self.indicator_.transform(X)
+
+    def _concatenate_indicator(self, X_imputed, X_indicator):
+        """Concatenate indicator mask with the imputed data."""
+        if not self.add_indicator:
+            return X_imputed
+
+        hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack
+        if X_indicator is None:
+            raise ValueError(
+                "Data from the missing indicator are not provided. Call "
+                "_fit_indicator and _transform_indicator in the imputer "
+                "implementation."
+            )
+
+        return hstack((X_imputed, X_indicator))
+
+    def _concatenate_indicator_feature_names_out(self, names, input_features):
+        if not self.add_indicator:
+            return names
+
+        indicator_names = self.indicator_.get_feature_names_out(input_features)
+        return np.concatenate([names, indicator_names])
+
+    def _more_tags(self):
+        return {"allow_nan": is_scalar_nan(self.missing_values)}
+
+
+class SimpleImputer(_BaseImputer):
+    """Univariate imputer for completing missing values with simple strategies.
+
+    Replace missing values using a descriptive statistic (e.g. mean, median, or
+    most frequent) along each column, or using a constant value.
+
+    Read more in the :ref:`User Guide <impute>`.
+
+    .. versionadded:: 0.20
+       `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
+       estimator which is now removed.
+
+    Parameters
+    ----------
+    missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan
+        The placeholder for the missing values. All occurrences of
+        `missing_values` will be imputed. For pandas' dataframes with
+        nullable integer dtypes with missing values, `missing_values`
+        can be set to either `np.nan` or `pd.NA`.
+
+    strategy : str, default='mean'
+        The imputation strategy.
+
+        - If "mean", then replace missing values using the mean along
+          each column. Can only be used with numeric data.
+        - If "median", then replace missing values using the median along
+          each column. Can only be used with numeric data.
+        - If "most_frequent", then replace missing using the most frequent
+          value along each column. Can be used with strings or numeric data.
+          If there is more than one such value, only the smallest is returned.
+        - If "constant", then replace missing values with fill_value. Can be
+          used with strings or numeric data.
+
+        .. versionadded:: 0.20
+           strategy="constant" for fixed value imputation.
+
+    fill_value : str or numerical value, default=None
+        When strategy == "constant", `fill_value` is used to replace all
+        occurrences of missing_values. For string or object data types,
+        `fill_value` must be a string.
+        If `None`, `fill_value` will be 0 when imputing numerical
+        data and "missing_value" for strings or object data types.
+
+    verbose : int, default=0
+        Controls the verbosity of the imputer.
+
+        .. deprecated:: 1.1
+           The 'verbose' parameter was deprecated in version 1.1 and will be
+           removed in 1.3. A warning will always be raised upon the removal of
+           empty columns in the future version.
+
+    copy : bool, default=True
+        If True, a copy of X will be created. If False, imputation will
+        be done in-place whenever possible. Note that, in the following cases,
+        a new copy will always be made, even if `copy=False`:
+
+        - If `X` is not an array of floating values;
+        - If `X` is encoded as a CSR matrix;
+        - If `add_indicator=True`.
+
+    add_indicator : bool, default=False
+        If True, a :class:`MissingIndicator` transform will stack onto output
+        of the imputer's transform. This allows a predictive estimator
+        to account for missingness despite imputation. If a feature has no
+        missing values at fit/train time, the feature won't appear on
+        the missing indicator even if there are missing values at
+        transform/test time.
+
+    keep_empty_features : bool, default=False
+        If True, features that consist exclusively of missing values when
+        `fit` is called are returned in results when `transform` is called.
+        The imputed value is always `0` except when `strategy="constant"`
+        in which case `fill_value` will be used instead.
+
+        .. versionadded:: 1.2
+
+    Attributes
+    ----------
+    statistics_ : array of shape (n_features,)
+        The imputation fill value for each feature.
+        Computing statistics can result in `np.nan` values.
+        During :meth:`transform`, features corresponding to `np.nan`
+        statistics will be discarded.
+
+    indicator_ : :class:`~sklearn.impute.MissingIndicator`
+        Indicator used to add binary indicators for missing values.
+        `None` if `add_indicator=False`.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    IterativeImputer : Multivariate imputer that estimates values to impute for
+        each feature with missing values from all the others.
+    KNNImputer : Multivariate imputer that estimates missing features using
+        nearest samples.
+
+    Notes
+    -----
+    Columns which only contained missing values at :meth:`fit` are discarded
+    upon :meth:`transform` if strategy is not `"constant"`.
+
+    In a prediction context, simple imputation usually performs poorly when
+    associated with a weak learner. However, with a powerful learner, it can
+    lead to as good or better performance than complex imputation such as
+    :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.impute import SimpleImputer
+    >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
+    >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
+    SimpleImputer()
+    >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
+    >>> print(imp_mean.transform(X))
+    [[ 7.   2.   3. ]
+     [ 4.   3.5  6. ]
+     [10.   3.5  9. ]]
+    """
+
+    _parameter_constraints: dict = {
+        **_BaseImputer._parameter_constraints,
+        "strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})],
+        "fill_value": "no_validation",  # any object is valid
+        "verbose": ["verbose", Hidden(StrOptions({"deprecated"}))],
+        "copy": ["boolean"],
+    }
+
+    def __init__(
+        self,
+        *,
+        missing_values=np.nan,
+        strategy="mean",
+        fill_value=None,
+        verbose="deprecated",
+        copy=True,
+        add_indicator=False,
+        keep_empty_features=False,
+    ):
+        super().__init__(
+            missing_values=missing_values,
+            add_indicator=add_indicator,
+            keep_empty_features=keep_empty_features,
+        )
+        self.strategy = strategy
+        self.fill_value = fill_value
+        self.verbose = verbose
+        self.copy = copy
+
+    def _validate_input(self, X, in_fit):
+
+        if self.strategy in ("most_frequent", "constant"):
+            # If input is a list of strings, dtype = object.
+            # Otherwise ValueError is raised in SimpleImputer
+            # with strategy='most_frequent' or 'constant'
+            # because the list is converted to Unicode numpy array
+            if isinstance(X, list) and any(
+                isinstance(elem, str) for row in X for elem in row
+            ):
+                dtype = object
+            else:
+                dtype = None
+        else:
+            dtype = FLOAT_DTYPES
+
+        if not in_fit and self._fit_dtype.kind == "O":
+            # Use object dtype if fitted on object dtypes
+            dtype = self._fit_dtype
+
+        if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
+            force_all_finite = "allow-nan"
+        else:
+            force_all_finite = True
+
+        try:
+            X = self._validate_data(
+                X,
+                reset=in_fit,
+                accept_sparse="csc",
+                dtype=dtype,
+                force_all_finite=force_all_finite,
+                copy=self.copy,
+            )
+        except ValueError as ve:
+            if "could not convert" in str(ve):
+                new_ve = ValueError(
+                    "Cannot use {} strategy with non-numeric data:\n{}".format(
+                        self.strategy, ve
+                    )
+                )
+                raise new_ve from None
+            else:
+                raise ve
+
+        if in_fit:
+            # Use the dtype seen in `fit` for non-`fit` conversion
+            self._fit_dtype = X.dtype
+
+        _check_inputs_dtype(X, self.missing_values)
+        if X.dtype.kind not in ("i", "u", "f", "O"):
+            raise ValueError(
+                "SimpleImputer does not support data with dtype "
+                "{0}. Please provide either a numeric array (with"
+                " a floating point or integer dtype) or "
+                "categorical data represented either as an array "
+                "with integer dtype or an array of string values "
+                "with an object dtype.".format(X.dtype)
+            )
+
+        return X
+
+    def fit(self, X, y=None):
+        """Fit the imputer on `X`.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            Fitted estimator.
+        """
+        self._validate_params()
+        if self.verbose != "deprecated":
+            warnings.warn(
+                "The 'verbose' parameter was deprecated in version "
+                "1.1 and will be removed in 1.3. A warning will "
+                "always be raised upon the removal of empty columns "
+                "in the future version.",
+                FutureWarning,
+            )
+
+        X = self._validate_input(X, in_fit=True)
+
+        # default fill_value is 0 for numerical input and "missing_value"
+        # otherwise
+        if self.fill_value is None:
+            if X.dtype.kind in ("i", "u", "f"):
+                fill_value = 0
+            else:
+                fill_value = "missing_value"
+        else:
+            fill_value = self.fill_value
+
+        # fill_value should be numerical in case of numerical input
+        if (
+            self.strategy == "constant"
+            and X.dtype.kind in ("i", "u", "f")
+            and not isinstance(fill_value, numbers.Real)
+        ):
+            raise ValueError(
+                "'fill_value'={0} is invalid. Expected a "
+                "numerical value when imputing numerical "
+                "data".format(fill_value)
+            )
+
+        if sp.issparse(X):
+            # missing_values = 0 not allowed with sparse data as it would
+            # force densification
+            if self.missing_values == 0:
+                raise ValueError(
+                    "Imputation not possible when missing_values "
+                    "== 0 and input is sparse. Provide a dense "
+                    "array instead."
+                )
+            else:
+                self.statistics_ = self._sparse_fit(
+                    X, self.strategy, self.missing_values, fill_value
+                )
+
+        else:
+            self.statistics_ = self._dense_fit(
+                X, self.strategy, self.missing_values, fill_value
+            )
+
+        return self
+
+    def _sparse_fit(self, X, strategy, missing_values, fill_value):
+        """Fit the transformer on sparse data."""
+        missing_mask = _get_mask(X, missing_values)
+        mask_data = missing_mask.data
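+        # `np.diff(X.indptr)` is the number of stored entries per CSC column;
+        # the remaining rows are implicit (unstored) zeros, which still count
+        # towards the column statistics computed below.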
+        n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
+
+        statistics = np.empty(X.shape[1])
+
+        if strategy == "constant":
+            # for constant strategy, self.statistics_ is used to store
+            # fill_value in each column
+            statistics.fill(fill_value)
+        else:
+            for i in range(X.shape[1]):
+                column = X.data[X.indptr[i] : X.indptr[i + 1]]
+                mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
+                column = column[~mask_column]
+
+                # combine explicit and implicit zeros
+                mask_zeros = _get_mask(column, 0)
+                column = column[~mask_zeros]
+                n_explicit_zeros = mask_zeros.sum()
+                n_zeros = n_implicit_zeros[i] + n_explicit_zeros
+
+                if len(column) == 0 and self.keep_empty_features:
+                    # in case we want to keep columns with only missing values.
+                    statistics[i] = 0
+                else:
+                    if strategy == "mean":
+                        s = column.size + n_zeros
+                        statistics[i] = np.nan if s == 0 else column.sum() / s
+
+                    elif strategy == "median":
+                        statistics[i] = _get_median(column, n_zeros)
+
+                    elif strategy == "most_frequent":
+                        statistics[i] = _most_frequent(column, 0, n_zeros)
+
+        super()._fit_indicator(missing_mask)
+
+        return statistics
+
+    def _dense_fit(self, X, strategy, missing_values, fill_value):
+        """Fit the transformer on dense data."""
+        missing_mask = _get_mask(X, missing_values)
+        masked_X = ma.masked_array(X, mask=missing_mask)
+
+        super()._fit_indicator(missing_mask)
+
+        # Mean
+        if strategy == "mean":
+            mean_masked = np.ma.mean(masked_X, axis=0)
+            # Avoid the warning "Warning: converting a masked element to nan."
+            mean = np.ma.getdata(mean_masked)
+            mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
+
+            return mean
+
+        # Median
+        elif strategy == "median":
+            median_masked = np.ma.median(masked_X, axis=0)
+            # Avoid the warning "Warning: converting a masked element to nan."
+            median = np.ma.getdata(median_masked)
+            median[np.ma.getmaskarray(median_masked)] = (
+                0 if self.keep_empty_features else np.nan
+            )
+
+            return median
+
+        # Most frequent
+        elif strategy == "most_frequent":
+            # Avoid use of scipy.stats.mstats.mode due to the required
+            # additional overhead and slow benchmarking performance.
+            # See Issue 14325 and PR 14399 for full discussion.
+
+            # To be able to access the elements by columns
+            X = X.transpose()
+            mask = missing_mask.transpose()
+
+            if X.dtype.kind == "O":
+                most_frequent = np.empty(X.shape[0], dtype=object)
+            else:
+                most_frequent = np.empty(X.shape[0])
+
+            for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
+                row_mask = np.logical_not(row_mask).astype(bool)
+                row = row[row_mask]
+                if len(row) == 0 and self.keep_empty_features:
+                    most_frequent[i] = 0
+                else:
+                    most_frequent[i] = _most_frequent(row, np.nan, 0)
+
+            return most_frequent
+
+        # Constant
+        elif strategy == "constant":
+            # for constant strategy, self.statistics_ is used to store
+            # fill_value in each column
+            return np.full(X.shape[1], fill_value, dtype=X.dtype)
+
+    def transform(self, X):
+        """Impute all missing values in `X`.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix}, shape (n_samples, n_features)
+            The input data to complete.
+
+        Returns
+        -------
+        X_imputed : {ndarray, sparse matrix} of shape \
+                (n_samples, n_features_out)
+            `X` with imputed values.
+        """
+        check_is_fitted(self)
+
+        X = self._validate_input(X, in_fit=False)
+        statistics = self.statistics_
+
+        if X.shape[1] != statistics.shape[0]:
+            raise ValueError(
+                "X has %d features per sample, expected %d"
+                % (X.shape[1], self.statistics_.shape[0])
+            )
+
+        # compute mask before eliminating invalid features
+        missing_mask = _get_mask(X, self.missing_values)
+
+        # Decide whether to keep missing features
+        if self.strategy == "constant" or self.keep_empty_features:
+            valid_statistics = statistics
+            valid_statistics_indexes = None
+        else:
+            # same as np.isnan but also works for object dtypes
+            invalid_mask = _get_mask(statistics, np.nan)
+            valid_mask = np.logical_not(invalid_mask)
+            valid_statistics = statistics[valid_mask]
+            valid_statistics_indexes = np.flatnonzero(valid_mask)
+
+            if invalid_mask.any():
+                invalid_features = np.arange(X.shape[1])[invalid_mask]
+                if self.verbose != "deprecated" and self.verbose:
+                    # use feature names warning if features are provided
+                    if hasattr(self, "feature_names_in_"):
+                        invalid_features = self.feature_names_in_[invalid_features]
+                    warnings.warn(
+                        "Skipping features without any observed values:"
+                        f" {invalid_features}. At least one non-missing value is needed"
+                        f" for imputation with strategy='{self.strategy}'."
+                    )
+                X = X[:, valid_statistics_indexes]
+
+        # Do actual imputation
+        if sp.issparse(X):
+            if self.missing_values == 0:
+                raise ValueError(
+                    "Imputation not possible when missing_values "
+                    "== 0 and input is sparse. Provide a dense "
+                    "array instead."
+                )
+            else:
+                # if no invalid statistics are found, use the mask computed
+                # before, else recompute mask
+                if valid_statistics_indexes is None:
+                    mask = missing_mask.data
+                else:
+                    mask = _get_mask(X.data, self.missing_values)
+                indexes = np.repeat(
+                    np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
+                )[mask]
+
+                X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
+        else:
+            # use mask computed before eliminating invalid mask
+            if valid_statistics_indexes is None:
+                mask_valid_features = missing_mask
+            else:
+                mask_valid_features = missing_mask[:, valid_statistics_indexes]
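+            # `values` repeats each column's statistic once per missing entry;
+            # transposing the mask before `np.where` orders the coordinates
+            # column-by-column so they line up with `values`.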
+            n_missing = np.sum(mask_valid_features, axis=0)
+            values = np.repeat(valid_statistics, n_missing)
+            coordinates = np.where(mask_valid_features.transpose())[::-1]
+
+            X[coordinates] = values
+
+        X_indicator = super()._transform_indicator(missing_mask)
+
+        return super()._concatenate_indicator(X, X_indicator)
+
+    def inverse_transform(self, X):
+        """Convert the data back to the original representation.
+
+        Inverts the `transform` operation performed on an array.
+        This operation can only be performed after :class:`SimpleImputer` is
+        instantiated with `add_indicator=True`.
+
+        Note that `inverse_transform` can only invert the transform in
+        features that have binary indicators for missing values. If a feature
+        has no missing values at `fit` time, the feature won't have a binary
+        indicator, and the imputation done at `transform` time won't be
+        inverted.
+
+        .. versionadded:: 0.24
+
+        Parameters
+        ----------
+        X : array-like of shape \
+                (n_samples, n_features + n_features_missing_indicator)
+            The imputed data to be reverted to original data. It has to be
+            an augmented array of imputed data and the missing indicator mask.
+
+        Returns
+        -------
+        X_original : ndarray of shape (n_samples, n_features)
+            The original `X` with missing values as it was prior
+            to imputation.
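+
+        Examples
+        --------
+        A minimal round-trip sketch (illustrative values, default `mean`
+        strategy):
+
+        >>> import numpy as np
+        >>> from sklearn.impute import SimpleImputer
+        >>> imp = SimpleImputer(add_indicator=True)
+        >>> X = np.array([[1.0, 2.0], [np.nan, 4.0]])
+        >>> imp.inverse_transform(imp.fit_transform(X))
+        array([[ 1.,  2.],
+               [nan,  4.]])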
+        """
+        check_is_fitted(self)
+
+        if not self.add_indicator:
+            raise ValueError(
+                "'inverse_transform' works only when "
+                "'SimpleImputer' is instantiated with "
+                "'add_indicator=True'. "
+                f"Got 'add_indicator={self.add_indicator}' "
+                "instead."
+            )
+
+        n_features_missing = len(self.indicator_.features_)
+        non_empty_feature_count = X.shape[1] - n_features_missing
+        array_imputed = X[:, :non_empty_feature_count].copy()
+        missing_mask = X[:, non_empty_feature_count:].astype(bool)
+
+        n_features_original = len(self.statistics_)
+        shape_original = (X.shape[0], n_features_original)
+        X_original = np.zeros(shape_original)
+        X_original[:, self.indicator_.features_] = missing_mask
+        full_mask = X_original.astype(bool)
+
+        imputed_idx, original_idx = 0, 0
+        while imputed_idx < len(array_imputed.T):
+            if not np.all(X_original[:, original_idx]):
+                X_original[:, original_idx] = array_imputed.T[imputed_idx]
+                imputed_idx += 1
+                original_idx += 1
+            else:
+                original_idx += 1
+
+        X_original[full_mask] = self.missing_values
+        return X_original
+
+    def _more_tags(self):
+        return {
+            "allow_nan": (
+                _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values)
+            )
+        }
+
+    def get_feature_names_out(self, input_features=None):
+        """Get output feature names for transformation.
+
+        Parameters
+        ----------
+        input_features : array-like of str or None, default=None
+            Input features.
+
+            - If `input_features` is `None`, then `feature_names_in_` is
+              used as feature names in. If `feature_names_in_` is not defined,
+              then the following input feature names are generated:
+              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
+            - If `input_features` is an array-like, then `input_features` must
+              match `feature_names_in_` if `feature_names_in_` is defined.
+
+        Returns
+        -------
+        feature_names_out : ndarray of str objects
+            Transformed feature names.
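+
+        Examples
+        --------
+        A minimal sketch (illustrative): with `add_indicator=True`, indicator
+        names are appended after the retained input feature names.
+
+        >>> import numpy as np
+        >>> from sklearn.impute import SimpleImputer
+        >>> imp = SimpleImputer(add_indicator=True)
+        >>> _ = imp.fit(np.array([[1.0, np.nan], [2.0, 3.0]]))
+        >>> imp.get_feature_names_out()
+        array(['x0', 'x1', 'missingindicator_x1'], dtype=object)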
+        """
+        input_features = _check_feature_names_in(self, input_features)
+        non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))
+        names = input_features[non_missing_mask]
+        return self._concatenate_indicator_feature_names_out(names, input_features)
+
+
+class MissingIndicator(TransformerMixin, BaseEstimator):
+    """Binary indicators for missing values.
+
+    Note that this component typically should not be used in a vanilla
+    :class:`Pipeline` consisting of transformers and a classifier, but rather
+    could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.
+
+    Read more in the :ref:`User Guide <impute>`.
+
+    .. versionadded:: 0.20
+
+    Parameters
+    ----------
+    missing_values : int, float, str, np.nan or None, default=np.nan
+        The placeholder for the missing values. All occurrences of
+        `missing_values` will be imputed. For pandas' dataframes with
+        nullable integer dtypes with missing values, `missing_values`
+        should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
+
+    features : {'missing-only', 'all'}, default='missing-only'
+        Whether the imputer mask should represent all or a subset of
+        features.
+
+        - If `'missing-only'` (default), the imputer mask will only represent
+          features containing missing values during fit time.
+        - If `'all'`, the imputer mask will represent all features.
+
+    sparse : bool or 'auto', default='auto'
+        Whether the imputer mask format should be sparse or dense.
+
+        - If `'auto'` (default), the imputer mask will be of same type as
+          input.
+        - If `True`, the imputer mask will be a sparse matrix.
+        - If `False`, the imputer mask will be a numpy array.
+
+    error_on_new : bool, default=True
+        If `True`, :meth:`transform` will raise an error when there are
+        features with missing values that have no missing values in
+        :meth:`fit`. This is applicable only when `features='missing-only'`.
+
+    Attributes
+    ----------
+    features_ : ndarray of shape (n_missing_features,) or (n_features,)
+        The features indices which will be returned when calling
+        :meth:`transform`. They are computed during :meth:`fit`. If
+        `features='all'`, `features_` is equal to `range(n_features)`.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    SimpleImputer : Univariate imputation of missing values.
+    IterativeImputer : Multivariate imputation of missing values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.impute import MissingIndicator
+    >>> X1 = np.array([[np.nan, 1, 3],
+    ...                [4, 0, np.nan],
+    ...                [8, 1, 0]])
+    >>> X2 = np.array([[5, 1, np.nan],
+    ...                [np.nan, 2, 3],
+    ...                [2, 4, 0]])
+    >>> indicator = MissingIndicator()
+    >>> indicator.fit(X1)
+    MissingIndicator()
+    >>> X2_tr = indicator.transform(X2)
+    >>> X2_tr
+    array([[False,  True],
+           [ True, False],
+           [False, False]])
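+
+    A further sketch (illustrative): `features='all'` keeps one indicator
+    column per input feature.
+
+    >>> indicator_all = MissingIndicator(features='all')
+    >>> indicator_all.fit_transform(X1)
+    array([[ True, False, False],
+           [False, False,  True],
+           [False, False, False]])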
+    """
+
+    _parameter_constraints: dict = {
+        "missing_values": [numbers.Real, numbers.Integral, str, None],
+        "features": [StrOptions({"missing-only", "all"})],
+        "sparse": ["boolean", StrOptions({"auto"})],
+        "error_on_new": ["boolean"],
+    }
+
+    def __init__(
+        self,
+        *,
+        missing_values=np.nan,
+        features="missing-only",
+        sparse="auto",
+        error_on_new=True,
+    ):
+        self.missing_values = missing_values
+        self.features = features
+        self.sparse = sparse
+        self.error_on_new = error_on_new
+
+    def _get_missing_features_info(self, X):
+        """Compute the imputer mask and the indices of the features
+        containing missing values.
+
+        Parameters
+        ----------
+        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
+            The input data with missing values. Note that `X` has been
+            checked in :meth:`fit` and :meth:`transform` before calling this
+            function.
+
+        Returns
+        -------
+        imputer_mask : {ndarray, sparse matrix} of shape \
+        (n_samples, n_features)
+            The imputer mask of the original data.
+
+        features_with_missing : ndarray of shape (n_features_with_missing,)
+            The features containing missing values.
+        """
+        if not self._precomputed:
+            imputer_mask = _get_mask(X, self.missing_values)
+        else:
+            imputer_mask = X
+
+        if sp.issparse(X):
+            imputer_mask.eliminate_zeros()
+
+            if self.features == "missing-only":
+                n_missing = imputer_mask.getnnz(axis=0)
+
+            if self.sparse is False:
+                imputer_mask = imputer_mask.toarray()
+            elif imputer_mask.format == "csr":
+                imputer_mask = imputer_mask.tocsc()
+        else:
+            if not self._precomputed:
+                imputer_mask = _get_mask(X, self.missing_values)
+            else:
+                imputer_mask = X
+
+            if self.features == "missing-only":
+                n_missing = imputer_mask.sum(axis=0)
+
+            if self.sparse is True:
+                imputer_mask = sp.csc_matrix(imputer_mask)
+
+        if self.features == "all":
+            features_indices = np.arange(X.shape[1])
+        else:
+            features_indices = np.flatnonzero(n_missing)
+
+        return imputer_mask, features_indices
+
+    def _validate_input(self, X, in_fit):
+        if not is_scalar_nan(self.missing_values):
+            force_all_finite = True
+        else:
+            force_all_finite = "allow-nan"
+        X = self._validate_data(
+            X,
+            reset=in_fit,
+            accept_sparse=("csc", "csr"),
+            dtype=None,
+            force_all_finite=force_all_finite,
+        )
+        _check_inputs_dtype(X, self.missing_values)
+        if X.dtype.kind not in ("i", "u", "f", "O"):
+            raise ValueError(
+                "MissingIndicator does not support data with "
+                "dtype {0}. Please provide either a numeric array"
+                " (with a floating point or integer dtype) or "
+                "categorical data represented either as an array "
+                "with integer dtype or an array of string values "
+                "with an object dtype.".format(X.dtype)
+            )
+
+        if sp.issparse(X) and self.missing_values == 0:
+            # missing_values = 0 not allowed with sparse data as it would
+            # force densification
+            raise ValueError(
+                "Sparse input with missing_values=0 is "
+                "not supported. Provide a dense "
+                "array instead."
+            )
+
+        return X
+
+    def _fit(self, X, y=None, precomputed=False):
+        """Fit the transformer on `X`.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+            If `precomputed=True`, then `X` is a mask of the input data.
+
+        precomputed : bool
+            Whether the input data is a mask.
+
+        Returns
+        -------
+        imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
+        n_features)
+            The imputer mask of the original data.
+        """
+        if precomputed:
+            if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
+                raise ValueError("precomputed is True but the input data is not a mask")
+            self._precomputed = True
+        else:
+            self._precomputed = False
+
+        # Need not validate X again as it would have already been validated
+        # in the Imputer calling MissingIndicator
+        if not self._precomputed:
+            X = self._validate_input(X, in_fit=True)
+
+        self._n_features = X.shape[1]
+
+        missing_features_info = self._get_missing_features_info(X)
+        self.features_ = missing_features_info[1]
+
+        return missing_features_info[0]
+
+    def fit(self, X, y=None):
+        """Fit the transformer on `X`.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            Fitted estimator.
+        """
+        self._validate_params()
+        self._fit(X, y)
+
+        return self
+
+    def transform(self, X):
+        """Generate missing values indicator for `X`.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data to complete.
+
+        Returns
+        -------
+        Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
+        or (n_samples, n_features_with_missing)
+            The missing indicator for input data. The data type of `Xt`
+            will be boolean.
+        """
+        check_is_fitted(self)
+
+        # Need not validate X again as it would have already been validated
+        # in the Imputer calling MissingIndicator
+        if not self._precomputed:
+            X = self._validate_input(X, in_fit=False)
+        else:
+            if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
+                raise ValueError("precomputed is True but the input data is not a mask")
+
+        imputer_mask, features = self._get_missing_features_info(X)
+
+        if self.features == "missing-only":
+            features_diff_fit_trans = np.setdiff1d(features, self.features_)
+            if self.error_on_new and features_diff_fit_trans.size > 0:
+                raise ValueError(
+                    "The features {} have missing values "
+                    "in transform but have no missing values "
+                    "in fit.".format(features_diff_fit_trans)
+                )
+
+            if self.features_.size < self._n_features:
+                imputer_mask = imputer_mask[:, self.features_]
+
+        return imputer_mask
+
+    def fit_transform(self, X, y=None):
+        """Generate missing values indicator for `X`.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data to complete.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
+        or (n_samples, n_features_with_missing)
+            The missing indicator for input data. The data type of `Xt`
+            will be boolean.
+        """
+        self._validate_params()
+        imputer_mask = self._fit(X, y)
+
+        if self.features_.size < self._n_features:
+            imputer_mask = imputer_mask[:, self.features_]
+
+        return imputer_mask
+
+    def get_feature_names_out(self, input_features=None):
+        """Get output feature names for transformation.
+
+        Parameters
+        ----------
+        input_features : array-like of str or None, default=None
+            Input features.
+
+            - If `input_features` is `None`, then `feature_names_in_` is
+              used as feature names in. If `feature_names_in_` is not defined,
+              then the following input feature names are generated:
+              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
+            - If `input_features` is an array-like, then `input_features` must
+              match `feature_names_in_` if `feature_names_in_` is defined.
+
+        Returns
+        -------
+        feature_names_out : ndarray of str objects
+            Transformed feature names.
+        """
+        input_features = _check_feature_names_in(self, input_features)
+        prefix = self.__class__.__name__.lower()
+        return np.asarray(
+            [
+                f"{prefix}_{feature_name}"
+                for feature_name in input_features[self.features_]
+            ],
+            dtype=object,
+        )
+
+    def _more_tags(self):
+        return {
+            "allow_nan": True,
+            "X_types": ["2darray", "string"],
+            "preserves_dtype": [],
+        }
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/_iterative.py b/mgm/lib/python3.10/site-packages/sklearn/impute/_iterative.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d918bc0c46433a93d4eaa3283eab1820039e528
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/_iterative.py
@@ -0,0 +1,889 @@
+from time import time
+from collections import namedtuple
+from numbers import Integral, Real
+import warnings
+
+from scipy import stats
+import numpy as np
+
+from ..base import clone
+from ..exceptions import ConvergenceWarning
+from ..preprocessing import normalize
+from ..utils import (
+    check_array,
+    check_random_state,
+    is_scalar_nan,
+    _safe_assign,
+    _safe_indexing,
+)
+from ..utils.validation import FLOAT_DTYPES, check_is_fitted
+from ..utils.validation import _check_feature_names_in
+from ..utils._mask import _get_mask
+from ..utils._param_validation import HasMethods, Interval, StrOptions
+
+from ._base import _BaseImputer
+from ._base import SimpleImputer
+from ._base import _check_inputs_dtype
+
+
+_ImputerTriplet = namedtuple(
+    "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"]
+)
+
+
+def _assign_where(X1, X2, cond):
+    """Assign X2 to X1 where cond is True.
+
+    Parameters
+    ----------
+    X1 : ndarray or dataframe of shape (n_samples, n_features)
+        Data.
+
+    X2 : ndarray of shape (n_samples, n_features)
+        Data to be assigned.
+
+    cond : ndarray of shape (n_samples, n_features)
+        Boolean mask to assign data.
+    """
+    if hasattr(X1, "mask"):  # pandas dataframes
+        X1.mask(cond=cond, other=X2, inplace=True)
+    else:  # ndarrays
+        X1[cond] = X2[cond]
+
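+# A minimal usage sketch of `_assign_where` (illustrative): for ndarrays it
+# reduces to plain boolean-mask assignment, e.g.
+#     X1 = np.array([[1.0, 2.0], [3.0, 4.0]])
+#     _assign_where(X1, np.zeros_like(X1), X1 > 2.5)
+#     # X1 is now [[1.0, 2.0], [0.0, 0.0]]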
+
+class IterativeImputer(_BaseImputer):
+    """Multivariate imputer that estimates each feature from all the others.
+
+    A strategy for imputing missing values by modeling each feature with
+    missing values as a function of other features in a round-robin fashion.
+
+    Read more in the :ref:`User Guide <iterative_imputer>`.
+
+    .. versionadded:: 0.21
+
+    .. note::
+
+      This estimator is still **experimental** for now: the predictions
+      and the API might change without any deprecation cycle. To use it,
+      you need to explicitly import `enable_iterative_imputer`::
+
+        >>> # explicitly require this experimental feature
+        >>> from sklearn.experimental import enable_iterative_imputer  # noqa
+        >>> # now you can import normally from sklearn.impute
+        >>> from sklearn.impute import IterativeImputer
+
+    Parameters
+    ----------
+    estimator : estimator object, default=BayesianRidge()
+        The estimator to use at each step of the round-robin imputation.
+        If `sample_posterior=True`, the estimator must support
+        `return_std` in its `predict` method.
+
+    missing_values : int or np.nan, default=np.nan
+        The placeholder for the missing values. All occurrences of
+        `missing_values` will be imputed. For pandas' dataframes with
+        nullable integer dtypes with missing values, `missing_values`
+        should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
+
+    sample_posterior : bool, default=False
+        Whether to sample from the (Gaussian) predictive posterior of the
+        fitted estimator for each imputation. Estimator must support
+        `return_std` in its `predict` method if set to `True`. Set to
+        `True` if using `IterativeImputer` for multiple imputations.
+
+    max_iter : int, default=10
+        Maximum number of imputation rounds to perform before returning the
+        imputations computed during the final round. A round is a single
+        imputation of each feature with missing values. The stopping criterion
+        is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`,
+        where `X_t` is `X` at iteration `t`. Note that early stopping is only
+        applied if `sample_posterior=False`.
+
+    tol : float, default=1e-3
+        Tolerance of the stopping condition.
+
+    n_nearest_features : int, default=None
+        Number of other features to use to estimate the missing values of
+        each feature column. Nearness between features is measured using
+        the absolute correlation coefficient between each feature pair (after
+        initial imputation). To ensure coverage of features throughout the
+        imputation process, the neighbor features are not necessarily nearest,
+        but are drawn with probability proportional to correlation for each
+        imputed target feature. Can provide significant speed-up when the
+        number of features is huge. If `None`, all features will be used.
+
+    initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \
+            default='mean'
+        Which strategy to use to initialize the missing values. Same as the
+        `strategy` parameter in :class:`~sklearn.impute.SimpleImputer`.
+
+    imputation_order : {'ascending', 'descending', 'roman', 'arabic', \
+            'random'}, default='ascending'
+        The order in which the features will be imputed. Possible values:
+
+        - `'ascending'`: From features with fewest missing values to most.
+        - `'descending'`: From features with most missing values to fewest.
+        - `'roman'`: Left to right.
+        - `'arabic'`: Right to left.
+        - `'random'`: A random order for each round.
+
+    skip_complete : bool, default=False
+        If `True` then features with missing values during :meth:`transform`
+        which did not have any missing values during :meth:`fit` will be
+        imputed with the initial imputation method only. Set to `True` if you
+        have many features with no missing values at both :meth:`fit` and
+        :meth:`transform` time to save compute.
+
+    min_value : float or array-like of shape (n_features,), default=-np.inf
+        Minimum possible imputed value. Broadcast to shape `(n_features,)` if
+        scalar. If array-like, expects shape `(n_features,)`, one min value for
+        each feature. The default is `-np.inf`.
+
+        .. versionchanged:: 0.23
+           Added support for array-like.
+
+    max_value : float or array-like of shape (n_features,), default=np.inf
+        Maximum possible imputed value. Broadcast to shape `(n_features,)` if
+        scalar. If array-like, expects shape `(n_features,)`, one max value for
+        each feature. The default is `np.inf`.
+
+        .. versionchanged:: 0.23
+           Added support for array-like.
+
+    verbose : int, default=0
+        Verbosity flag, controls the debug messages that are issued
+        as functions are evaluated. The higher, the more verbose. Can be 0, 1,
+        or 2.
+
+    random_state : int, RandomState instance or None, default=None
+        The seed of the pseudo random number generator to use. Randomizes
+        selection of estimator features if `n_nearest_features` is not `None`,
+        the `imputation_order` if `random`, and the sampling from posterior if
+        `sample_posterior=True`. Use an integer for determinism.
+        See :term:`the Glossary <random_state>`.
+
+    add_indicator : bool, default=False
+        If `True`, a :class:`MissingIndicator` transform will stack onto the
+        output of the imputer's transform. This allows a predictive estimator
+        to account for missingness despite imputation. If a feature has no
+        missing values at fit/train time, the feature won't appear on
+        the missing indicator even if there are missing values at
+        transform/test time.
+
+    keep_empty_features : bool, default=False
+        If True, features that consist exclusively of missing values when
+        `fit` is called are returned in results when `transform` is called.
+        The imputed value is always `0` except when
+        `initial_strategy="constant"` in which case `fill_value` will be
+        used instead.
+
+        .. versionadded:: 1.2
+
+    Attributes
+    ----------
+    initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer`
+        Imputer used to initialize the missing values.
+
+    imputation_sequence_ : list of tuples
+        Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where
+        `feat_idx` is the current feature to be imputed,
+        `neighbor_feat_idx` is the array of other features used to impute the
+        current feature, and `estimator` is the trained estimator used for
+        the imputation. Length is `self.n_features_with_missing_ *
+        self.n_iter_`.
+
+    n_iter_ : int
+        Number of iteration rounds that occurred. Will be less than
+        `self.max_iter` if early stopping criterion was reached.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    n_features_with_missing_ : int
+        Number of features with missing values.
+
+    indicator_ : :class:`~sklearn.impute.MissingIndicator`
+        Indicator used to add binary indicators for missing values.
+        `None` if `add_indicator=False`.
+
+    random_state_ : RandomState instance
+        RandomState instance that is generated either from a seed, the random
+        number generator or by `np.random`.
+
+    See Also
+    --------
+    SimpleImputer : Univariate imputer for completing missing values
+        with simple strategies.
+    KNNImputer : Multivariate imputer that estimates missing features using
+        nearest samples.
+
+    Notes
+    -----
+    To support imputation in inductive mode we store each feature's estimator
+    during the :meth:`fit` phase, and predict without refitting (in order)
+    during the :meth:`transform` phase.
+
+    Features which contain all missing values at :meth:`fit` are discarded upon
+    :meth:`transform`.
+
+    Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))`
+    where :math:`k` = `max_iter`, :math:`n` the number of samples and
+    :math:`p` the number of features. It thus becomes prohibitively costly when
+    the number of features increases. Setting
+    `n_nearest_features << n_features`, `skip_complete=True` or increasing `tol`
+    can help to reduce its computational cost.
+
+    Depending on the nature of missing values, simple imputers can be
+    preferable in a prediction context.
+
+    References
+    ----------
+    .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
+        Multivariate Imputation by Chained Equations in R". Journal of
+        Statistical Software 45: 1-67.
+        <https://www.jstatsoft.org/article/view/v045i03>`_
+
+    .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in
+        Multivariate Data Suitable for use with an Electronic Computer".
+        Journal of the Royal Statistical Society 22(2): 302-306.
+        <https://www.jstor.org/stable/2984099>`_
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.experimental import enable_iterative_imputer
+    >>> from sklearn.impute import IterativeImputer
+    >>> imp_mean = IterativeImputer(random_state=0)
+    >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
+    IterativeImputer(random_state=0)
+    >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
+    >>> imp_mean.transform(X)
+    array([[ 6.9584...,  2.       ,  3.        ],
+           [ 4.       ,  2.6000...,  6.        ],
+           [10.       ,  4.9999...,  9.        ]])
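+
+    A further sketch (illustrative): `sample_posterior=True` draws each
+    imputation from the predictive posterior, the setting intended for
+    multiple imputation; the estimator's `predict` must support `return_std`.
+
+    >>> imp_post = IterativeImputer(sample_posterior=True, random_state=0)
+    >>> _ = imp_post.fit([[1, 2], [3, 6], [4, 8], [np.nan, 3], [7, np.nan]])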
+    """
+
+    _parameter_constraints: dict = {
+        **_BaseImputer._parameter_constraints,
+        "estimator": [None, HasMethods(["fit", "predict"])],
+        "sample_posterior": ["boolean"],
+        "max_iter": [Interval(Integral, 0, None, closed="left")],
+        "tol": [Interval(Real, 0, None, closed="left")],
+        "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")],
+        "initial_strategy": [
+            StrOptions({"mean", "median", "most_frequent", "constant"})
+        ],
+        "imputation_order": [
+            StrOptions({"ascending", "descending", "roman", "arabic", "random"})
+        ],
+        "skip_complete": ["boolean"],
+        "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
+        "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
+        "verbose": ["verbose"],
+        "random_state": ["random_state"],
+    }
+
+    def __init__(
+        self,
+        estimator=None,
+        *,
+        missing_values=np.nan,
+        sample_posterior=False,
+        max_iter=10,
+        tol=1e-3,
+        n_nearest_features=None,
+        initial_strategy="mean",
+        imputation_order="ascending",
+        skip_complete=False,
+        min_value=-np.inf,
+        max_value=np.inf,
+        verbose=0,
+        random_state=None,
+        add_indicator=False,
+        keep_empty_features=False,
+    ):
+        super().__init__(
+            missing_values=missing_values,
+            add_indicator=add_indicator,
+            keep_empty_features=keep_empty_features,
+        )
+
+        self.estimator = estimator
+        self.sample_posterior = sample_posterior
+        self.max_iter = max_iter
+        self.tol = tol
+        self.n_nearest_features = n_nearest_features
+        self.initial_strategy = initial_strategy
+        self.imputation_order = imputation_order
+        self.skip_complete = skip_complete
+        self.min_value = min_value
+        self.max_value = max_value
+        self.verbose = verbose
+        self.random_state = random_state
+
+    def _impute_one_feature(
+        self,
+        X_filled,
+        mask_missing_values,
+        feat_idx,
+        neighbor_feat_idx,
+        estimator=None,
+        fit_mode=True,
+    ):
+        """Impute a single feature from the others provided.
+
+        This function predicts the missing values of one of the features using
+        the current estimates of all the other features. The `estimator` must
+        support `return_std=True` in its `predict` method for this function
+        to work.
+
+        Parameters
+        ----------
+        X_filled : ndarray
+            Input data with the most recent imputations.
+
+        mask_missing_values : ndarray
+            Input data's missing indicator matrix.
+
+        feat_idx : int
+            Index of the feature currently being imputed.
+
+        neighbor_feat_idx : ndarray
+            Indices of the features to be used in imputing `feat_idx`.
+
+        estimator : object
+            The estimator to use at this step of the round-robin imputation.
+            If `sample_posterior=True`, the estimator must support
+            `return_std` in its `predict` method.
+            If None, it will be cloned from self._estimator.
+
+        fit_mode : boolean, default=True
+            Whether to fit and predict with the estimator or just predict.
+
+        Returns
+        -------
+        X_filled : ndarray
+            Input data with `X_filled[missing_row_mask, feat_idx]` updated.
+
+        estimator : estimator with sklearn API
+            The fitted estimator used to impute
+            `X_filled[missing_row_mask, feat_idx]`.
+        """
+        if estimator is None and fit_mode is False:
+            raise ValueError(
+                "If fit_mode is False, then an already-fitted "
+                "estimator should be passed in."
+            )
+
+        if estimator is None:
+            estimator = clone(self._estimator)
+
+        missing_row_mask = mask_missing_values[:, feat_idx]
+        if fit_mode:
+            X_train = _safe_indexing(
+                _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
+                ~missing_row_mask,
+                axis=0,
+            )
+            y_train = _safe_indexing(
+                _safe_indexing(X_filled, feat_idx, axis=1),
+                ~missing_row_mask,
+                axis=0,
+            )
+            estimator.fit(X_train, y_train)
+
+        # if no missing values, don't predict
+        if np.sum(missing_row_mask) == 0:
+            return X_filled, estimator
+
+        # get posterior samples if there is at least one missing value
+        X_test = _safe_indexing(
+            _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
+            missing_row_mask,
+            axis=0,
+        )
+        if self.sample_posterior:
+            mus, sigmas = estimator.predict(X_test, return_std=True)
+            imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
+            # two types of problems: (1) non-positive sigmas
+            # (2) mus outside legal range of min_value and max_value
+            # (results in inf sample)
+            positive_sigmas = sigmas > 0
+            imputed_values[~positive_sigmas] = mus[~positive_sigmas]
+            mus_too_low = mus < self._min_value[feat_idx]
+            imputed_values[mus_too_low] = self._min_value[feat_idx]
+            mus_too_high = mus > self._max_value[feat_idx]
+            imputed_values[mus_too_high] = self._max_value[feat_idx]
+            # the rest can be sampled without statistical issues
+            inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
+            mus = mus[inrange_mask]
+            sigmas = sigmas[inrange_mask]
+            a = (self._min_value[feat_idx] - mus) / sigmas
+            b = (self._max_value[feat_idx] - mus) / sigmas
+
+            truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
+            imputed_values[inrange_mask] = truncated_normal.rvs(
+                random_state=self.random_state_
+            )
+        else:
+            imputed_values = estimator.predict(X_test)
+            imputed_values = np.clip(
+                imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]
+            )
+
+        # update the feature
+        _safe_assign(
+            X_filled,
+            imputed_values,
+            row_indexer=missing_row_mask,
+            column_indexer=feat_idx,
+        )
+        return X_filled, estimator
+
+    def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):
+        """Get a list of other features to predict `feat_idx`.
+
+        If `self.n_nearest_features` is strictly less than the total
+        number of features, then use a probability proportional to the absolute
+        correlation between `feat_idx` and each other feature to randomly
+        choose a subsample of the other features (without replacement).
+
+        Parameters
+        ----------
+        n_features : int
+            Number of features in `X`.
+
+        feat_idx : int
+            Index of the feature currently being imputed.
+
+        abs_corr_mat : ndarray, shape (n_features, n_features)
+            Absolute correlation matrix of `X`. The diagonal has been zeroed
+            out and each feature has been normalized to sum to 1. Can be None.
+
+        Returns
+        -------
+        neighbor_feat_idx : array-like
+            The features to use to impute `feat_idx`.
+        """
+        if self.n_nearest_features is not None and self.n_nearest_features < n_features:
+            p = abs_corr_mat[:, feat_idx]
+            neighbor_feat_idx = self.random_state_.choice(
+                np.arange(n_features), self.n_nearest_features, replace=False, p=p
+            )
+        else:
+            inds_left = np.arange(feat_idx)
+            inds_right = np.arange(feat_idx + 1, n_features)
+            neighbor_feat_idx = np.concatenate((inds_left, inds_right))
+        return neighbor_feat_idx
+
+    def _get_ordered_idx(self, mask_missing_values):
+        """Decide in what order we will update the features.
+
+        As a homage to the MICE R package, we will have 4 main options of
+        how to order the updates, and use a random order if anything else
+        is specified.
+
+        Also, this function skips features which have no missing values.
+
+        Parameters
+        ----------
+        mask_missing_values : array-like, shape (n_samples, n_features)
+            Input data's missing indicator matrix, where `n_samples` is the
+            number of samples and `n_features` is the number of features.
+
+        Returns
+        -------
+        ordered_idx : ndarray, shape (n_features,)
+            The order in which to impute the features.
+        """
+        frac_of_missing_values = mask_missing_values.mean(axis=0)
+        if self.skip_complete:
+            missing_values_idx = np.flatnonzero(frac_of_missing_values)
+        else:
+            missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
+        if self.imputation_order == "roman":
+            ordered_idx = missing_values_idx
+        elif self.imputation_order == "arabic":
+            ordered_idx = missing_values_idx[::-1]
+        elif self.imputation_order == "ascending":
+            n = len(frac_of_missing_values) - len(missing_values_idx)
+            ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:]
+        elif self.imputation_order == "descending":
+            n = len(frac_of_missing_values) - len(missing_values_idx)
+            ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1]
+        elif self.imputation_order == "random":
+            ordered_idx = missing_values_idx
+            self.random_state_.shuffle(ordered_idx)
+        return ordered_idx
+
+    def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
+        """Get absolute correlation matrix between features.
+
+        Parameters
+        ----------
+        X_filled : ndarray, shape (n_samples, n_features)
+            Input data with the most recent imputations.
+
+        tolerance : float, default=1e-6
+            `abs_corr_mat` can have nans, which will be replaced
+            with `tolerance`.
+
+        Returns
+        -------
+        abs_corr_mat : ndarray, shape (n_features, n_features)
+            Absolute correlation matrix of `X` at the beginning of the
+            current round. The diagonal has been zeroed out and each feature's
+            absolute correlations with all others have been normalized to sum
+            to 1.
+        """
+        n_features = X_filled.shape[1]
+        if self.n_nearest_features is None or self.n_nearest_features >= n_features:
+            return None
+        with np.errstate(invalid="ignore"):
+            # if a feature in the neighborhood has only a single value
+            # (e.g., categorical feature), the std. dev. will be zero and
+            # np.corrcoef will raise a warning due to a division by zero
+            abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
+        # np.corrcoef is not defined for features with zero std
+        abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
+        # ensures exploration, i.e. at least some probability of sampling
+        np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
+        # features are not their own neighbors
+        np.fill_diagonal(abs_corr_mat, 0)
+        # needs to sum to 1 for np.random.choice sampling
+        abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False)
+        return abs_corr_mat
+
+    def _initial_imputation(self, X, in_fit=False):
+        """Perform initial imputation for input `X`.
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        in_fit : bool, default=False
+            Whether function is called in :meth:`fit`.
+
+        Returns
+        -------
+        Xt : ndarray of shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        X_filled : ndarray of shape (n_samples, n_features)
+            Input data with the most recent imputations.
+
+        mask_missing_values : ndarray of shape (n_samples, n_features)
+            Input data's missing indicator matrix, where `n_samples` is the
+            number of samples and `n_features` is the number of features,
+            masked by non-missing features.
+
+        X_missing_mask : ndarray, shape (n_samples, n_features)
+            Input data's mask matrix indicating missing datapoints, where
+            `n_samples` is the number of samples and `n_features` is the
+            number of features.
+        """
+        if is_scalar_nan(self.missing_values):
+            force_all_finite = "allow-nan"
+        else:
+            force_all_finite = True
+
+        X = self._validate_data(
+            X,
+            dtype=FLOAT_DTYPES,
+            order="F",
+            reset=in_fit,
+            force_all_finite=force_all_finite,
+        )
+        _check_inputs_dtype(X, self.missing_values)
+
+        X_missing_mask = _get_mask(X, self.missing_values)
+        mask_missing_values = X_missing_mask.copy()
+        if self.initial_imputer_ is None:
+            self.initial_imputer_ = SimpleImputer(
+                missing_values=self.missing_values,
+                strategy=self.initial_strategy,
+                keep_empty_features=self.keep_empty_features,
+            )
+            X_filled = self.initial_imputer_.fit_transform(X)
+        else:
+            X_filled = self.initial_imputer_.transform(X)
+
+        valid_mask = np.flatnonzero(
+            np.logical_not(np.isnan(self.initial_imputer_.statistics_))
+        )
+
+        if not self.keep_empty_features:
+            # drop empty features
+            Xt = X[:, valid_mask]
+            mask_missing_values = mask_missing_values[:, valid_mask]
+        else:
+            # mark empty features as not missing and keep the original
+            # imputation
+            mask_missing_values[:, valid_mask] = True
+            Xt = X
+
+        return Xt, X_filled, mask_missing_values, X_missing_mask
+
+    @staticmethod
+    def _validate_limit(limit, limit_type, n_features):
+        """Validate the limits (min/max) of the feature values.
+
+        Converts scalar min/max limits to vectors of shape `(n_features,)`.
+
+        Parameters
+        ----------
+        limit : scalar or array-like
+            The user-specified limit (i.e., min_value or max_value).
+        limit_type : {'max', 'min'}
+            Type of limit to validate.
+        n_features : int
+            Number of features in the dataset.
+
+        Returns
+        -------
+        limit : ndarray, shape (n_features,)
+            Array of limits, one for each feature.
+        """
+        limit_bound = np.inf if limit_type == "max" else -np.inf
+        limit = limit_bound if limit is None else limit
+        if np.isscalar(limit):
+            limit = np.full(n_features, limit)
+        limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False)
+        if not limit.shape[0] == n_features:
+            raise ValueError(
+                f"'{limit_type}_value' should be of "
+                f"shape ({n_features},) when an array-like "
+                f"is provided. Got {limit.shape}, instead."
+            )
+        return limit
+
+    def fit_transform(self, X, y=None):
+        """Fit the imputer on `X` and return the transformed `X`.
+
+        Parameters
+        ----------
+        X : array-like, shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        Xt : array-like, shape (n_samples, n_features)
+            The imputed input data.
+        """
+        self._validate_params()
+        self.random_state_ = getattr(
+            self, "random_state_", check_random_state(self.random_state)
+        )
+
+        if self.estimator is None:
+            from ..linear_model import BayesianRidge
+
+            self._estimator = BayesianRidge()
+        else:
+            self._estimator = clone(self.estimator)
+
+        self.imputation_sequence_ = []
+
+        self.initial_imputer_ = None
+
+        X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
+            X, in_fit=True
+        )
+
+        super()._fit_indicator(complete_mask)
+        X_indicator = super()._transform_indicator(complete_mask)
+
+        if self.max_iter == 0 or np.all(mask_missing_values):
+            self.n_iter_ = 0
+            return super()._concatenate_indicator(Xt, X_indicator)
+
+        # Edge case: a single feature. We return the initial imputation.
+        if Xt.shape[1] == 1:
+            self.n_iter_ = 0
+            return super()._concatenate_indicator(Xt, X_indicator)
+
+        self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
+        self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
+
+        if not np.all(np.greater(self._max_value, self._min_value)):
+            raise ValueError("One (or more) features have min_value >= max_value.")
+
+        # Order in which to impute.
+        # Note: this is probably too slow for large feature data (d > 100000);
+        # a faster approach would be welcome.
+        # See: https://goo.gl/KyCNwj and subsequent comments.
+        ordered_idx = self._get_ordered_idx(mask_missing_values)
+        self.n_features_with_missing_ = len(ordered_idx)
+
+        abs_corr_mat = self._get_abs_corr_mat(Xt)
+
+        n_samples, n_features = Xt.shape
+        if self.verbose > 0:
+            print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
+        start_t = time()
+        if not self.sample_posterior:
+            Xt_previous = Xt.copy()
+            normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
+        for self.n_iter_ in range(1, self.max_iter + 1):
+            if self.imputation_order == "random":
+                ordered_idx = self._get_ordered_idx(mask_missing_values)
+
+            for feat_idx in ordered_idx:
+                neighbor_feat_idx = self._get_neighbor_feat_idx(
+                    n_features, feat_idx, abs_corr_mat
+                )
+                Xt, estimator = self._impute_one_feature(
+                    Xt,
+                    mask_missing_values,
+                    feat_idx,
+                    neighbor_feat_idx,
+                    estimator=None,
+                    fit_mode=True,
+                )
+                estimator_triplet = _ImputerTriplet(
+                    feat_idx, neighbor_feat_idx, estimator
+                )
+                self.imputation_sequence_.append(estimator_triplet)
+
+            if self.verbose > 1:
+                print(
+                    "[IterativeImputer] Ending imputation round "
+                    "%d/%d, elapsed time %0.2f"
+                    % (self.n_iter_, self.max_iter, time() - start_t)
+                )
+
+            if not self.sample_posterior:
+                inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
+                if self.verbose > 0:
+                    print(
+                        "[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
+                            inf_norm, normalized_tol
+                        )
+                    )
+                if inf_norm < normalized_tol:
+                    if self.verbose > 0:
+                        print("[IterativeImputer] Early stopping criterion reached.")
+                    break
+                Xt_previous = Xt.copy()
+        else:
+            if not self.sample_posterior:
+                warnings.warn(
+                    "[IterativeImputer] Early stopping criterion not reached.",
+                    ConvergenceWarning,
+                )
+        _assign_where(Xt, X, cond=~mask_missing_values)
+
+        return super()._concatenate_indicator(Xt, X_indicator)
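+    # Convergence in numbers (a sketch assuming sample_posterior=False): with
+    # tol=1e-3 and observed entries whose largest magnitude is 50, the loop
+    # above stops once the infinity norm (maximum absolute row sum) of the
+    # difference between two successive rounds drops below 1e-3 * 50 = 0.05;
+    # with sample_posterior=True no such check is made and all max_iter
+    # rounds run.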
+
+    def transform(self, X):
+        """Impute all missing values in `X`.
+
+        Note that this is stochastic: if `random_state` is not fixed,
+        repeated calls or permuted input will yield different results.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            The input data to complete.
+
+        Returns
+        -------
+        Xt : array-like, shape (n_samples, n_features)
+             The imputed input data.
+        """
+        check_is_fitted(self)
+
+        X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
+            X, in_fit=False
+        )
+
+        X_indicator = super()._transform_indicator(complete_mask)
+
+        if self.n_iter_ == 0 or np.all(mask_missing_values):
+            return super()._concatenate_indicator(Xt, X_indicator)
+
+        imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
+        i_rnd = 0
+        if self.verbose > 0:
+            print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
+        start_t = time()
+        for it, estimator_triplet in enumerate(self.imputation_sequence_):
+            Xt, _ = self._impute_one_feature(
+                Xt,
+                mask_missing_values,
+                estimator_triplet.feat_idx,
+                estimator_triplet.neighbor_feat_idx,
+                estimator=estimator_triplet.estimator,
+                fit_mode=False,
+            )
+            if not (it + 1) % imputations_per_round:
+                if self.verbose > 1:
+                    print(
+                        "[IterativeImputer] Ending imputation round "
+                        "%d/%d, elapsed time %0.2f"
+                        % (i_rnd + 1, self.n_iter_, time() - start_t)
+                    )
+                i_rnd += 1
+
+        _assign_where(Xt, X, cond=~mask_missing_values)
+
+        return super()._concatenate_indicator(Xt, X_indicator)
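+    # Replay arithmetic (an illustrative sketch): if `fit` ran n_iter_ == 2
+    # rounds over 3 features with missing values, `imputation_sequence_`
+    # holds 2 * 3 == 6 triplets, `imputations_per_round` == 3, and the loop
+    # above replays all 6 in fit order without refitting any estimator
+    # (fit_mode=False).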
+
+    def fit(self, X, y=None):
+        """Fit the imputer on `X` and return self.
+
+        Parameters
+        ----------
+        X : array-like, shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            Fitted estimator.
+        """
+        self.fit_transform(X)
+        return self
+
+    def get_feature_names_out(self, input_features=None):
+        """Get output feature names for transformation.
+
+        Parameters
+        ----------
+        input_features : array-like of str or None, default=None
+            Input features.
+
+            - If `input_features` is `None`, then `feature_names_in_` is
+              used as feature names in. If `feature_names_in_` is not defined,
+              then the following input feature names are generated:
+              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
+            - If `input_features` is an array-like, then `input_features` must
+              match `feature_names_in_` if `feature_names_in_` is defined.
+
+        Returns
+        -------
+        feature_names_out : ndarray of str objects
+            Transformed feature names.
+        """
+        input_features = _check_feature_names_in(self, input_features)
+        names = self.initial_imputer_.get_feature_names_out(input_features)
+        return self._concatenate_indicator_feature_names_out(names, input_features)
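+    # Name-flow sketch (illustrative): the initial SimpleImputer decides
+    # which input columns survive (empty ones are dropped unless
+    # keep_empty_features=True), and any MissingIndicator names, e.g.
+    # "missingindicator_a", are appended after them.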
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/_knn.py b/mgm/lib/python3.10/site-packages/sklearn/impute/_knn.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e58808b76ecfbac2a2395d8f43b974e48381412
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/_knn.py
@@ -0,0 +1,391 @@
+# Authors: Ashim Bhattarai 
+#          Thomas J Fan 
+# License: BSD 3 clause
+
+from numbers import Integral
+import numpy as np
+
+from ._base import _BaseImputer
+from ..utils.validation import FLOAT_DTYPES
+from ..metrics import pairwise_distances_chunked
+from ..metrics.pairwise import _NAN_METRICS
+from ..neighbors._base import _get_weights
+from ..utils import is_scalar_nan
+from ..utils._mask import _get_mask
+from ..utils.validation import check_is_fitted
+from ..utils.validation import _check_feature_names_in
+from ..utils._param_validation import Hidden, Interval, StrOptions
+
+
+class KNNImputer(_BaseImputer):
+    """Imputation for completing missing values using k-Nearest Neighbors.
+
+    Each sample's missing values are imputed using the mean value from
+    `n_neighbors` nearest neighbors found in the training set. Two samples are
+    close if the features that neither sample is missing are close.
+
+    Read more in the :ref:`User Guide <knnimpute>`.
+
+    .. versionadded:: 0.22
+
+    Parameters
+    ----------
+    missing_values : int, float, str, np.nan or None, default=np.nan
+        The placeholder for the missing values. All occurrences of
+        `missing_values` will be imputed. For pandas' dataframes with
+        nullable integer dtypes with missing values, `missing_values`
+        should be set to np.nan, since `pd.NA` will be converted to np.nan.
+
+    n_neighbors : int, default=5
+        Number of neighboring samples to use for imputation.
+
+    weights : {'uniform', 'distance'} or callable, default='uniform'
+        Weight function used in prediction.  Possible values:
+
+        - 'uniform' : uniform weights. All points in each neighborhood are
+          weighted equally.
+        - 'distance' : weight points by the inverse of their distance.
+          In this case, closer neighbors of a query point will have a
+          greater influence than neighbors that are further away.
+        - callable : a user-defined function which accepts an
+          array of distances, and returns an array of the same shape
+          containing the weights.
+
+    metric : {'nan_euclidean'} or callable, default='nan_euclidean'
+        Distance metric for searching neighbors. Possible values:
+
+        - 'nan_euclidean'
+        - callable : a user-defined function which conforms to the definition
+          of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
+          accepts two arrays, X and Y, and a `missing_values` keyword in
+          `kwds` and returns a scalar distance value.
+
+    copy : bool, default=True
+        If True, a copy of X will be created. If False, imputation will
+        be done in-place whenever possible.
+
+    add_indicator : bool, default=False
+        If True, a :class:`MissingIndicator` transform will stack onto the
+        output of the imputer's transform. This allows a predictive estimator
+        to account for missingness despite imputation. If a feature has no
+        missing values at fit/train time, the feature won't appear on the
+        missing indicator even if there are missing values at transform/test
+        time.
+
+    keep_empty_features : bool, default=False
+        If True, features that consist exclusively of missing values when
+        `fit` is called are returned in results when `transform` is called.
+        The imputed value is always `0`.
+
+        .. versionadded:: 1.2
+
+    Attributes
+    ----------
+    indicator_ : :class:`~sklearn.impute.MissingIndicator`
+        Indicator used to add binary indicators for missing values.
+        ``None`` if add_indicator is False.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    SimpleImputer : Univariate imputer for completing missing values
+        with simple strategies.
+    IterativeImputer : Multivariate imputer that estimates values to impute for
+        each feature with missing values from all the others.
+
+    References
+    ----------
+    * Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
+      Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
+      value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
+      no. 6, 2001 Pages 520-525.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.impute import KNNImputer
+    >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
+    >>> imputer = KNNImputer(n_neighbors=2)
+    >>> imputer.fit_transform(X)
+    array([[1. , 2. , 4. ],
+           [3. , 4. , 3. ],
+           [5.5, 6. , 5. ],
+           [8. , 8. , 7. ]])
+    """
+
+    _parameter_constraints: dict = {
+        **_BaseImputer._parameter_constraints,
+        "n_neighbors": [Interval(Integral, 1, None, closed="left")],
+        "weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)],
+        "metric": [StrOptions(set(_NAN_METRICS)), callable],
+        "copy": ["boolean"],
+    }
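+    # Validation sketch (assuming scikit-learn's _param_validation machinery):
+    # the Interval above constrains n_neighbors to an int >= 1, so e.g.
+    # KNNImputer(n_neighbors=0).fit(X) is expected to raise an
+    # InvalidParameterError before any imputation happens.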
+
+    def __init__(
+        self,
+        *,
+        missing_values=np.nan,
+        n_neighbors=5,
+        weights="uniform",
+        metric="nan_euclidean",
+        copy=True,
+        add_indicator=False,
+        keep_empty_features=False,
+    ):
+        super().__init__(
+            missing_values=missing_values,
+            add_indicator=add_indicator,
+            keep_empty_features=keep_empty_features,
+        )
+        self.n_neighbors = n_neighbors
+        self.weights = weights
+        self.metric = metric
+        self.copy = copy
+
+    def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):
+        """Helper function to impute a single column.
+
+        Parameters
+        ----------
+        dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
+            Distance matrix between the receivers and potential donors from
+            training set. There must be at least one non-nan distance between
+            a receiver and a potential donor.
+
+        n_neighbors : int
+            Number of neighbors to consider.
+
+        fit_X_col : ndarray of shape (n_potential_donors,)
+            Column of potential donors from training set.
+
+        mask_fit_X_col : ndarray of shape (n_potential_donors,)
+            Missing mask for fit_X_col.
+
+        Returns
+        -------
+        imputed_values : ndarray of shape (n_receivers,)
+            Imputed values for the receivers.
+        """
+        # Get donors
+        donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[
+            :, :n_neighbors
+        ]
+
+        # Get weight matrix from distance matrix
+        donors_dist = dist_pot_donors[
+            np.arange(donors_idx.shape[0])[:, None], donors_idx
+        ]
+
+        weight_matrix = _get_weights(donors_dist, self.weights)
+
+        # fill nans with zeros
+        if weight_matrix is not None:
+            weight_matrix[np.isnan(weight_matrix)] = 0.0
+
+        # Retrieve donor values and calculate kNN average
+        donors = fit_X_col.take(donors_idx)
+        donors_mask = mask_fit_X_col.take(donors_idx)
+        donors = np.ma.array(donors, mask=donors_mask)
+
+        return np.ma.average(donors, axis=1, weights=weight_matrix).data
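+    # Masked donor averaging in miniature (illustrative): a donor value that
+    # is itself missing is masked out of the mean.
+    #
+    #   >>> import numpy.ma as ma
+    #   >>> ma.average(ma.array([[2.0, 4.0]], mask=[[False, True]]), axis=1).data
+    #   array([2.])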
+
+    def fit(self, X, y=None):
+        """Fit the imputer on X.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Input data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            The fitted `KNNImputer` class instance.
+        """
+        self._validate_params()
+        # Check data integrity and calling arguments
+        if not is_scalar_nan(self.missing_values):
+            force_all_finite = True
+        else:
+            force_all_finite = "allow-nan"
+
+        X = self._validate_data(
+            X,
+            accept_sparse=False,
+            dtype=FLOAT_DTYPES,
+            force_all_finite=force_all_finite,
+            copy=self.copy,
+        )
+
+        self._fit_X = X
+        self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
+        self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
+
+        super()._fit_indicator(self._mask_fit_X)
+
+        return self
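+    # _valid_mask sketch (illustrative): if the second of three training
+    # columns is entirely missing, _mask_fit_X has an all-True middle column,
+    # so _valid_mask is [True, False, True] and that column is dropped at
+    # transform time unless keep_empty_features=True.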
+
+    def transform(self, X):
+        """Impute all missing values in X.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            The input data to complete.
+
+        Returns
+        -------
+        X : array-like of shape (n_samples, n_output_features)
+            The imputed dataset. `n_output_features` is the number of features
+            that are not always missing during `fit`.
+        """
+
+        check_is_fitted(self)
+        if not is_scalar_nan(self.missing_values):
+            force_all_finite = True
+        else:
+            force_all_finite = "allow-nan"
+        X = self._validate_data(
+            X,
+            accept_sparse=False,
+            dtype=FLOAT_DTYPES,
+            force_all_finite=force_all_finite,
+            copy=self.copy,
+            reset=False,
+        )
+
+        mask = _get_mask(X, self.missing_values)
+        mask_fit_X = self._mask_fit_X
+        valid_mask = self._valid_mask
+
+        X_indicator = super()._transform_indicator(mask)
+
+        # Removes columns where the training data is all nan
+        if not np.any(mask):
+            # No missing values in X
+            if self.keep_empty_features:
+                Xc = X
+                Xc[:, ~valid_mask] = 0
+            else:
+                Xc = X[:, valid_mask]
+
+            # Even with no missing values in X, concatenate the indicator so
+            # that the output has the same columns as when values are missing.
+            return super()._concatenate_indicator(Xc, X_indicator)
+
+        row_missing_idx = np.flatnonzero(mask.any(axis=1))
+
+        non_missing_fit_X = np.logical_not(mask_fit_X)
+
+        # Maps from indices from X to indices in dist matrix
+        dist_idx_map = np.zeros(X.shape[0], dtype=int)
+        dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
+
+        def process_chunk(dist_chunk, start):
+            row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
+
+            # Find and impute missing by column
+            for col in range(X.shape[1]):
+                if not valid_mask[col]:
+                    # column was all missing during training
+                    continue
+
+                col_mask = mask[row_missing_chunk, col]
+                if not np.any(col_mask):
+                    # column has no missing values
+                    continue
+
+                (potential_donors_idx,) = np.nonzero(non_missing_fit_X[:, col])
+
+                # receivers_idx are indices in X
+                receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
+
+                # distances for samples that needed imputation for column
+                dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
+                    :, potential_donors_idx
+                ]
+
+                # receivers with all nan distances impute with mean
+                all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
+                all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
+
+                if all_nan_receivers_idx.size:
+                    col_mean = np.ma.array(
+                        self._fit_X[:, col], mask=mask_fit_X[:, col]
+                    ).mean()
+                    X[all_nan_receivers_idx, col] = col_mean
+
+                    if len(all_nan_receivers_idx) == len(receivers_idx):
+                        # all receivers imputed with mean
+                        continue
+
+                    # receivers with at least one defined distance
+                    receivers_idx = receivers_idx[~all_nan_dist_mask]
+                    dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
+                        :, potential_donors_idx
+                    ]
+
+                n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
+                value = self._calc_impute(
+                    dist_subset,
+                    n_neighbors,
+                    self._fit_X[potential_donors_idx, col],
+                    mask_fit_X[potential_donors_idx, col],
+                )
+                X[receivers_idx, col] = value
+
+        # process in fixed-memory chunks
+        gen = pairwise_distances_chunked(
+            X[row_missing_idx, :],
+            self._fit_X,
+            metric=self.metric,
+            missing_values=self.missing_values,
+            force_all_finite=force_all_finite,
+            reduce_func=process_chunk,
+        )
+        for chunk in gen:
+            # process_chunk modifies X in place. No return value.
+            pass
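+        # Chunking sketch (illustrative sizes, assuming a working memory
+        # budget that fits 2000 rows per block): with 10000 rows needing
+        # imputation, process_chunk runs five times with start = 0, 2000,
+        # ..., 8000, each time writing imputed values into X in place.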
+
+        if self.keep_empty_features:
+            Xc = X
+            Xc[:, ~valid_mask] = 0
+        else:
+            Xc = X[:, valid_mask]
+
+        return super()._concatenate_indicator(Xc, X_indicator)
+
+    def get_feature_names_out(self, input_features=None):
+        """Get output feature names for transformation.
+
+        Parameters
+        ----------
+        input_features : array-like of str or None, default=None
+            Input features.
+
+            - If `input_features` is `None`, then `feature_names_in_` is
+              used as feature names in. If `feature_names_in_` is not defined,
+              then the following input feature names are generated:
+              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
+            - If `input_features` is an array-like, then `input_features` must
+              match `feature_names_in_` if `feature_names_in_` is defined.
+
+        Returns
+        -------
+        feature_names_out : ndarray of str objects
+            Transformed feature names.
+        """
+        input_features = _check_feature_names_in(self, input_features)
+        names = input_features[self._valid_mask]
+        return self._concatenate_indicator_feature_names_out(names, input_features)
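+    # Name-propagation sketch (assuming a fit on columns ["a", "b", "c"]
+    # where "b" was entirely missing and add_indicator=False):
+    # get_feature_names_out(["a", "b", "c"]) keeps only the valid columns and
+    # returns array(['a', 'c'], dtype=object).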
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9dead62338dbc368b68bfdad585bd45f871ae26
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3beda2b2890472e4ab8b09324f4293396b9249b6
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..229318f681de8c55a4c7a1022110de4396b291a2
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a6a477b06f5d57d59762d34a67342723b56b83d3
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..392848c61123952b9f9d63ee4e816691f41ca423
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..fedfdebb20a1f99f6df7ff3ea202c38c698774a9
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_base.py
@@ -0,0 +1,109 @@
+import pytest
+
+import numpy as np
+
+from sklearn.utils._mask import _get_mask
+from sklearn.utils._testing import _convert_container, assert_allclose
+
+from sklearn.impute._base import _BaseImputer
+from sklearn.impute._iterative import _assign_where
+
+
+@pytest.fixture
+def data():
+    X = np.random.randn(10, 2)
+    X[::2] = np.nan
+    return X
+
+
+class NoFitIndicatorImputer(_BaseImputer):
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X, y=None):
+        return self._concatenate_indicator(X, self._transform_indicator(X))
+
+
+class NoTransformIndicatorImputer(_BaseImputer):
+    def fit(self, X, y=None):
+        mask = _get_mask(X, value_to_mask=np.nan)
+        super()._fit_indicator(mask)
+        return self
+
+    def transform(self, X, y=None):
+        return self._concatenate_indicator(X, None)
+
+
+class NoPrecomputedMaskFit(_BaseImputer):
+    def fit(self, X, y=None):
+        self._fit_indicator(X)
+        return self
+
+    def transform(self, X):
+        return self._concatenate_indicator(X, self._transform_indicator(X))
+
+
+class NoPrecomputedMaskTransform(_BaseImputer):
+    def fit(self, X, y=None):
+        mask = _get_mask(X, value_to_mask=np.nan)
+        self._fit_indicator(mask)
+        return self
+
+    def transform(self, X):
+        return self._concatenate_indicator(X, self._transform_indicator(X))
+
+
+def test_base_imputer_not_fit(data):
+    imputer = NoFitIndicatorImputer(add_indicator=True)
+    err_msg = "Make sure to call _fit_indicator before _transform_indicator"
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit(data).transform(data)
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit_transform(data)
+
+
+def test_base_imputer_not_transform(data):
+    imputer = NoTransformIndicatorImputer(add_indicator=True)
+    err_msg = (
+        "Call _fit_indicator and _transform_indicator in the imputer implementation"
+    )
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit(data).transform(data)
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit_transform(data)
+
+
+def test_base_no_precomputed_mask_fit(data):
+    imputer = NoPrecomputedMaskFit(add_indicator=True)
+    err_msg = "precomputed is True but the input data is not a mask"
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit(data)
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit_transform(data)
+
+
+def test_base_no_precomputed_mask_transform(data):
+    imputer = NoPrecomputedMaskTransform(add_indicator=True)
+    err_msg = "precomputed is True but the input data is not a mask"
+    imputer.fit(data)
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.transform(data)
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit_transform(data)
+
+
+@pytest.mark.parametrize("X1_type", ["array", "dataframe"])
+def test_assign_where(X1_type):
+    """Check the behaviour of the private helpers `_assign_where`."""
+    rng = np.random.RandomState(0)
+
+    n_samples, n_features = 10, 5
+    X1 = _convert_container(rng.randn(n_samples, n_features), constructor_name=X1_type)
+    X2 = rng.randn(n_samples, n_features)
+    mask = rng.randint(0, 2, size=(n_samples, n_features)).astype(bool)
+
+    _assign_where(X1, X2, mask)
+
+    if X1_type == "dataframe":
+        X1 = X1.to_numpy()
+    assert_allclose(X1[mask], X2[mask])
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..00521ca090dc5763500356b93fcbcc0545822f56
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_common.py
@@ -0,0 +1,186 @@
+import pytest
+
+import numpy as np
+from scipy import sparse
+
+from sklearn.utils._testing import assert_allclose
+from sklearn.utils._testing import assert_allclose_dense_sparse
+from sklearn.utils._testing import assert_array_equal
+
+from sklearn.experimental import enable_iterative_imputer  # noqa
+
+from sklearn.impute import IterativeImputer
+from sklearn.impute import KNNImputer
+from sklearn.impute import SimpleImputer
+
+
+def imputers():
+    return [IterativeImputer(tol=0.1), KNNImputer(), SimpleImputer()]
+
+
+def sparse_imputers():
+    return [SimpleImputer()]
+
+
+# ConvergenceWarning will be raised by the IterativeImputer
+@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
+@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
+def test_imputation_missing_value_in_test_array(imputer):
+    # [Non-regression test for issue #13968] A missing value in the test set
+    # should not raise an error and should return a finite dataset.
+    train = [[1], [2]]
+    test = [[3], [np.nan]]
+    imputer.set_params(add_indicator=True)
+    imputer.fit(train).transform(test)
+
+
+# ConvergenceWarning will be raised by the IterativeImputer
+@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
+@pytest.mark.parametrize("marker", [np.nan, -1, 0])
+@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
+def test_imputers_add_indicator(marker, imputer):
+    X = np.array(
+        [
+            [marker, 1, 5, marker, 1],
+            [2, marker, 1, marker, 2],
+            [6, 3, marker, marker, 3],
+            [1, 2, 9, marker, 4],
+        ]
+    )
+    X_true_indicator = np.array(
+        [
+            [1.0, 0.0, 0.0, 1.0],
+            [0.0, 1.0, 0.0, 1.0],
+            [0.0, 0.0, 1.0, 1.0],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+    )
+    imputer.set_params(missing_values=marker, add_indicator=True)
+
+    X_trans = imputer.fit_transform(X)
+    assert_allclose(X_trans[:, -4:], X_true_indicator)
+    assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3]))
+
+    imputer.set_params(add_indicator=False)
+    X_trans_no_indicator = imputer.fit_transform(X)
+    assert_allclose(X_trans[:, :-4], X_trans_no_indicator)
+
+
+# ConvergenceWarning will be raised by the IterativeImputer
+@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
+@pytest.mark.parametrize("marker", [np.nan, -1])
+@pytest.mark.parametrize(
+    "imputer", sparse_imputers(), ids=lambda x: x.__class__.__name__
+)
+def test_imputers_add_indicator_sparse(imputer, marker):
+    X = sparse.csr_matrix(
+        [
+            [marker, 1, 5, marker, 1],
+            [2, marker, 1, marker, 2],
+            [6, 3, marker, marker, 3],
+            [1, 2, 9, marker, 4],
+        ]
+    )
+    X_true_indicator = sparse.csr_matrix(
+        [
+            [1.0, 0.0, 0.0, 1.0],
+            [0.0, 1.0, 0.0, 1.0],
+            [0.0, 0.0, 1.0, 1.0],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+    )
+    imputer.set_params(missing_values=marker, add_indicator=True)
+
+    X_trans = imputer.fit_transform(X)
+    assert_allclose_dense_sparse(X_trans[:, -4:], X_true_indicator)
+    assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3]))
+
+    imputer.set_params(add_indicator=False)
+    X_trans_no_indicator = imputer.fit_transform(X)
+    assert_allclose_dense_sparse(X_trans[:, :-4], X_trans_no_indicator)
+
+
+# ConvergenceWarning will be raised by the IterativeImputer
+@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
+@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
+@pytest.mark.parametrize("add_indicator", [True, False])
+def test_imputers_pandas_na_integer_array_support(imputer, add_indicator):
+    # Test pandas IntegerArray with pd.NA
+    pd = pytest.importorskip("pandas")
+    marker = np.nan
+    imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker)
+
+    X = np.array(
+        [
+            [marker, 1, 5, marker, 1],
+            [2, marker, 1, marker, 2],
+            [6, 3, marker, marker, 3],
+            [1, 2, 9, marker, 4],
+        ]
+    )
+    # fit on numpy array
+    X_trans_expected = imputer.fit_transform(X)
+
+    # Creates dataframe with IntegerArrays with pd.NA
+    X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c", "d", "e"])
+
+    # fit on pandas dataframe with IntegerArrays
+    X_trans = imputer.fit_transform(X_df)
+
+    assert_allclose(X_trans_expected, X_trans)
+
+
+@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
+@pytest.mark.parametrize("add_indicator", [True, False])
+def test_imputers_feature_names_out_pandas(imputer, add_indicator):
+    """Check feature names out for imputers."""
+    pd = pytest.importorskip("pandas")
+    marker = np.nan
+    imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker)
+
+    X = np.array(
+        [
+            [marker, 1, 5, 3, marker, 1],
+            [2, marker, 1, 4, marker, 2],
+            [6, 3, 7, marker, marker, 3],
+            [1, 2, 9, 8, marker, 4],
+        ]
+    )
+    X_df = pd.DataFrame(X, columns=["a", "b", "c", "d", "e", "f"])
+    imputer.fit(X_df)
+
+    names = imputer.get_feature_names_out()
+
+    if add_indicator:
+        expected_names = [
+            "a",
+            "b",
+            "c",
+            "d",
+            "f",
+            "missingindicator_a",
+            "missingindicator_b",
+            "missingindicator_d",
+            "missingindicator_e",
+        ]
+        assert_array_equal(expected_names, names)
+    else:
+        expected_names = ["a", "b", "c", "d", "f"]
+        assert_array_equal(expected_names, names)
+
+
+@pytest.mark.parametrize("keep_empty_features", [True, False])
+@pytest.mark.parametrize("imputer", imputers(), ids=lambda x: x.__class__.__name__)
+def test_keep_empty_features(imputer, keep_empty_features):
+    """Check that the imputer keeps features with only missing values."""
+    X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])
+    imputer = imputer.set_params(
+        add_indicator=False, keep_empty_features=keep_empty_features
+    )
+
+    for method in ["fit_transform", "transform"]:
+        X_imputed = getattr(imputer, method)(X)
+        if keep_empty_features:
+            assert X_imputed.shape == X.shape
+        else:
+            assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py
new file mode 100644
index 0000000000000000000000000000000000000000..86553effafcbfbfc59bb0adfac55bd9c0ce4738a
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_impute.py
@@ -0,0 +1,1693 @@
+import pytest
+import warnings
+
+import numpy as np
+from scipy import sparse
+from scipy.stats import kstest
+
+import io
+
+from sklearn.utils._testing import _convert_container
+from sklearn.utils._testing import assert_allclose
+from sklearn.utils._testing import assert_allclose_dense_sparse
+from sklearn.utils._testing import assert_array_equal
+from sklearn.utils._testing import assert_array_almost_equal
+
+# make IterativeImputer available
+from sklearn.experimental import enable_iterative_imputer  # noqa
+
+from sklearn.datasets import load_diabetes
+from sklearn.impute import MissingIndicator
+from sklearn.impute import SimpleImputer, IterativeImputer, KNNImputer
+from sklearn.dummy import DummyRegressor
+from sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV
+from sklearn.pipeline import Pipeline
+from sklearn.pipeline import make_union
+from sklearn.model_selection import GridSearchCV
+from sklearn import tree
+from sklearn.random_projection import _sparse_random_matrix
+from sklearn.exceptions import ConvergenceWarning
+from sklearn.impute._base import _most_frequent
+
+
+def _assert_array_equal_and_same_dtype(x, y):
+    assert_array_equal(x, y)
+    assert x.dtype == y.dtype
+
+
+def _assert_allclose_and_same_dtype(x, y):
+    assert_allclose(x, y)
+    assert x.dtype == y.dtype
+
+
+def _check_statistics(X, X_true, strategy, statistics, missing_values):
+    """Utility function for testing imputation for a given strategy.
+
+    Test with dense and sparse arrays
+
+    Check that:
+        - the statistics (mean, median, mode) are correct
+        - the missing values are imputed correctly"""
+
+    err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % (
+        strategy,
+        missing_values,
+    )
+
+    assert_ae = assert_array_equal
+
+    if X.dtype.kind == "f" or X_true.dtype.kind == "f":
+        assert_ae = assert_array_almost_equal
+
+    # Normal matrix
+    imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
+    X_trans = imputer.fit(X).transform(X.copy())
+    assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))
+    assert_ae(X_trans, X_true, err_msg=err_msg.format(False))
+
+    # Sparse matrix
+    imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
+    imputer.fit(sparse.csc_matrix(X))
+    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
+
+    if sparse.issparse(X_trans):
+        X_trans = X_trans.toarray()
+
+    assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))
+    assert_ae(X_trans, X_true, err_msg=err_msg.format(True))
+
+
+@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
+def test_imputation_shape(strategy):
+    # Verify the shapes of the imputed matrix for different strategies.
+    X = np.random.randn(10, 2)
+    X[::2] = np.nan
+
+    imputer = SimpleImputer(strategy=strategy)
+    X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
+    assert X_imputed.shape == (10, 2)
+    X_imputed = imputer.fit_transform(X)
+    assert X_imputed.shape == (10, 2)
+
+    iterative_imputer = IterativeImputer(initial_strategy=strategy)
+    X_imputed = iterative_imputer.fit_transform(X)
+    assert X_imputed.shape == (10, 2)
+
+
+@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
+def test_imputation_deletion_warning(strategy):
+    X = np.ones((3, 5))
+    X[:, 0] = np.nan
+    imputer = SimpleImputer(strategy=strategy, verbose=1)
+
+    # TODO: Remove in 1.3
+    with pytest.warns(FutureWarning, match="The 'verbose' parameter"):
+        imputer.fit(X)
+
+    with pytest.warns(UserWarning, match="Skipping"):
+        imputer.transform(X)
+
+
+@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
+def test_imputation_deletion_warning_feature_names(strategy):
+
+    pd = pytest.importorskip("pandas")
+
+    missing_values = np.nan
+    feature_names = np.array(["a", "b", "c", "d"], dtype=object)
+    X = pd.DataFrame(
+        [
+            [missing_values, missing_values, 1, missing_values],
+            [4, missing_values, 2, 10],
+        ],
+        columns=feature_names,
+    )
+
+    imputer = SimpleImputer(strategy=strategy, verbose=1)
+
+    # TODO: Remove in 1.3
+    with pytest.warns(FutureWarning, match="The 'verbose' parameter"):
+        imputer.fit(X)
+
+    # check SimpleImputer returning feature name attribute correctly
+    assert_array_equal(imputer.feature_names_in_, feature_names)
+
+    # ensure that skipped feature warning includes feature name
+    with pytest.warns(
+        UserWarning, match=r"Skipping features without any observed values: \['b'\]"
+    ):
+        imputer.transform(X)
+
+
+@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
+def test_imputation_error_sparse_0(strategy):
+    # check that error are raised when missing_values = 0 and input is sparse
+    X = np.ones((3, 5))
+    X[0] = 0
+    X = sparse.csc_matrix(X)
+
+    imputer = SimpleImputer(strategy=strategy, missing_values=0)
+    with pytest.raises(ValueError, match="Provide a dense array"):
+        imputer.fit(X)
+
+    imputer.fit(X.toarray())
+    with pytest.raises(ValueError, match="Provide a dense array"):
+        imputer.transform(X)
+
+
+def safe_median(arr, *args, **kwargs):
+    # np.median([]) raises a TypeError for numpy >= 1.10.1
+    length = arr.size if hasattr(arr, "size") else len(arr)
+    return np.nan if length == 0 else np.median(arr, *args, **kwargs)
+
+
+def safe_mean(arr, *args, **kwargs):
+    # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
+    length = arr.size if hasattr(arr, "size") else len(arr)
+    return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
+
+
+def test_imputation_mean_median():
+    # Test imputation using the mean and median strategies, when
+    # missing_values != 0.
+    rng = np.random.RandomState(0)
+
+    dim = 10
+    dec = 10
+    shape = (dim * dim, dim + dec)
+
+    zeros = np.zeros(shape[0])
+    values = np.arange(1, shape[0] + 1)
+    values[4::2] = -values[4::2]
+
+    tests = [
+        ("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),
+        ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))),
+    ]
+
+    for strategy, test_missing_values, true_value_fun in tests:
+        X = np.empty(shape)
+        X_true = np.empty(shape)
+        true_statistics = np.empty(shape[1])
+
+        # Create a matrix X with columns
+        #    - with only zeros,
+        #    - with only missing values
+        #    - with zeros, missing values and values
+        # And a matrix X_true containing all true values
+        for j in range(shape[1]):
+            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
+            nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0)
+            nb_values = shape[0] - nb_zeros - nb_missing_values
+
+            z = zeros[:nb_zeros]
+            p = np.repeat(test_missing_values, nb_missing_values)
+            v = values[rng.permutation(len(values))[:nb_values]]
+
+            true_statistics[j] = true_value_fun(z, v, p)
+
+            # Create the columns
+            X[:, j] = np.hstack((v, z, p))
+
+            if 0 == test_missing_values:
+                # XXX unreached code as of v0.22
+                X_true[:, j] = np.hstack(
+                    (v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros))
+                )
+            else:
+                X_true[:, j] = np.hstack(
+                    (v, z, np.repeat(true_statistics[j], nb_missing_values))
+                )
+
+            # Shuffle them the same way
+            np.random.RandomState(j).shuffle(X[:, j])
+            np.random.RandomState(j).shuffle(X_true[:, j])
+
+        # Mean doesn't support columns containing NaNs, median does
+        if strategy == "median":
+            cols_to_keep = ~np.isnan(X_true).any(axis=0)
+        else:
+            cols_to_keep = ~np.isnan(X_true).all(axis=0)
+
+        X_true = X_true[:, cols_to_keep]
+
+        _check_statistics(X, X_true, strategy, true_statistics, test_missing_values)
+
+
+def test_imputation_median_special_cases():
+    # Test median imputation with sparse boundary cases
+    X = np.array(
+        [
+            [0, np.nan, np.nan],  # odd: implicit zero
+            [5, np.nan, np.nan],  # odd: explicit nonzero
+            [0, 0, np.nan],  # even: average two zeros
+            [-5, 0, np.nan],  # even: avg zero and neg
+            [0, 5, np.nan],  # even: avg zero and pos
+            [4, 5, np.nan],  # even: avg nonzeros
+            [-4, -5, np.nan],  # even: avg negatives
+            [-1, 2, np.nan],  # even: crossing neg and pos
+        ]
+    ).transpose()
+
+    X_imputed_median = np.array(
+        [
+            [0, 0, 0],
+            [5, 5, 5],
+            [0, 0, 0],
+            [-5, 0, -2.5],
+            [0, 5, 2.5],
+            [4, 5, 4.5],
+            [-4, -5, -4.5],
+            [-1, 2, 0.5],
+        ]
+    ).transpose()
+    statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5]
+
+    _check_statistics(X, X_imputed_median, "median", statistics_median, np.nan)
+
+
+@pytest.mark.parametrize("strategy", ["mean", "median"])
+@pytest.mark.parametrize("dtype", [None, object, str])
+def test_imputation_mean_median_error_invalid_type(strategy, dtype):
+    X = np.array([["a", "b", 3], [4, "e", 6], ["g", "h", 9]], dtype=dtype)
+    msg = "non-numeric data:\ncould not convert string to float: '"
+    with pytest.raises(ValueError, match=msg):
+        imputer = SimpleImputer(strategy=strategy)
+        imputer.fit_transform(X)
+
+
+@pytest.mark.parametrize("strategy", ["mean", "median"])
+@pytest.mark.parametrize("type", ["list", "dataframe"])
+def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type):
+    X = [["a", "b", 3], [4, "e", 6], ["g", "h", 9]]
+    if type == "dataframe":
+        pd = pytest.importorskip("pandas")
+        X = pd.DataFrame(X)
+    msg = "non-numeric data:\ncould not convert string to float: '"
+    with pytest.raises(ValueError, match=msg):
+        imputer = SimpleImputer(strategy=strategy)
+        imputer.fit_transform(X)
+
+
+@pytest.mark.parametrize("strategy", ["constant", "most_frequent"])
+@pytest.mark.parametrize("dtype", [str, np.dtype("U"), np.dtype("S")])
+def test_imputation_const_mostf_error_invalid_types(strategy, dtype):
+    # Test imputation on non-numeric data using "most_frequent" and "constant"
+    # strategy
+    X = np.array(
+        [
+            [np.nan, np.nan, "a", "f"],
+            [np.nan, "c", np.nan, "d"],
+            [np.nan, "b", "d", np.nan],
+            [np.nan, "c", "d", "h"],
+        ],
+        dtype=dtype,
+    )
+
+    err_msg = "SimpleImputer does not support data"
+    with pytest.raises(ValueError, match=err_msg):
+        imputer = SimpleImputer(strategy=strategy)
+        imputer.fit(X).transform(X)
+
+
+def test_imputation_most_frequent():
+    # Test imputation using the most-frequent strategy.
+    X = np.array(
+        [
+            [-1, -1, 0, 5],
+            [-1, 2, -1, 3],
+            [-1, 1, 3, -1],
+            [-1, 2, 3, 7],
+        ]
+    )
+
+    X_true = np.array(
+        [
+            [2, 0, 5],
+            [2, 3, 3],
+            [1, 3, 3],
+            [2, 3, 7],
+        ]
+    )
+
+    # scipy.stats.mode, used in SimpleImputer, doesn't return the first most
+    # frequent value as promised in the doc but the lowest most frequent one.
+    # If this test fails after a scipy update, SimpleImputer will need to be
+    # updated to be consistent with the new (correct) behaviour.
+    _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
+
+
+@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
+def test_imputation_most_frequent_objects(marker):
+    # Test imputation using the most-frequent strategy.
+    X = np.array(
+        [
+            [marker, marker, "a", "f"],
+            [marker, "c", marker, "d"],
+            [marker, "b", "d", marker],
+            [marker, "c", "d", "h"],
+        ],
+        dtype=object,
+    )
+
+    X_true = np.array(
+        [
+            ["c", "a", "f"],
+            ["c", "d", "d"],
+            ["b", "d", "d"],
+            ["c", "d", "h"],
+        ],
+        dtype=object,
+    )
+
+    imputer = SimpleImputer(missing_values=marker, strategy="most_frequent")
+    X_trans = imputer.fit(X).transform(X)
+
+    assert_array_equal(X_trans, X_true)
+
+
+@pytest.mark.parametrize("dtype", [object, "category"])
+def test_imputation_most_frequent_pandas(dtype):
+    # Test imputation using the most frequent strategy on pandas df
+    pd = pytest.importorskip("pandas")
+
+    f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
+
+    df = pd.read_csv(f, dtype=dtype)
+
+    X_true = np.array(
+        [["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"]],
+        dtype=object,
+    )
+
+    imputer = SimpleImputer(strategy="most_frequent")
+    X_trans = imputer.fit_transform(df)
+
+    assert_array_equal(X_trans, X_true)
+
+
+@pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1.0, np.nan)])
+def test_imputation_constant_error_invalid_type(X_data, missing_value):
+    # Verify that exceptions are raised on invalid fill_value type
+    X = np.full((3, 5), X_data, dtype=float)
+    X[0, 0] = missing_value
+
+    with pytest.raises(ValueError, match="imputing numerical"):
+        imputer = SimpleImputer(
+            missing_values=missing_value, strategy="constant", fill_value="x"
+        )
+        imputer.fit_transform(X)
+
+
+def test_imputation_constant_integer():
+    # Test imputation using the constant strategy on integers
+    X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])
+
+    X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]])
+
+    imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0)
+    X_trans = imputer.fit_transform(X)
+
+    assert_array_equal(X_trans, X_true)
+
+
+@pytest.mark.parametrize("array_constructor", [sparse.csr_matrix, np.asarray])
+def test_imputation_constant_float(array_constructor):
+    # Test imputation using the constant strategy on floats
+    X = np.array(
+        [
+            [np.nan, 1.1, 0, np.nan],
+            [1.2, np.nan, 1.3, np.nan],
+            [0, 0, np.nan, np.nan],
+            [1.4, 1.5, 0, np.nan],
+        ]
+    )
+
+    X_true = np.array(
+        [[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]]
+    )
+
+    X = array_constructor(X)
+
+    X_true = array_constructor(X_true)
+
+    imputer = SimpleImputer(strategy="constant", fill_value=-1)
+    X_trans = imputer.fit_transform(X)
+
+    assert_allclose_dense_sparse(X_trans, X_true)
+
+
+@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
+def test_imputation_constant_object(marker):
+    # Test imputation using the constant strategy on objects
+    X = np.array(
+        [
+            [marker, "a", "b", marker],
+            ["c", marker, "d", marker],
+            ["e", "f", marker, marker],
+            ["g", "h", "i", marker],
+        ],
+        dtype=object,
+    )
+
+    X_true = np.array(
+        [
+            ["missing", "a", "b", "missing"],
+            ["c", "missing", "d", "missing"],
+            ["e", "f", "missing", "missing"],
+            ["g", "h", "i", "missing"],
+        ],
+        dtype=object,
+    )
+
+    imputer = SimpleImputer(
+        missing_values=marker, strategy="constant", fill_value="missing"
+    )
+    X_trans = imputer.fit_transform(X)
+
+    assert_array_equal(X_trans, X_true)
+
+
+@pytest.mark.parametrize("dtype", [object, "category"])
+def test_imputation_constant_pandas(dtype):
+    # Test imputation using the constant strategy on pandas df
+    pd = pytest.importorskip("pandas")
+
+    f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
+
+    df = pd.read_csv(f, dtype=dtype)
+
+    X_true = np.array(
+        [
+            ["missing_value", "i", "x", "missing_value"],
+            ["a", "missing_value", "y", "missing_value"],
+            ["a", "j", "missing_value", "missing_value"],
+            ["b", "j", "x", "missing_value"],
+        ],
+        dtype=object,
+    )
+
+    imputer = SimpleImputer(strategy="constant")
+    X_trans = imputer.fit_transform(df)
+
+    assert_array_equal(X_trans, X_true)
+
+
+@pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]])
+def test_iterative_imputer_one_feature(X):
+    # check we exit early when there is a single feature
+    imputer = IterativeImputer().fit(X)
+    assert imputer.n_iter_ == 0
+    imputer = IterativeImputer()
+    imputer.fit([[1], [2]])
+    assert imputer.n_iter_ == 0
+    imputer.fit([[1], [np.nan]])
+    assert imputer.n_iter_ == 0
+
+
+def test_imputation_pipeline_grid_search():
+    # Test imputation within a pipeline + gridsearch.
+    X = _sparse_random_matrix(100, 100, density=0.10)
+    missing_values = X.data[0]
+
+    pipeline = Pipeline(
+        [
+            ("imputer", SimpleImputer(missing_values=missing_values)),
+            ("tree", tree.DecisionTreeRegressor(random_state=0)),
+        ]
+    )
+
+    parameters = {"imputer__strategy": ["mean", "median", "most_frequent"]}
+
+    Y = _sparse_random_matrix(100, 1, density=0.10).toarray()
+    gs = GridSearchCV(pipeline, parameters)
+    gs.fit(X, Y)
+
+
+def test_imputation_copy():
+    # Test imputation with copy
+    X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0)
+
+    # copy=True, dense => copy
+    X = X_orig.copy().toarray()
+    imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
+    Xt = imputer.fit(X).transform(X)
+    Xt[0, 0] = -1
+    assert not np.all(X == Xt)
+
+    # copy=True, sparse csr => copy
+    X = X_orig.copy()
+    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True)
+    Xt = imputer.fit(X).transform(X)
+    Xt.data[0] = -1
+    assert not np.all(X.data == Xt.data)
+
+    # copy=False, dense => no copy
+    X = X_orig.copy().toarray()
+    imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False)
+    Xt = imputer.fit(X).transform(X)
+    Xt[0, 0] = -1
+    assert_array_almost_equal(X, Xt)
+
+    # copy=False, sparse csc => no copy
+    X = X_orig.copy().tocsc()
+    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
+    Xt = imputer.fit(X).transform(X)
+    Xt.data[0] = -1
+    assert_array_almost_equal(X.data, Xt.data)
+
+    # copy=False, sparse csr => copy
+    X = X_orig.copy()
+    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
+    Xt = imputer.fit(X).transform(X)
+    Xt.data[0] = -1
+    assert not np.all(X.data == Xt.data)
+
+    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
+    # made, even if copy=False.
+
+
+def test_iterative_imputer_zero_iters():
+    rng = np.random.RandomState(0)
+
+    n = 100
+    d = 10
+    X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+    missing_flag = X == 0
+    X[missing_flag] = np.nan
+
+    imputer = IterativeImputer(max_iter=0)
+    X_imputed = imputer.fit_transform(X)
+    # with max_iter=0, only initial imputation is performed
+    assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
+
+    # repeat but force n_iter_ to 0
+    imputer = IterativeImputer(max_iter=5).fit(X)
+    # transformed should not be equal to initial imputation
+    assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X))
+
+    imputer.n_iter_ = 0
+    # now they should be equal as only initial imputation is done
+    assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X))
+
+
+def test_iterative_imputer_verbose():
+    rng = np.random.RandomState(0)
+
+    n = 100
+    d = 3
+    X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+    imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1)
+    imputer.fit(X)
+    imputer.transform(X)
+    imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2)
+    imputer.fit(X)
+    imputer.transform(X)
+
+
+def test_iterative_imputer_all_missing():
+    n = 100
+    d = 3
+    X = np.zeros((n, d))
+    imputer = IterativeImputer(missing_values=0, max_iter=1)
+    X_imputed = imputer.fit_transform(X)
+    assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
+
+
+@pytest.mark.parametrize(
+    "imputation_order", ["random", "roman", "ascending", "descending", "arabic"]
+)
+def test_iterative_imputer_imputation_order(imputation_order):
+    rng = np.random.RandomState(0)
+    n = 100
+    d = 10
+    max_iter = 2
+    X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+    X[:, 0] = 1  # this column should not be discarded by IterativeImputer
+
+    imputer = IterativeImputer(
+        missing_values=0,
+        max_iter=max_iter,
+        n_nearest_features=5,
+        sample_posterior=False,
+        skip_complete=True,
+        min_value=0,
+        max_value=1,
+        verbose=1,
+        imputation_order=imputation_order,
+        random_state=rng,
+    )
+    imputer.fit_transform(X)
+    ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_]
+
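+    # every iteration imputes each feature with missing values exactly once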
+    assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_
+
+    if imputation_order == "roman":
+        assert np.all(ordered_idx[: d - 1] == np.arange(1, d))
+    elif imputation_order == "arabic":
+        assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1))
+    elif imputation_order == "random":
+        ordered_idx_round_1 = ordered_idx[: d - 1]
+        ordered_idx_round_2 = ordered_idx[d - 1 :]
+        assert ordered_idx_round_1 != ordered_idx_round_2
+    elif "ending" in imputation_order:
+        assert len(ordered_idx) == max_iter * (d - 1)
+
+
+@pytest.mark.parametrize(
+    "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()]
+)
+def test_iterative_imputer_estimators(estimator):
+    rng = np.random.RandomState(0)
+
+    n = 100
+    d = 10
+    X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+
+    imputer = IterativeImputer(
+        missing_values=0, max_iter=1, estimator=estimator, random_state=rng
+    )
+    imputer.fit_transform(X)
+
+    # check that types are correct for estimators
+    hashes = []
+    for triplet in imputer.imputation_sequence_:
+        expected_type = (
+            type(estimator) if estimator is not None else type(BayesianRidge())
+        )
+        assert isinstance(triplet.estimator, expected_type)
+        hashes.append(id(triplet.estimator))
+
+    # check that each estimator is unique, i.e. the base estimator was
+    # cloned for every feature rather than shared
+    assert len(set(hashes)) == len(hashes)
+
+
+def test_iterative_imputer_clip():
+    rng = np.random.RandomState(0)
+    n = 100
+    d = 10
+    X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+
+    imputer = IterativeImputer(
+        missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng
+    )
+
+    Xt = imputer.fit_transform(X)
+    assert_allclose(np.min(Xt[X == 0]), 0.1)
+    assert_allclose(np.max(Xt[X == 0]), 0.2)
+    assert_allclose(Xt[X != 0], X[X != 0])
+
+
+def test_iterative_imputer_clip_truncnorm():
+    rng = np.random.RandomState(0)
+    n = 100
+    d = 10
+    X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
+    X[:, 0] = 1
+
+    imputer = IterativeImputer(
+        missing_values=0,
+        max_iter=2,
+        n_nearest_features=5,
+        sample_posterior=True,
+        min_value=0.1,
+        max_value=0.2,
+        verbose=1,
+        imputation_order="random",
+        random_state=rng,
+    )
+    Xt = imputer.fit_transform(X)
+    assert_allclose(np.min(Xt[X == 0]), 0.1)
+    assert_allclose(np.max(Xt[X == 0]), 0.2)
+    assert_allclose(Xt[X != 0], X[X != 0])
+
+
+def test_iterative_imputer_truncated_normal_posterior():
+    # test that the values imputed with `sample_posterior=True` and
+    # boundaries (`min_value` and `max_value` not None) are drawn from a
+    # distribution that looks Gaussian via the Kolmogorov-Smirnov test.
+    # note that starting from the wrong random seed will make this test fail
+    # because random sampling doesn't occur at all when the imputation
+    # is outside of the (min_value, max_value) range
+    rng = np.random.RandomState(42)
+
+    X = rng.normal(size=(5, 5))
+    X[0][0] = np.nan
+
+    imputer = IterativeImputer(
+        min_value=0, max_value=0.5, sample_posterior=True, random_state=rng
+    )
+
+    imputer.fit_transform(X)
+    # generate multiple imputations for the single missing value
+    imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])
+
+    assert all(imputations >= 0)
+    assert all(imputations <= 0.5)
+
+    mu, sigma = imputations.mean(), imputations.std()
+    if sigma == 0:
+        # avoid a division by zero when standardizing
+        sigma += 1e-12
+    ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm")
+    # we want to fail to reject null hypothesis
+    # null hypothesis: distributions are the same
+    assert (
+        ks_statistic < 0.2 or p_value > 0.1
+    ), "The posterior does not appear to be normal"
+
+
+@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
+def test_iterative_imputer_missing_at_transform(strategy):
+    rng = np.random.RandomState(0)
+    n = 100
+    d = 10
+    X_train = rng.randint(low=0, high=3, size=(n, d))
+    X_test = rng.randint(low=0, high=3, size=(n, d))
+
+    X_train[:, 0] = 1  # definitely no missing values in 0th column
+    X_test[0, 0] = 0  # definitely missing value in 0th column
+
+    imputer = IterativeImputer(
+        missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng
+    ).fit(X_train)
+    initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train)
+
+    # if there were no missing values at fit time, the imputer only uses
+    # the initial imputer for that feature at transform time
+    assert_allclose(
+        imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0]
+    )
+
+
+def test_iterative_imputer_transform_stochasticity():
+    rng1 = np.random.RandomState(0)
+    rng2 = np.random.RandomState(1)
+    n = 100
+    d = 10
+    X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray()
+
+    # when sample_posterior=True, two transforms shouldn't be equal
+    imputer = IterativeImputer(
+        missing_values=0, max_iter=1, sample_posterior=True, random_state=rng1
+    )
+    imputer.fit(X)
+
+    X_fitted_1 = imputer.transform(X)
+    X_fitted_2 = imputer.transform(X)
+
+    # sufficient to assert that the means are not the same
+    assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))
+
+    # when sample_posterior=False, n_nearest_features=None and
+    # imputation_order is not random, two transforms should be identical
+    # even if the random states differ
+    imputer1 = IterativeImputer(
+        missing_values=0,
+        max_iter=1,
+        sample_posterior=False,
+        n_nearest_features=None,
+        imputation_order="ascending",
+        random_state=rng1,
+    )
+
+    imputer2 = IterativeImputer(
+        missing_values=0,
+        max_iter=1,
+        sample_posterior=False,
+        n_nearest_features=None,
+        imputation_order="ascending",
+        random_state=rng2,
+    )
+    imputer1.fit(X)
+    imputer2.fit(X)
+
+    X_fitted_1a = imputer1.transform(X)
+    X_fitted_1b = imputer1.transform(X)
+    X_fitted_2 = imputer2.transform(X)
+
+    assert_allclose(X_fitted_1a, X_fitted_1b)
+    assert_allclose(X_fitted_1a, X_fitted_2)
+
+
+def test_iterative_imputer_no_missing():
+    rng = np.random.RandomState(0)
+    X = rng.rand(100, 100)
+    X[:, 0] = np.nan
+    m1 = IterativeImputer(max_iter=10, random_state=rng)
+    m2 = IterativeImputer(max_iter=10, random_state=rng)
+    pred1 = m1.fit(X).transform(X)
+    pred2 = m2.fit_transform(X)
+    # should exclude the first column entirely
+    assert_allclose(X[:, 1:], pred1)
+    # fit(...).transform(...) and fit_transform(...) should give identical results
+    assert_allclose(pred1, pred2)
+
+
+def test_iterative_imputer_rank_one():
+    rng = np.random.RandomState(0)
+    d = 50
+    A = rng.rand(d, 1)
+    B = rng.rand(1, d)
+    X = np.dot(A, B)
+    nan_mask = rng.rand(d, d) < 0.5
+    X_missing = X.copy()
+    X_missing[nan_mask] = np.nan
+
+    imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng)
+    X_filled = imputer.fit_transform(X_missing)
+    assert_allclose(X_filled, X, atol=0.02)
+
+
+@pytest.mark.parametrize("rank", [3, 5])
+def test_iterative_imputer_transform_recovery(rank):
+    rng = np.random.RandomState(0)
+    n = 70
+    d = 70
+    A = rng.rand(n, rank)
+    B = rng.rand(rank, d)
+    X_filled = np.dot(A, B)
+    nan_mask = rng.rand(n, d) < 0.5
+    X_missing = X_filled.copy()
+    X_missing[nan_mask] = np.nan
+
+    # split up data in half
+    n = n // 2
+    X_train = X_missing[:n]
+    X_test_filled = X_filled[n:]
+    X_test = X_missing[n:]
+
+    imputer = IterativeImputer(
+        max_iter=5, imputation_order="descending", verbose=1, random_state=rng
+    ).fit(X_train)
+    X_test_est = imputer.transform(X_test)
+    assert_allclose(X_test_filled, X_test_est, atol=0.1)
+
+
+def test_iterative_imputer_additive_matrix():
+    rng = np.random.RandomState(0)
+    n = 100
+    d = 10
+    A = rng.randn(n, d)
+    B = rng.randn(n, d)
+    X_filled = np.zeros(A.shape)
+    for i in range(d):
+        for j in range(d):
+            X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2
+    # a quarter is randomly missing
+    nan_mask = rng.rand(n, d) < 0.25
+    X_missing = X_filled.copy()
+    X_missing[nan_mask] = np.nan
+
+    # split up data
+    n = n // 2
+    X_train = X_missing[:n]
+    X_test_filled = X_filled[n:]
+    X_test = X_missing[n:]
+
+    imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train)
+    X_test_est = imputer.transform(X_test)
+    assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01)
+
+
+def test_iterative_imputer_early_stopping():
+    rng = np.random.RandomState(0)
+    n = 50
+    d = 5
+    A = rng.rand(n, 1)
+    B = rng.rand(1, d)
+    X = np.dot(A, B)
+    nan_mask = rng.rand(n, d) < 0.5
+    X_missing = X.copy()
+    X_missing[nan_mask] = np.nan
+
+    imputer = IterativeImputer(
+        max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng
+    )
+    X_filled_100 = imputer.fit_transform(X_missing)
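+    # one (feat_idx, neighbor_feat_idx, estimator) triplet is recorded per
+    # feature per iteration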
+    assert len(imputer.imputation_sequence_) == d * imputer.n_iter_
+
+    imputer = IterativeImputer(
+        max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng
+    )
+    X_filled_early = imputer.fit_transform(X_missing)
+    assert_allclose(X_filled_100, X_filled_early, atol=1e-7)
+
+    imputer = IterativeImputer(
+        max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng
+    )
+    imputer.fit(X_missing)
+    assert imputer.n_iter_ == imputer.max_iter
+
+
+def test_iterative_imputer_catch_warning():
+    # check that we catch a RuntimeWarning due to a division by zero when a
+    # feature is constant in the dataset
+    X, y = load_diabetes(return_X_y=True)
+    n_samples, n_features = X.shape
+
+    # simulate a feature that contains only one value (is constant) during fit
+    X[:, 3] = 1
+
+    # add some missing values
+    rng = np.random.RandomState(0)
+    missing_rate = 0.15
+    for feat in range(n_features):
+        sample_idx = rng.choice(
+            np.arange(n_samples), size=int(n_samples * missing_rate), replace=False
+        )
+        X[sample_idx, feat] = np.nan
+
+    imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True)
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", RuntimeWarning)
+        X_fill = imputer.fit_transform(X, y)
+    assert not np.any(np.isnan(X_fill))
+
+
+@pytest.mark.parametrize(
+    "min_value, max_value, correct_output",
+    [
+        (0, 100, np.array([[0] * 3, [100] * 3])),
+        (None, None, np.array([[-np.inf] * 3, [np.inf] * 3])),
+        (-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])),
+        ([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])),
+        (
+            [-5, -np.inf, 10],
+            [100, 200, np.inf],
+            np.array([[-5, -np.inf, 10], [100, 200, np.inf]]),
+        ),
+    ],
+    ids=["scalars", "None-default", "inf", "lists", "lists-with-inf"],
+)
+def test_iterative_imputer_min_max_array_like(min_value, max_value, correct_output):
+    # check that passing scalar or array-like
+    # for min_value and max_value in IterativeImputer works
+    X = np.random.RandomState(0).randn(10, 3)
+    imputer = IterativeImputer(min_value=min_value, max_value=max_value)
+    imputer.fit(X)
+
+    assert isinstance(imputer._min_value, np.ndarray) and isinstance(
+        imputer._max_value, np.ndarray
+    )
+    assert (imputer._min_value.shape[0] == X.shape[1]) and (
+        imputer._max_value.shape[0] == X.shape[1]
+    )
+
+    assert_allclose(correct_output[0, :], imputer._min_value)
+    assert_allclose(correct_output[1, :], imputer._max_value)
+
+
+@pytest.mark.parametrize(
+    "min_value, max_value, err_msg",
+    [
+        (100, 0, "min_value >= max_value."),
+        (np.inf, -np.inf, "min_value >= max_value."),
+        ([-5, 5], [100, 200, 0], "_value' should be of shape"),
+    ],
+)
+def test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg):
+    # check that invalid scalar or array-like combinations of
+    # min_value and max_value raise a ValueError
+    X = np.random.random((10, 3))
+    imputer = IterativeImputer(min_value=min_value, max_value=max_value)
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit(X)
+
+
+@pytest.mark.parametrize(
+    "min_max_1, min_max_2",
+    [([None, None], [-np.inf, np.inf]), ([-10, 10], [[-10] * 4, [10] * 4])],
+    ids=["None-vs-inf", "Scalar-vs-vector"],
+)
+def test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2):
+    # Test that None/inf and scalar/vector give the same imputation
+    X_train = np.array(
+        [
+            [np.nan, 2, 2, 1],
+            [10, np.nan, np.nan, 7],
+            [3, 1, np.nan, 1],
+            [np.nan, 4, 2, np.nan],
+        ]
+    )
+    X_test = np.array(
+        [[np.nan, 2, np.nan, 5], [2, 4, np.nan, np.nan], [np.nan, 1, 10, 1]]
+    )
+    imputer1 = IterativeImputer(
+        min_value=min_max_1[0], max_value=min_max_1[1], random_state=0
+    )
+    imputer2 = IterativeImputer(
+        min_value=min_max_2[0], max_value=min_max_2[1], random_state=0
+    )
+    X_test_imputed1 = imputer1.fit(X_train).transform(X_test)
+    X_test_imputed2 = imputer2.fit(X_train).transform(X_test)
+    assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0])
+
+
+@pytest.mark.parametrize("skip_complete", [True, False])
+def test_iterative_imputer_skip_non_missing(skip_complete):
+    # check the imputing strategy when missing data are present in the
+    # testing set only.
+    # taken from: https://github.com/scikit-learn/scikit-learn/issues/14383
+    rng = np.random.RandomState(0)
+    X_train = np.array([[5, 2, 2, 1], [10, 1, 2, 7], [3, 1, 1, 1], [8, 4, 2, 2]])
+    X_test = np.array([[np.nan, 2, 4, 5], [np.nan, 4, 1, 2], [np.nan, 1, 10, 1]])
+    imputer = IterativeImputer(
+        initial_strategy="mean", skip_complete=skip_complete, random_state=rng
+    )
+    X_test_est = imputer.fit(X_train).transform(X_test)
+    if skip_complete:
+        # impute with the initial strategy: 'mean'
+        assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0]))
+    else:
+        assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4)
+
+
+@pytest.mark.parametrize("rs_imputer", [None, 1, np.random.RandomState(seed=1)])
+@pytest.mark.parametrize("rs_estimator", [None, 1, np.random.RandomState(seed=1)])
+def test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator):
+    class ZeroEstimator:
+        def __init__(self, random_state):
+            self.random_state = random_state
+
+        def fit(self, *args, **kwargs):
+            return self
+
+        def predict(self, X):
+            return np.zeros(X.shape[0])
+
+    estimator = ZeroEstimator(random_state=rs_estimator)
+    imputer = IterativeImputer(random_state=rs_imputer, estimator=estimator)
+    X_train = np.zeros((10, 3))
+    imputer.fit(X_train)
+    assert estimator.random_state == rs_estimator
+
+
+@pytest.mark.parametrize(
+    "X_fit, X_trans, params, msg_err",
+    [
+        (
+            np.array([[-1, 1], [1, 2]]),
+            np.array([[-1, 1], [1, -1]]),
+            {"features": "missing-only", "sparse": "auto"},
+            "have missing values in transform but have no missing values in fit",
+        ),
+        (
+            np.array([["a", "b"], ["c", "a"]], dtype=str),
+            np.array([["a", "b"], ["c", "a"]], dtype=str),
+            {},
+            "MissingIndicator does not support data with dtype",
+        ),
+    ],
+)
+def test_missing_indicator_error(X_fit, X_trans, params, msg_err):
+    indicator = MissingIndicator(missing_values=-1)
+    indicator.set_params(**params)
+    with pytest.raises(ValueError, match=msg_err):
+        indicator.fit(X_fit).transform(X_trans)
+
+
+@pytest.mark.parametrize(
+    "missing_values, dtype, arr_type",
+    [
+        (np.nan, np.float64, np.array),
+        (0, np.int32, np.array),
+        (-1, np.int32, np.array),
+        (np.nan, np.float64, sparse.csc_matrix),
+        (-1, np.int32, sparse.csc_matrix),
+        (np.nan, np.float64, sparse.csr_matrix),
+        (-1, np.int32, sparse.csr_matrix),
+        (np.nan, np.float64, sparse.coo_matrix),
+        (-1, np.int32, sparse.coo_matrix),
+        (np.nan, np.float64, sparse.lil_matrix),
+        (-1, np.int32, sparse.lil_matrix),
+        (np.nan, np.float64, sparse.bsr_matrix),
+        (-1, np.int32, sparse.bsr_matrix),
+    ],
+)
+@pytest.mark.parametrize(
+    "param_features, n_features, features_indices",
+    [("missing-only", 3, np.array([0, 1, 2])), ("all", 3, np.array([0, 1, 2]))],
+)
+def test_missing_indicator_new(
+    missing_values, arr_type, dtype, param_features, n_features, features_indices
+):
+    X_fit = np.array([[missing_values, missing_values, 1], [4, 2, missing_values]])
+    X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
+    X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]])
+    X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]])
+
+    # convert the input to the right array format and right dtype
+    X_fit = arr_type(X_fit).astype(dtype)
+    X_trans = arr_type(X_trans).astype(dtype)
+    X_fit_expected = X_fit_expected.astype(dtype)
+    X_trans_expected = X_trans_expected.astype(dtype)
+
+    indicator = MissingIndicator(
+        missing_values=missing_values, features=param_features, sparse=False
+    )
+    X_fit_mask = indicator.fit_transform(X_fit)
+    X_trans_mask = indicator.transform(X_trans)
+
+    assert X_fit_mask.shape[1] == n_features
+    assert X_trans_mask.shape[1] == n_features
+
+    assert_array_equal(indicator.features_, features_indices)
+    assert_allclose(X_fit_mask, X_fit_expected[:, features_indices])
+    assert_allclose(X_trans_mask, X_trans_expected[:, features_indices])
+
+    assert X_fit_mask.dtype == bool
+    assert X_trans_mask.dtype == bool
+    assert isinstance(X_fit_mask, np.ndarray)
+    assert isinstance(X_trans_mask, np.ndarray)
+
+    indicator.set_params(sparse=True)
+    X_fit_mask_sparse = indicator.fit_transform(X_fit)
+    X_trans_mask_sparse = indicator.transform(X_trans)
+
+    assert X_fit_mask_sparse.dtype == bool
+    assert X_trans_mask_sparse.dtype == bool
+    assert X_fit_mask_sparse.format == "csc"
+    assert X_trans_mask_sparse.format == "csc"
+    assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask)
+    assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask)
+
+
+@pytest.mark.parametrize(
+    "arr_type",
+    [
+        sparse.csc_matrix,
+        sparse.csr_matrix,
+        sparse.coo_matrix,
+        sparse.lil_matrix,
+        sparse.bsr_matrix,
+    ],
+)
+def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type):
+    # test for sparse input and missing_value == 0
+
+    missing_values = 0
+    X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
+    X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
+
+    # convert the input to the right array format
+    X_fit_sparse = arr_type(X_fit)
+    X_trans_sparse = arr_type(X_trans)
+
+    indicator = MissingIndicator(missing_values=missing_values)
+
+    with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
+        indicator.fit_transform(X_fit_sparse)
+
+    indicator.fit_transform(X_fit)
+    with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
+        indicator.transform(X_trans_sparse)
+
+
+@pytest.mark.parametrize("param_sparse", [True, False, "auto"])
+@pytest.mark.parametrize(
+    "missing_values, arr_type",
+    [
+        (np.nan, np.array),
+        (0, np.array),
+        (np.nan, sparse.csc_matrix),
+        (np.nan, sparse.csr_matrix),
+        (np.nan, sparse.coo_matrix),
+        (np.nan, sparse.lil_matrix),
+    ],
+)
+def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse):
+    # check the format of the output with different sparse parameter
+    X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
+    X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
+    X_fit = arr_type(X_fit).astype(np.float64)
+    X_trans = arr_type(X_trans).astype(np.float64)
+
+    indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse)
+    X_fit_mask = indicator.fit_transform(X_fit)
+    X_trans_mask = indicator.transform(X_trans)
+
+    if param_sparse is True:
+        assert X_fit_mask.format == "csc"
+        assert X_trans_mask.format == "csc"
+    elif param_sparse == "auto" and missing_values == 0:
+        assert isinstance(X_fit_mask, np.ndarray)
+        assert isinstance(X_trans_mask, np.ndarray)
+    elif param_sparse is False:
+        assert isinstance(X_fit_mask, np.ndarray)
+        assert isinstance(X_trans_mask, np.ndarray)
+    else:
+        if sparse.issparse(X_fit):
+            assert X_fit_mask.format == "csc"
+            assert X_trans_mask.format == "csc"
+        else:
+            assert isinstance(X_fit_mask, np.ndarray)
+            assert isinstance(X_trans_mask, np.ndarray)
+
+
+def test_missing_indicator_string():
+    X = np.array([["a", "b", "c"], ["b", "c", "a"]], dtype=object)
+    indicator = MissingIndicator(missing_values="a", features="all")
+    X_trans = indicator.fit_transform(X)
+    assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]]))
+
+
+@pytest.mark.parametrize(
+    "X, missing_values, X_trans_exp",
+    [
+        (
+            np.array([["a", "b"], ["b", "a"]], dtype=object),
+            "a",
+            np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
+        ),
+        (
+            np.array([[np.nan, 1.0], [1.0, np.nan]]),
+            np.nan,
+            np.array([[1.0, 1.0, True, False], [1.0, 1.0, False, True]]),
+        ),
+        (
+            np.array([[np.nan, "b"], ["b", np.nan]], dtype=object),
+            np.nan,
+            np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
+        ),
+        (
+            np.array([[None, "b"], ["b", None]], dtype=object),
+            None,
+            np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
+        ),
+    ],
+)
+def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp):
+    trans = make_union(
+        SimpleImputer(missing_values=missing_values, strategy="most_frequent"),
+        MissingIndicator(missing_values=missing_values),
+    )
+    X_trans = trans.fit_transform(X)
+    assert_array_equal(X_trans, X_trans_exp)
+
+
+@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
+@pytest.mark.parametrize(
+    "imputer_missing_values, missing_value, err_msg",
+    [
+        ("NaN", np.nan, "Input X contains NaN"),
+        ("-1", -1, "types are expected to be both numerical."),
+    ],
+)
+def test_inconsistent_dtype_X_missing_values(
+    imputer_constructor, imputer_missing_values, missing_value, err_msg
+):
+    # regression test for issue #11390. Comparison between incoherent dtype
+    # for X and missing_values was not raising a proper error.
+    rng = np.random.RandomState(42)
+    X = rng.randn(10, 10)
+    X[0, 0] = missing_value
+
+    imputer = imputer_constructor(missing_values=imputer_missing_values)
+
+    with pytest.raises(ValueError, match=err_msg):
+        imputer.fit_transform(X)
+
+
+def test_missing_indicator_no_missing():
+    # check that all features are dropped if there are no missing values when
+    # features='missing-only' (#13491)
+    X = np.array([[1, 1], [1, 1]])
+
+    mi = MissingIndicator(features="missing-only", missing_values=-1)
+    Xt = mi.fit_transform(X)
+
+    assert Xt.shape[1] == 0
+
+
+def test_missing_indicator_sparse_no_explicit_zeros():
+    # Check that non-missing values don't become explicit zeros in the mask
+    # generated by the missing indicator when X is sparse. (#13491)
+    X = sparse.csr_matrix([[0, 1, 2], [1, 2, 0], [2, 0, 1]])
+
+    mi = MissingIndicator(features="all", missing_values=1)
+    Xt = mi.fit_transform(X)
+
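+    # getnnz() counts stored entries while sum() counts True entries, so
+    # equality implies no explicit False values were stored in the mask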
+    assert Xt.getnnz() == Xt.sum()
+
+
+@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
+def test_imputer_without_indicator(imputer_constructor):
+    X = np.array([[1, 1], [1, 1]])
+    imputer = imputer_constructor()
+    imputer.fit(X)
+
+    assert imputer.indicator_ is None
+
+
+@pytest.mark.parametrize(
+    "arr_type",
+    [
+        sparse.csc_matrix,
+        sparse.csr_matrix,
+        sparse.coo_matrix,
+        sparse.lil_matrix,
+        sparse.bsr_matrix,
+    ],
+)
+def test_simple_imputation_add_indicator_sparse_matrix(arr_type):
+    X_sparse = arr_type([[np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9]])
+    X_true = np.array(
+        [
+            [3.0, 1.0, 5.0, 1.0, 0.0, 0.0],
+            [2.0, 2.0, 1.0, 0.0, 1.0, 0.0],
+            [6.0, 3.0, 5.0, 0.0, 0.0, 1.0],
+            [1.0, 2.0, 9.0, 0.0, 0.0, 0.0],
+        ]
+    )
+
+    imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)
+    X_trans = imputer.fit_transform(X_sparse)
+
+    assert sparse.issparse(X_trans)
+    assert X_trans.shape == X_true.shape
+    assert_allclose(X_trans.toarray(), X_true)
+
+
+@pytest.mark.parametrize(
+    "strategy, expected", [("most_frequent", "b"), ("constant", "missing_value")]
+)
+def test_simple_imputation_string_list(strategy, expected):
+    X = [["a", "b"], ["c", np.nan]]
+
+    X_true = np.array([["a", "b"], ["c", expected]], dtype=object)
+
+    imputer = SimpleImputer(strategy=strategy)
+    X_trans = imputer.fit_transform(X)
+
+    assert_array_equal(X_trans, X_true)
+
+
+@pytest.mark.parametrize(
+    "order, idx_order",
+    [("ascending", [3, 4, 2, 0, 1]), ("descending", [1, 0, 2, 4, 3])],
+)
+def test_imputation_order(order, idx_order):
+    # regression test for #15393
+    rng = np.random.RandomState(42)
+    X = rng.rand(100, 5)
+    X[:50, 1] = np.nan
+    X[:30, 0] = np.nan
+    X[:20, 2] = np.nan
+    X[:10, 4] = np.nan
+
+    with pytest.warns(ConvergenceWarning):
+        trs = IterativeImputer(max_iter=1, imputation_order=order, random_state=0).fit(
+            X
+        )
+        idx = [x.feat_idx for x in trs.imputation_sequence_]
+        assert idx == idx_order
+
+
+@pytest.mark.parametrize("missing_value", [-1, np.nan])
+def test_simple_imputation_inverse_transform(missing_value):
+    # Test the inverse_transform feature for both missing value markers
+    X_1 = np.array(
+        [
+            [9, missing_value, 3, -1],
+            [4, -1, 5, 4],
+            [6, 7, missing_value, -1],
+            [8, 9, 0, missing_value],
+        ]
+    )
+
+    X_2 = np.array(
+        [
+            [5, 4, 2, 1],
+            [2, 1, missing_value, 3],
+            [9, missing_value, 7, 1],
+            [6, 4, 2, missing_value],
+        ]
+    )
+
+    X_3 = np.array(
+        [
+            [1, missing_value, 5, 9],
+            [missing_value, 4, missing_value, missing_value],
+            [2, missing_value, 7, missing_value],
+            [missing_value, 3, missing_value, 8],
+        ]
+    )
+
+    X_4 = np.array(
+        [
+            [1, 1, 1, 3],
+            [missing_value, 2, missing_value, 1],
+            [2, 3, 3, 4],
+            [missing_value, 4, missing_value, 2],
+        ]
+    )
+
+    imputer = SimpleImputer(
+        missing_values=missing_value, strategy="mean", add_indicator=True
+    )
+
+    X_1_trans = imputer.fit_transform(X_1)
+    X_1_inv_trans = imputer.inverse_transform(X_1_trans)
+
+    X_2_trans = imputer.transform(X_2)  # test on new data
+    X_2_inv_trans = imputer.inverse_transform(X_2_trans)
+
+    assert_array_equal(X_1_inv_trans, X_1)
+    assert_array_equal(X_2_inv_trans, X_2)
+
+    for X in [X_3, X_4]:
+        X_trans = imputer.fit_transform(X)
+        X_inv_trans = imputer.inverse_transform(X_trans)
+        assert_array_equal(X_inv_trans, X)
+
+
+@pytest.mark.parametrize("missing_value", [-1, np.nan])
+def test_simple_imputation_inverse_transform_exceptions(missing_value):
+    X_1 = np.array(
+        [
+            [9, missing_value, 3, -1],
+            [4, -1, 5, 4],
+            [6, 7, missing_value, -1],
+            [8, 9, 0, missing_value],
+        ]
+    )
+
+    imputer = SimpleImputer(missing_values=missing_value, strategy="mean")
+    X_1_trans = imputer.fit_transform(X_1)
+    with pytest.raises(
+        ValueError, match=f"Got 'add_indicator={imputer.add_indicator}'"
+    ):
+        imputer.inverse_transform(X_1_trans)
+
+
+@pytest.mark.parametrize(
+    "expected,array,dtype,extra_value,n_repeat",
+    [
+        # array of object dtype
+        ("extra_value", ["a", "b", "c"], object, "extra_value", 2),
+        (
+            "most_frequent_value",
+            ["most_frequent_value", "most_frequent_value", "value"],
+            object,
+            "extra_value",
+            1,
+        ),
+        ("a", ["min_value", "min_valuevalue"], object, "a", 2),
+        ("min_value", ["min_value", "min_value", "value"], object, "z", 2),
+        # array of numeric dtype
+        (10, [1, 2, 3], int, 10, 2),
+        (1, [1, 1, 2], int, 10, 1),
+        (10, [20, 20, 1], int, 10, 2),
+        (1, [1, 1, 20], int, 10, 2),
+    ],
+)
+def test_most_frequent(expected, array, dtype, extra_value, n_repeat):
+    assert expected == _most_frequent(
+        np.array(array, dtype=dtype), extra_value, n_repeat
+    )
+
+
+@pytest.mark.parametrize(
+    "initial_strategy", ["mean", "median", "most_frequent", "constant"]
+)
+def test_iterative_imputer_keep_empty_features(initial_strategy):
+    """Check the behaviour of the iterative imputer with different initial strategy
+    and keeping empty features (i.e. features containing only missing values).
+    """
+    X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
+
+    imputer = IterativeImputer(
+        initial_strategy=initial_strategy, keep_empty_features=True
+    )
+    X_imputed = imputer.fit_transform(X)
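+    # the kept empty feature ends up filled with zeros, whatever the
+    # initial strategy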
+    assert_allclose(X_imputed[:, 1], 0)
+    X_imputed = imputer.transform(X)
+    assert_allclose(X_imputed[:, 1], 0)
+
+
+@pytest.mark.parametrize("keep_empty_features", [True, False])
+def test_knn_imputer_keep_empty_features(keep_empty_features):
+    """Check the behaviour of `keep_empty_features` for `KNNImputer`."""
+    X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
+
+    imputer = KNNImputer(keep_empty_features=keep_empty_features)
+
+    for method in ["fit_transform", "transform"]:
+        X_imputed = getattr(imputer, method)(X)
+        if keep_empty_features:
+            assert X_imputed.shape == X.shape
+            assert_array_equal(X_imputed[:, 1], 0)
+        else:
+            assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
+
+
+def test_simple_impute_pd_na():
+    pd = pytest.importorskip("pandas")
+
+    # Impute pandas array of string types.
+    df = pd.DataFrame({"feature": pd.Series(["abc", None, "de"], dtype="string")})
+    imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value="na")
+    _assert_array_equal_and_same_dtype(
+        imputer.fit_transform(df), np.array([["abc"], ["na"], ["de"]], dtype=object)
+    )
+
+    # Impute pandas array of string types without any missing values.
+    df = pd.DataFrame({"feature": pd.Series(["abc", "de", "fgh"], dtype="string")})
+    imputer = SimpleImputer(fill_value="ok", strategy="constant")
+    _assert_array_equal_and_same_dtype(
+        imputer.fit_transform(df), np.array([["abc"], ["de"], ["fgh"]], dtype=object)
+    )
+
+    # Impute pandas array of integer types.
+    df = pd.DataFrame({"feature": pd.Series([1, None, 3], dtype="Int64")})
+    imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-1)
+    _assert_allclose_and_same_dtype(
+        imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
+    )
+
+    # Using `np.nan` as the missing marker also works.
+    imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=-1)
+    _assert_allclose_and_same_dtype(
+        imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
+    )
+
+    # Impute pandas array of integer types with 'median' strategy.
+    df = pd.DataFrame({"feature": pd.Series([1, None, 2, 3], dtype="Int64")})
+    imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
+    _assert_allclose_and_same_dtype(
+        imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype="float64")
+    )
+
+    # Impute pandas array of integer types with 'mean' strategy.
+    df = pd.DataFrame({"feature": pd.Series([1, None, 2], dtype="Int64")})
+    imputer = SimpleImputer(missing_values=pd.NA, strategy="mean")
+    _assert_allclose_and_same_dtype(
+        imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype="float64")
+    )
+
+    # Impute pandas array of float types.
+    df = pd.DataFrame({"feature": pd.Series([1.0, None, 3.0], dtype="float64")})
+    imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-2.0)
+    _assert_allclose_and_same_dtype(
+        imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype="float64")
+    )
+
+    # Impute pandas array of float types with 'median' strategy.
+    df = pd.DataFrame({"feature": pd.Series([1.0, None, 2.0, 3.0], dtype="float64")})
+    imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
+    _assert_allclose_and_same_dtype(
+        imputer.fit_transform(df),
+        np.array([[1.0], [2.0], [2.0], [3.0]], dtype="float64"),
+    )
+
+
+def test_missing_indicator_feature_names_out():
+    """Check that missing indicator return the feature names with a prefix."""
+    pd = pytest.importorskip("pandas")
+
+    missing_values = np.nan
+    X = pd.DataFrame(
+        [
+            [missing_values, missing_values, 1, missing_values],
+            [4, missing_values, 2, 10],
+        ],
+        columns=["a", "b", "c", "d"],
+    )
+
+    indicator = MissingIndicator(missing_values=missing_values).fit(X)
+    feature_names = indicator.get_feature_names_out()
+    expected_names = ["missingindicator_a", "missingindicator_b", "missingindicator_d"]
+    assert_array_equal(expected_names, feature_names)
+
+
+def test_imputer_lists_fit_transform():
+    """Check transform uses object dtype when fitted on an object dtype.
+
+    Non-regression test for #19572.
+    """
+
+    X = [["a", "b"], ["c", "b"], ["a", "a"]]
+    imp_frequent = SimpleImputer(strategy="most_frequent").fit(X)
+    X_trans = imp_frequent.transform([[np.nan, np.nan]])
+    assert X_trans.dtype == object
+    assert_array_equal(X_trans, [["a", "b"]])
+
+
+@pytest.mark.parametrize("dtype_test", [np.float32, np.float64])
+def test_imputer_transform_preserves_numeric_dtype(dtype_test):
+    """Check transform preserves numeric dtype independent of fit dtype."""
+    X = np.asarray(
+        [[1.2, 3.4, np.nan], [np.nan, 1.2, 1.3], [4.2, 2, 1]], dtype=np.float64
+    )
+    imp = SimpleImputer().fit(X)
+
+    X_test = np.asarray([[np.nan, np.nan, np.nan]], dtype=dtype_test)
+    X_trans = imp.transform(X_test)
+    assert X_trans.dtype == dtype_test
+
+
+@pytest.mark.parametrize("array_type", ["array", "sparse"])
+@pytest.mark.parametrize("keep_empty_features", [True, False])
+def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features):
+    """Check the behaviour of `keep_empty_features` with `strategy='constant'.
+    For backward compatibility, a column full of missing values will always be
+    fill and never dropped.
+    """
+    X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
+    X = _convert_container(X, array_type)
+    fill_value = 10
+    imputer = SimpleImputer(
+        strategy="constant",
+        fill_value=fill_value,
+        keep_empty_features=keep_empty_features,
+    )
+
+    for method in ["fit_transform", "transform"]:
+        X_imputed = getattr(imputer, method)(X)
+        assert X_imputed.shape == X.shape
+        constant_feature = (
+            X_imputed[:, 0].A if array_type == "sparse" else X_imputed[:, 0]
+        )
+        assert_array_equal(constant_feature, fill_value)
+
+
+@pytest.mark.parametrize("array_type", ["array", "sparse"])
+@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
+@pytest.mark.parametrize("keep_empty_features", [True, False])
+def test_simple_imputer_keep_empty_features(strategy, array_type, keep_empty_features):
+    """Check the behaviour of `keep_empty_features` with all strategies but
+    'constant'.
+    """
+    X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
+    X = _convert_container(X, array_type)
+    imputer = SimpleImputer(strategy=strategy, keep_empty_features=keep_empty_features)
+
+    for method in ["fit_transform", "transform"]:
+        X_imputed = getattr(imputer, method)(X)
+        if keep_empty_features:
+            assert X_imputed.shape == X.shape
+            constant_feature = (
+                X_imputed[:, 0].A if array_type == "sparse" else X_imputed[:, 0]
+            )
+            assert_array_equal(constant_feature, 0)
+        else:
+            assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
diff --git a/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a489ab23701fbeb87815b699e3ecaafe523e1dc
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/impute/tests/test_knn.py
@@ -0,0 +1,552 @@
+import numpy as np
+import pytest
+
+from sklearn import config_context
+from sklearn.impute import KNNImputer
+from sklearn.metrics.pairwise import nan_euclidean_distances, pairwise_distances
+from sklearn.neighbors import KNeighborsRegressor
+from sklearn.utils._testing import assert_allclose
+
+
+@pytest.mark.parametrize("weights", ["uniform", "distance"])
+@pytest.mark.parametrize("n_neighbors", range(1, 6))
+def test_knn_imputer_shape(weights, n_neighbors):
+    # Verify the shapes of the imputed matrix for different weights and
+    # number of neighbors.
+    n_rows = 10
+    n_cols = 2
+    X = np.random.rand(n_rows, n_cols)
+    X[0, 0] = np.nan
+
+    imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights)
+    X_imputed = imputer.fit_transform(X)
+    assert X_imputed.shape == (n_rows, n_cols)
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_default_with_invalid_input(na):
+    # Test imputation with default values and invalid input
+
+    # Test with inf present
+    X = np.array(
+        [
+            [np.inf, 1, 1, 2, na],
+            [2, 1, 2, 2, 3],
+            [3, 2, 3, 3, 8],
+            [na, 6, 0, 5, 13],
+            [na, 7, 0, 7, 8],
+            [6, 6, 2, 5, 7],
+        ]
+    )
+    with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"):
+        KNNImputer(missing_values=na).fit(X)
+
+    # Test with inf present in matrix passed in transform()
+    X = np.array(
+        [
+            [np.inf, 1, 1, 2, na],
+            [2, 1, 2, 2, 3],
+            [3, 2, 3, 3, 8],
+            [na, 6, 0, 5, 13],
+            [na, 7, 0, 7, 8],
+            [6, 6, 2, 5, 7],
+        ]
+    )
+
+    X_fit = np.array(
+        [
+            [0, 1, 1, 2, na],
+            [2, 1, 2, 2, 3],
+            [3, 2, 3, 3, 8],
+            [na, 6, 0, 5, 13],
+            [na, 7, 0, 7, 8],
+            [6, 6, 2, 5, 7],
+        ]
+    )
+    imputer = KNNImputer(missing_values=na).fit(X_fit)
+    with pytest.raises(ValueError, match="Input X contains (infinity|NaN)"):
+        imputer.transform(X)
+
+    # Test with missing_values=0 when NaN present
+    imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
+    X = np.array(
+        [
+            [np.nan, 0, 0, 0, 5],
+            [np.nan, 1, 0, np.nan, 3],
+            [np.nan, 2, 0, 0, 0],
+            [np.nan, 6, 0, 5, 13],
+        ]
+    )
+    msg = "Input X contains NaN"
+    with pytest.raises(ValueError, match=msg):
+        imputer.fit(X)
+
+    # the fitted missing_values=0 imputer must also reject NaN in a smaller
+    # matrix at fit time
+    X = np.array(
+        [
+            [0, 0],
+            [np.nan, 2],
+        ]
+    )
+    with pytest.raises(ValueError, match=msg):
+        imputer.fit(X)
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_removes_all_na_features(na):
+    X = np.array(
+        [
+            [1, 1, na, 1, 1, 1.0],
+            [2, 3, na, 2, 2, 2],
+            [3, 4, na, 3, 3, na],
+            [6, 4, na, na, 6, 6],
+        ]
+    )
+    knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X)
+
+    X_transform = knn.transform(X)
+    assert not np.isnan(X_transform).any()
+    assert X_transform.shape == (4, 5)
+
+    X_test = np.arange(0, 12).reshape(2, 6)
+    X_transform = knn.transform(X_test)
+    assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform)
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_zero_nan_imputes_the_same(na):
+    # Test with an imputable matrix and compare with different missing_values
+    X_zero = np.array(
+        [
+            [1, 0, 1, 1, 1.0],
+            [2, 2, 2, 2, 2],
+            [3, 3, 3, 3, 0],
+            [6, 6, 0, 6, 6],
+        ]
+    )
+
+    X_nan = np.array(
+        [
+            [1, na, 1, 1, 1.0],
+            [2, 2, 2, 2, 2],
+            [3, 3, 3, 3, na],
+            [6, 6, na, 6, 6],
+        ]
+    )
+
+    X_imputed = np.array(
+        [
+            [1, 2.5, 1, 1, 1.0],
+            [2, 2, 2, 2, 2],
+            [3, 3, 3, 3, 1.5],
+            [6, 6, 2.5, 6, 6],
+        ]
+    )
+
+    imputer_zero = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
+
+    imputer_nan = KNNImputer(missing_values=na, n_neighbors=2, weights="uniform")
+
+    assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed)
+    assert_allclose(
+        imputer_zero.fit_transform(X_zero), imputer_nan.fit_transform(X_nan)
+    )
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_verify(na):
+    # Test with an imputable matrix
+    X = np.array(
+        [
+            [1, 0, 0, 1],
+            [2, 1, 2, na],
+            [3, 2, 3, na],
+            [na, 4, 5, 5],
+            [6, na, 6, 7],
+            [8, 8, 8, 8],
+            [16, 15, 18, 19],
+        ]
+    )
+
+    X_imputed = np.array(
+        [
+            [1, 0, 0, 1],
+            [2, 1, 2, 8],
+            [3, 2, 3, 8],
+            [4, 4, 5, 5],
+            [6, 3, 6, 7],
+            [8, 8, 8, 8],
+            [16, 15, 18, 19],
+        ]
+    )
+
+    imputer = KNNImputer(missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed)
+
+    # Test when there are not enough neighbors
+    X = np.array(
+        [
+            [1, 0, 0, na],
+            [2, 1, 2, na],
+            [3, 2, 3, na],
+            [4, 4, 5, na],
+            [6, 7, 6, na],
+            [8, 8, 8, na],
+            [20, 20, 20, 20],
+            [22, 22, 22, 22],
+        ]
+    )
+
+    # Not enough neighbors, use column mean from training
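+    # only the last two rows have the fourth feature observed (20 and 22)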
+    X_impute_value = (20 + 22) / 2
+    X_imputed = np.array(
+        [
+            [1, 0, 0, X_impute_value],
+            [2, 1, 2, X_impute_value],
+            [3, 2, 3, X_impute_value],
+            [4, 4, 5, X_impute_value],
+            [6, 7, 6, X_impute_value],
+            [8, 8, 8, X_impute_value],
+            [20, 20, 20, 20],
+            [22, 22, 22, 22],
+        ]
+    )
+
+    imputer = KNNImputer(missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed)
+
+    # Test when data in fit() and transform() are different
+    X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 16]])
+
+    X1 = np.array([[1, 0], [3, 2], [4, na]])
+
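+    # with the default n_neighbors=5, the five nearest donors (by distance
+    # on column 0) contribute the column-1 values 0, 3, 6, 7 and 8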
+    X_2_1 = (0 + 3 + 6 + 7 + 8) / 5
+    X1_imputed = np.array([[1, 0], [3, 2], [4, X_2_1]])
+
+    imputer = KNNImputer(missing_values=na)
+    assert_allclose(imputer.fit(X).transform(X1), X1_imputed)
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_one_n_neighbors(na):
+
+    X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
+
+    X_imputed = np.array([[0, 0], [4, 2], [4, 3], [5, 3], [7, 7], [7, 8], [14, 13]])
+
+    imputer = KNNImputer(n_neighbors=1, missing_values=na)
+
+    assert_allclose(imputer.fit_transform(X), X_imputed)
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_all_samples_are_neighbors(na):
+    X = np.array([[0, 0], [na, 2], [4, 3], [5, na], [7, 7], [na, 8], [14, 13]])
+
+    X_imputed = np.array([[0, 0], [6, 2], [4, 3], [5, 5.5], [7, 7], [6, 8], [14, 13]])
+
+    n_neighbors = X.shape[0] - 1
+    imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
+
+    assert_allclose(imputer.fit_transform(X), X_imputed)
+
+    n_neighbors = X.shape[0]
+    imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
+    assert_allclose(imputer_plus1.fit_transform(X), X_imputed)
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_weight_uniform(na):
+
+    X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])
+
+    # Test with "uniform" weight (or unweighted)
+    X_imputed_uniform = np.array(
+        [[0, 0], [5, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
+    )
+
+    imputer = KNNImputer(weights="uniform", missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
+
+    # Test with "callable" weight
+    def no_weight(dist):
+        return None
+
+    imputer = KNNImputer(weights=no_weight, missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
+
+    # Test with "callable" uniform weight
+    def uniform_weight(dist):
+        return np.ones_like(dist)
+
+    imputer = KNNImputer(weights=uniform_weight, missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
+
+
+@pytest.mark.parametrize("na", [np.nan, -1])
+def test_knn_imputer_weight_distance(na):
+    X = np.array([[0, 0], [na, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]])
+
+    # Test with "distance" weight
+    nn = KNeighborsRegressor(metric="euclidean", weights="distance")
+    X_rows_idx = [0, 2, 3, 4, 5, 6]
+    nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0])
+    knn_imputed_value = nn.predict(X[1:2, 1:])[0]
+
+    # Manual calculation
+    X_neighbors_idx = [0, 2, 3, 4, 5]
+    dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na)
+    weights = 1 / dist[:, X_neighbors_idx].ravel()
+    manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights)
+
+    X_imputed_distance1 = np.array(
+        [[0, 0], [manual_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
+    )
+
+    # NearestNeighbor calculation
+    X_imputed_distance2 = np.array(
+        [[0, 0], [knn_imputed_value, 2], [4, 3], [5, 6], [7, 7], [9, 8], [11, 10]]
+    )
+
+    imputer = KNNImputer(weights="distance", missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed_distance1)
+    assert_allclose(imputer.fit_transform(X), X_imputed_distance2)
+
+    # Test with weights = "distance" and n_neighbors=2
+    X = np.array(
+        [
+            [na, 0, 0],
+            [2, 1, 2],
+            [3, 2, 3],
+            [4, 5, 5],
+        ]
+    )
+
+    # the neighbors are rows 1 and 2; their nan_euclidean_distances to row 0 are:
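+    # nan_euclidean scales the squared distance over the observed coordinates
+    # by n_features / n_present, hence the 3 / 2 factor (3 features, 2 shared)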
+    dist_0_1 = np.sqrt((3 / 2) * ((1 - 0) ** 2 + (2 - 0) ** 2))
+    dist_0_2 = np.sqrt((3 / 2) * ((2 - 0) ** 2 + (3 - 0) ** 2))
+    imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2])
+
+    X_imputed = np.array(
+        [
+            [imputed_value, 0, 0],
+            [2, 1, 2],
+            [3, 2, 3],
+            [4, 5, 5],
+        ]
+    )
+
+    imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed)
+
+    # Test with varying missingness patterns
+    X = np.array(
+        [
+            [1, 0, 0, 1],
+            [0, na, 1, na],
+            [1, 1, 1, na],
+            [0, 1, 0, 0],
+            [0, 0, 0, 0],
+            [1, 0, 1, 1],
+            [10, 10, 10, 10],
+        ]
+    )
+
+    # Get weights of donor neighbors
+    dist = nan_euclidean_distances(X, missing_values=na)
+    r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]]
+    r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]]
+    r1c1_nbor_wt = 1 / r1c1_nbor_dists
+    r1c3_nbor_wt = 1 / r1c3_nbor_dists
+
+    r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]]
+    r2c3_nbor_wt = 1 / r2c3_nbor_dists
+
+    # Collect donor values
+    col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy()
+    col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy()
+
+    # Final imputed values
+    r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt)
+    r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt)
+    r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt)
+
+    X_imputed = np.array(
+        [
+            [1, 0, 0, 1],
+            [0, r1c1_imp, 1, r1c3_imp],
+            [1, 1, 1, r2c3_imp],
+            [0, 1, 0, 0],
+            [0, 0, 0, 0],
+            [1, 0, 1, 1],
+            [10, 10, 10, 10],
+        ]
+    )
+
+    imputer = KNNImputer(weights="distance", missing_values=na)
+    assert_allclose(imputer.fit_transform(X), X_imputed)
+
+    X = np.array(
+        [
+            [0, 0, 0, na],
+            [1, 1, 1, na],
+            [2, 2, na, 2],
+            [3, 3, 3, 3],
+            [4, 4, 4, 4],
+            [5, 5, 5, 5],
+            [6, 6, 6, 6],
+            [na, 7, 7, 7],
+        ]
+    )
+
+    dist = pairwise_distances(
+        X, metric="nan_euclidean", squared=False, missing_values=na
+    )
+
+    # Calculate weights
+    r0c3_w = 1.0 / dist[0, 2:-1]
+    r1c3_w = 1.0 / dist[1, 2:-1]
+    r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)]
+    r7c0_w = 1.0 / dist[7, 2:7]
+
+    # Calculate weighted averages
+    r0c3 = np.average(X[2:-1, -1], weights=r0c3_w)
+    r1c3 = np.average(X[2:-1, -1], weights=r1c3_w)
+    r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w)
+    r7c0 = np.average(X[2:7, 0], weights=r7c0_w)
+
+    X_imputed = np.array(
+        [
+            [0, 0, 0, r0c3],
+            [1, 1, 1, r1c3],
+            [2, 2, r2c2, 2],
+            [3, 3, 3, 3],
+            [4, 4, 4, 4],
+            [5, 5, 5, 5],
+            [6, 6, 6, 6],
+            [r7c0, 7, 7, 7],
+        ]
+    )
+
+    imputer_comp_wt = KNNImputer(missing_values=na, weights="distance")
+    assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed)
+
+
+def test_knn_imputer_callable_metric():
+
+    # Define callable metric that returns the l1 norm:
+    def custom_callable(x, y, missing_values=np.nan, squared=False):
+        x = np.ma.array(x, mask=np.isnan(x))
+        y = np.ma.array(y, mask=np.isnan(y))
+        dist = np.nansum(np.abs(x - y))
+        return dist
+
+    X = np.array([[4, 3, 3, np.nan], [6, 9, 6, 9], [4, 8, 6, 9], [np.nan, 9, 11, 10.0]])
+
+    X_0_3 = (9 + 9) / 2
+    X_3_0 = (6 + 4) / 2
+    X_imputed = np.array(
+        [[4, 3, 3, X_0_3], [6, 9, 6, 9], [4, 8, 6, 9], [X_3_0, 9, 11, 10.0]]
+    )
+
+    imputer = KNNImputer(n_neighbors=2, metric=custom_callable)
+    assert_allclose(imputer.fit_transform(X), X_imputed)
+
+
+@pytest.mark.parametrize("working_memory", [None, 0])
+@pytest.mark.parametrize("na", [-1, np.nan])
+# Note that we use working_memory=0 to ensure that chunking is tested, even
+# for a small dataset. However, it should raise a UserWarning that we ignore.
+@pytest.mark.filterwarnings("ignore:adhere to working_memory")
+def test_knn_imputer_with_simple_example(na, working_memory):
+
+    X = np.array(
+        [
+            [0, na, 0, na],
+            [1, 1, 1, na],
+            [2, 2, na, 2],
+            [3, 3, 3, 3],
+            [4, 4, 4, 4],
+            [5, 5, 5, 5],
+            [6, 6, 6, 6],
+            [na, 7, 7, 7],
+        ]
+    )
+
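+    # with the default n_neighbors=5 and uniform weights, each expected value
+    # is the plain mean of the five donor rows for that column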
+    r0c1 = np.mean(X[1:6, 1])
+    r0c3 = np.mean(X[2:-1, -1])
+    r1c3 = np.mean(X[2:-1, -1])
+    r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2])
+    r7c0 = np.mean(X[2:-1, 0])
+
+    X_imputed = np.array(
+        [
+            [0, r0c1, 0, r0c3],
+            [1, 1, 1, r1c3],
+            [2, 2, r2c2, 2],
+            [3, 3, 3, 3],
+            [4, 4, 4, 4],
+            [5, 5, 5, 5],
+            [6, 6, 6, 6],
+            [r7c0, 7, 7, 7],
+        ]
+    )
+
+    with config_context(working_memory=working_memory):
+        imputer_comp = KNNImputer(missing_values=na)
+        assert_allclose(imputer_comp.fit_transform(X), X_imputed)
+
+
+@pytest.mark.parametrize("na", [-1, np.nan])
+@pytest.mark.parametrize("weights", ["uniform", "distance"])
+def test_knn_imputer_not_enough_valid_distances(na, weights):
+    # every donor for the feature to impute is at nan distance, so the
+    # column mean from training is used as a fallback
+    X1 = np.array([[na, 11], [na, 1], [3, na]])
+    X1_imputed = np.array([[3, 11], [3, 1], [3, 6]])
+
+    knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights)
+    assert_allclose(knn.fit_transform(X1), X1_imputed)
+
+    X2 = np.array([[4, na]])
+    X2_imputed = np.array([[4, 6]])
+    assert_allclose(knn.transform(X2), X2_imputed)
+
+
+@pytest.mark.parametrize("na", [-1, np.nan])
+def test_knn_imputer_drops_all_nan_features(na):
+    X1 = np.array([[na, 1], [na, 2]])
+    knn = KNNImputer(missing_values=na, n_neighbors=1)
+    X1_expected = np.array([[1], [2]])
+    assert_allclose(knn.fit_transform(X1), X1_expected)
+
+    X2 = np.array([[1, 2], [3, na]])
+    X2_expected = np.array([[2], [1.5]])
+    assert_allclose(knn.transform(X2), X2_expected)
+
+
+@pytest.mark.parametrize("working_memory", [None, 0])
+@pytest.mark.parametrize("na", [-1, np.nan])
+def test_knn_imputer_distance_weighted_not_enough_neighbors(na, working_memory):
+    X = np.array([[3, na], [2, na], [na, 4], [5, 6], [6, 8], [na, 5]])
+
+    dist = pairwise_distances(
+        X, metric="nan_euclidean", squared=False, missing_values=na
+    )
+
+    X_01 = np.average(X[3:5, 1], weights=1 / dist[0, 3:5])
+    X_11 = np.average(X[3:5, 1], weights=1 / dist[1, 3:5])
+    X_20 = np.average(X[3:5, 0], weights=1 / dist[2, 3:5])
+    X_50 = np.average(X[3:5, 0], weights=1 / dist[5, 3:5])
+
+    X_expected = np.array([[3, X_01], [2, X_11], [X_20, 4], [5, 6], [6, 8], [X_50, 5]])
+
+    with config_context(working_memory=working_memory):
+        knn_3 = KNNImputer(missing_values=na, n_neighbors=3, weights="distance")
+        assert_allclose(knn_3.fit_transform(X), X_expected)
+
+        knn_4 = KNNImputer(missing_values=na, n_neighbors=4, weights="distance")
+        assert_allclose(knn_4.fit_transform(X), X_expected)
+
+
+@pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)])
+def test_knn_tags(na, allow_nan):
+    knn = KNNImputer(missing_values=na)
+    assert knn._get_tags()["allow_nan"] == allow_nan
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c4f45338a64a8a4c685dacb0a38b34ad5535478
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd0015868d5aebf93cb609ae09e37c42af8e0136
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97dd82eecdb924ce241c24fb09b12f00465503e2
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..789147d3b3662ae82186d69759de27d647655eff
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e2aebe30dcc191cf458380b5ab716313406ef39
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b65a9dc7faec13864eed8dfc7b681e3520854b30
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_base.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a15b98131fdb1182750ec932882d3a2ae8fc239c
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_base.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_common_curve_display.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_common_curve_display.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24c3f55ebd61e6a24c753056ed2f863425c6cf53
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_common_curve_display.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_confusion_matrix_display.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_confusion_matrix_display.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92a879dc03ba0c72e82b61379fa697b39795a716
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_confusion_matrix_display.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_det_curve_display.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_det_curve_display.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66abec06777d86849dca84ed23f0b5b4a528f251
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_det_curve_display.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_precision_recall_display.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_precision_recall_display.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a35d4c8e699a49d367c24b8e5a548595e95d288
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_precision_recall_display.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_predict_error_display.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_predict_error_display.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0731f025d6aa5e309d8aac7fe6bccdd1ace043d3
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_predict_error_display.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_roc_curve_display.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_roc_curve_display.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22b9f3167e6fad594482020750f568d9552a443a
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_roc_curve_display.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_det_curve_display.py b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_det_curve_display.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d7a26d5e49a0980749885ef81472712a92e5d0b
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_det_curve_display.py
@@ -0,0 +1,108 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose
+
+from sklearn.datasets import load_iris
+from sklearn.linear_model import LogisticRegression
+
+from sklearn.metrics import det_curve
+from sklearn.metrics import DetCurveDisplay
+
+
+@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
+@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
+@pytest.mark.parametrize("with_sample_weight", [True, False])
+@pytest.mark.parametrize("with_strings", [True, False])
+def test_det_curve_display(
+    pyplot, constructor_name, response_method, with_sample_weight, with_strings
+):
+    X, y = load_iris(return_X_y=True)
+    # Binarize the problem by keeping only the first two classes
+    X, y = X[y < 2], y[y < 2]
+
+    pos_label = None
+    if with_strings:
+        y = np.array(["c", "b"])[y]
+        pos_label = "c"
+
+    if with_sample_weight:
+        rng = np.random.RandomState(42)
+        sample_weight = rng.randint(1, 4, size=(X.shape[0]))
+    else:
+        sample_weight = None
+
+    lr = LogisticRegression()
+    lr.fit(X, y)
+    y_pred = getattr(lr, response_method)(X)
+    if y_pred.ndim == 2:
+        y_pred = y_pred[:, 1]
+
+    # safeguard for the binary if/else construction
+    assert constructor_name in ("from_estimator", "from_predictions")
+
+    common_kwargs = {
+        "name": lr.__class__.__name__,
+        "alpha": 0.8,
+        "sample_weight": sample_weight,
+        "pos_label": pos_label,
+    }
+    if constructor_name == "from_estimator":
+        disp = DetCurveDisplay.from_estimator(lr, X, y, **common_kwargs)
+    else:
+        disp = DetCurveDisplay.from_predictions(y, y_pred, **common_kwargs)
+
+    fpr, fnr, _ = det_curve(
+        y,
+        y_pred,
+        sample_weight=sample_weight,
+        pos_label=pos_label,
+    )
+
+    assert_allclose(disp.fpr, fpr)
+    assert_allclose(disp.fnr, fnr)
+
+    assert disp.estimator_name == "LogisticRegression"
+
+    # the import cannot fail thanks to the pyplot fixture
+    import matplotlib as mpl  # noqa
+
+    assert isinstance(disp.line_, mpl.lines.Line2D)
+    assert disp.line_.get_alpha() == 0.8
+    assert isinstance(disp.ax_, mpl.axes.Axes)
+    assert isinstance(disp.figure_, mpl.figure.Figure)
+    assert disp.line_.get_label() == "LogisticRegression"
+
+    expected_pos_label = 1 if pos_label is None else pos_label
+    expected_ylabel = f"False Negative Rate (Positive label: {expected_pos_label})"
+    expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})"
+    assert disp.ax_.get_ylabel() == expected_ylabel
+    assert disp.ax_.get_xlabel() == expected_xlabel
+
+
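+# A small illustration (ours, not from the upstream suite) of the det_curve
+# output the assertions above compare against: as the decision threshold
+# rises, the false positive rate can only fall while the false negative rate
+# can only grow, which is exactly the trade-off the DET display plots.
+def _det_curve_orientation_sketch():
+    fpr, fnr, thresholds = det_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
+    assert fpr[0] >= fpr[-1] and fnr[0] <= fnr[-1]
+
+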
+@pytest.mark.parametrize(
+    "constructor_name, expected_clf_name",
+    [
+        ("from_estimator", "LogisticRegression"),
+        ("from_predictions", "Classifier"),
+    ],
+)
+def test_det_curve_display_default_name(
+    pyplot,
+    constructor_name,
+    expected_clf_name,
+):
+    # Check the default name displayed in the figure when `name` is not provided
+    X, y = load_iris(return_X_y=True)
+    # Binarize the problem by keeping only the first two classes
+    X, y = X[y < 2], y[y < 2]
+
+    lr = LogisticRegression().fit(X, y)
+    y_pred = lr.predict_proba(X)[:, 1]
+
+    if constructor_name == "from_estimator":
+        disp = DetCurveDisplay.from_estimator(lr, X, y)
+    else:
+        disp = DetCurveDisplay.from_predictions(y, y_pred)
+
+    assert disp.estimator_name == expected_clf_name
+    assert disp.line_.get_label() == expected_clf_name
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_precision_recall_display.py b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_precision_recall_display.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d514fa1f32b3988bd7cfd6de078132274dcad50
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_precision_recall_display.py
@@ -0,0 +1,292 @@
+import numpy as np
+import pytest
+
+from sklearn.compose import make_column_transformer
+from sklearn.datasets import load_breast_cancer, make_classification
+from sklearn.exceptions import NotFittedError
+from sklearn.linear_model import LogisticRegression
+from sklearn.metrics import average_precision_score, precision_recall_curve
+from sklearn.model_selection import train_test_split
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.svm import SVC, SVR
+from sklearn.utils import shuffle
+
+from sklearn.metrics import PrecisionRecallDisplay
+
+# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:"
+    "matplotlib.*"
+)
+
+
+def test_precision_recall_display_validation(pyplot):
+    """Check that we raise the proper error when validating parameters."""
+    X, y = make_classification(
+        n_samples=100, n_informative=5, n_classes=5, random_state=0
+    )
+
+    with pytest.raises(NotFittedError):
+        PrecisionRecallDisplay.from_estimator(SVC(), X, y)
+
+    regressor = SVR().fit(X, y)
+    y_pred_regressor = regressor.predict(X)
+    classifier = SVC(probability=True).fit(X, y)
+    y_pred_classifier = classifier.predict_proba(X)[:, -1]
+
+    err_msg = "PrecisionRecallDisplay.from_estimator only supports classifiers"
+    with pytest.raises(ValueError, match=err_msg):
+        PrecisionRecallDisplay.from_estimator(regressor, X, y)
+
+    err_msg = "Expected 'estimator' to be a binary classifier, but got SVC"
+    with pytest.raises(ValueError, match=err_msg):
+        PrecisionRecallDisplay.from_estimator(classifier, X, y)
+
+    err_msg = "{} format is not supported"
+    with pytest.raises(ValueError, match=err_msg.format("continuous")):
+        # Force `y_true` to be seen as a regression problem
+        PrecisionRecallDisplay.from_predictions(y + 0.5, y_pred_classifier, pos_label=1)
+    with pytest.raises(ValueError, match=err_msg.format("multiclass")):
+        PrecisionRecallDisplay.from_predictions(y, y_pred_regressor, pos_label=1)
+
+    err_msg = "Found input variables with inconsistent numbers of samples"
+    with pytest.raises(ValueError, match=err_msg):
+        PrecisionRecallDisplay.from_predictions(y, y_pred_classifier[::2])
+
+    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
+    y += 10
+    classifier.fit(X, y)
+    y_pred_classifier = classifier.predict_proba(X)[:, -1]
+    err_msg = r"y_true takes value in {10, 11} and pos_label is not specified"
+    with pytest.raises(ValueError, match=err_msg):
+        PrecisionRecallDisplay.from_predictions(y, y_pred_classifier)
+
+
+@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
+@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
+def test_precision_recall_display_plotting(pyplot, constructor_name, response_method):
+    """Check the overall plotting rendering."""
+    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
+    pos_label = 1
+
+    classifier = LogisticRegression().fit(X, y)
+
+    y_pred = getattr(classifier, response_method)(X)
+    y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, pos_label]
+
+    # safeguard for the binary if/else construction
+    assert constructor_name in ("from_estimator", "from_predictions")
+
+    if constructor_name == "from_estimator":
+        display = PrecisionRecallDisplay.from_estimator(
+            classifier, X, y, response_method=response_method
+        )
+    else:
+        display = PrecisionRecallDisplay.from_predictions(
+            y, y_pred, pos_label=pos_label
+        )
+
+    precision, recall, _ = precision_recall_curve(y, y_pred, pos_label=pos_label)
+    average_precision = average_precision_score(y, y_pred, pos_label=pos_label)
+
+    np.testing.assert_allclose(display.precision, precision)
+    np.testing.assert_allclose(display.recall, recall)
+    assert display.average_precision == pytest.approx(average_precision)
+
+    import matplotlib as mpl
+
+    assert isinstance(display.line_, mpl.lines.Line2D)
+    assert isinstance(display.ax_, mpl.axes.Axes)
+    assert isinstance(display.figure_, mpl.figure.Figure)
+
+    assert display.ax_.get_xlabel() == "Recall (Positive label: 1)"
+    assert display.ax_.get_ylabel() == "Precision (Positive label: 1)"
+
+    # plotting passing some new parameters
+    display.plot(alpha=0.8, name="MySpecialEstimator")
+    expected_label = f"MySpecialEstimator (AP = {average_precision:0.2f})"
+    assert display.line_.get_label() == expected_label
+    assert display.line_.get_alpha() == pytest.approx(0.8)
+
+
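+# For reference (our sketch, not upstream code), the average precision the
+# tests above check is the area of the precision step function,
+# AP = sum_n (R_n - R_{n-1}) * P_n; precision_recall_curve returns its arrays
+# from recall 1 down to recall 0, hence the negated sum of differences.
+def _average_precision_step_sum(precision, recall):
+    return -np.sum(np.diff(recall) * np.asarray(precision)[:-1])
+
+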
+@pytest.mark.parametrize(
+    "constructor_name, default_label",
+    [
+        ("from_estimator", "LogisticRegression (AP = {:.2f})"),
+        ("from_predictions", "Classifier (AP = {:.2f})"),
+    ],
+)
+def test_precision_recall_display_name(pyplot, constructor_name, default_label):
+    """Check the behaviour of the name parameters"""
+    X, y = make_classification(n_classes=2, n_samples=100, random_state=0)
+    pos_label = 1
+
+    classifier = LogisticRegression().fit(X, y)
+
+    y_pred = classifier.predict_proba(X)[:, pos_label]
+
+    # safeguard for the binary if/else construction
+    assert constructor_name in ("from_estimator", "from_predictions")
+
+    if constructor_name == "from_estimator":
+        display = PrecisionRecallDisplay.from_estimator(classifier, X, y)
+    else:
+        display = PrecisionRecallDisplay.from_predictions(
+            y, y_pred, pos_label=pos_label
+        )
+
+    average_precision = average_precision_score(y, y_pred, pos_label=pos_label)
+
+    # check that the default name is used
+    assert display.line_.get_label() == default_label.format(average_precision)
+
+    # check that the name can be set
+    display.plot(name="MySpecialEstimator")
+    assert (
+        display.line_.get_label()
+        == f"MySpecialEstimator (AP = {average_precision:.2f})"
+    )
+
+
+@pytest.mark.parametrize(
+    "clf",
+    [
+        make_pipeline(StandardScaler(), LogisticRegression()),
+        make_pipeline(
+            make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
+        ),
+    ],
+)
+def test_precision_recall_display_pipeline(pyplot, clf):
+    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
+    with pytest.raises(NotFittedError):
+        PrecisionRecallDisplay.from_estimator(clf, X, y)
+    clf.fit(X, y)
+    display = PrecisionRecallDisplay.from_estimator(clf, X, y)
+    assert display.estimator_name == clf.__class__.__name__
+
+
+def test_precision_recall_display_string_labels(pyplot):
+    # non-regression test for #15738
+    cancer = load_breast_cancer()
+    X, y = cancer.data, cancer.target_names[cancer.target]
+
+    lr = make_pipeline(StandardScaler(), LogisticRegression())
+    lr.fit(X, y)
+    for klass in cancer.target_names:
+        assert klass in lr.classes_
+    display = PrecisionRecallDisplay.from_estimator(lr, X, y)
+
+    y_pred = lr.predict_proba(X)[:, 1]
+    avg_prec = average_precision_score(y, y_pred, pos_label=lr.classes_[1])
+
+    assert display.average_precision == pytest.approx(avg_prec)
+    assert display.estimator_name == lr.__class__.__name__
+
+    err_msg = r"y_true takes value in {'benign', 'malignant'}"
+    with pytest.raises(ValueError, match=err_msg):
+        PrecisionRecallDisplay.from_predictions(y, y_pred)
+
+    display = PrecisionRecallDisplay.from_predictions(
+        y, y_pred, pos_label=lr.classes_[1]
+    )
+    assert display.average_precision == pytest.approx(avg_prec)
+
+
+@pytest.mark.parametrize(
+    "average_precision, estimator_name, expected_label",
+    [
+        (0.9, None, "AP = 0.90"),
+        (None, "my_est", "my_est"),
+        (0.8, "my_est2", "my_est2 (AP = 0.80)"),
+    ],
+)
+def test_default_labels(pyplot, average_precision, estimator_name, expected_label):
+    """Check the default labels used in the display."""
+    precision = np.array([1, 0.5, 0])
+    recall = np.array([0, 0.5, 1])
+    display = PrecisionRecallDisplay(
+        precision,
+        recall,
+        average_precision=average_precision,
+        estimator_name=estimator_name,
+    )
+    display.plot()
+    assert display.line_.get_label() == expected_label
+
+
+@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
+@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
+def test_plot_precision_recall_pos_label(pyplot, constructor_name, response_method):
+    # check that we can provide the positive label and display the proper
+    # statistics
+    X, y = load_breast_cancer(return_X_y=True)
+    # create a highly imbalanced version of the breast cancer dataset
+    idx_positive = np.flatnonzero(y == 1)
+    idx_negative = np.flatnonzero(y == 0)
+    idx_selected = np.hstack([idx_negative, idx_positive[:25]])
+    X, y = X[idx_selected], y[idx_selected]
+    X, y = shuffle(X, y, random_state=42)
+    # only use 2 features to make the problem even harder
+    X = X[:, :2]
+    y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X,
+        y,
+        stratify=y,
+        random_state=0,
+    )
+
+    classifier = LogisticRegression()
+    classifier.fit(X_train, y_train)
+
+    # sanity check: the positive class is classes_[0], so relying on the
+    # default pos_label would be betrayed by the class imbalance
+    assert classifier.classes_.tolist() == ["cancer", "not cancer"]
+
+    y_pred = getattr(classifier, response_method)(X_test)
+    # select the corresponding probability column, or negate the decision
+    # function (which is oriented towards classes_[1]) otherwise
+    y_pred_cancer = -1 * y_pred if y_pred.ndim == 1 else y_pred[:, 0]
+    y_pred_not_cancer = y_pred if y_pred.ndim == 1 else y_pred[:, 1]
+
+    if constructor_name == "from_estimator":
+        display = PrecisionRecallDisplay.from_estimator(
+            classifier,
+            X_test,
+            y_test,
+            pos_label="cancer",
+            response_method=response_method,
+        )
+    else:
+        display = PrecisionRecallDisplay.from_predictions(
+            y_test,
+            y_pred_cancer,
+            pos_label="cancer",
+        )
+    # we should obtain the statistics of the "cancer" class
+    avg_prec_limit = 0.65
+    assert display.average_precision < avg_prec_limit
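+    # recall runs from 1 down to 0, so np.trapz returns a negative area;
+    # negating it gives the area under the PR curve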
+    assert -np.trapz(display.precision, display.recall) < avg_prec_limit
+
+    # otherwise we should obtain the statistics of the "not cancer" class
+    if constructor_name == "from_estimator":
+        display = PrecisionRecallDisplay.from_estimator(
+            classifier,
+            X_test,
+            y_test,
+            response_method=response_method,
+            pos_label="not cancer",
+        )
+    else:
+        display = PrecisionRecallDisplay.from_predictions(
+            y_test,
+            y_pred_not_cancer,
+            pos_label="not cancer",
+        )
+    avg_prec_limit = 0.95
+    assert display.average_precision > avg_prec_limit
+    assert -np.trapz(display.precision, display.recall) > avg_prec_limit
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_roc_curve_display.py b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_roc_curve_display.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ba5b35f705f6db6588f91f427515f8b294a8c58
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_roc_curve_display.py
@@ -0,0 +1,251 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose
+
+
+from sklearn.compose import make_column_transformer
+from sklearn.datasets import load_iris
+
+from sklearn.datasets import load_breast_cancer
+from sklearn.exceptions import NotFittedError
+from sklearn.linear_model import LogisticRegression
+from sklearn.metrics import roc_curve
+from sklearn.metrics import auc
+
+from sklearn.model_selection import train_test_split
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.utils import shuffle
+
+
+from sklearn.metrics import RocCurveDisplay
+
+
+@pytest.fixture(scope="module")
+def data():
+    return load_iris(return_X_y=True)
+
+
+@pytest.fixture(scope="module")
+def data_binary(data):
+    X, y = data
+    return X[y < 2], y[y < 2]
+
+
+@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
+@pytest.mark.parametrize("with_sample_weight", [True, False])
+@pytest.mark.parametrize("drop_intermediate", [True, False])
+@pytest.mark.parametrize("with_strings", [True, False])
+@pytest.mark.parametrize(
+    "constructor_name, default_name",
+    [
+        ("from_estimator", "LogisticRegression"),
+        ("from_predictions", "Classifier"),
+    ],
+)
+def test_roc_curve_display_plotting(
+    pyplot,
+    response_method,
+    data_binary,
+    with_sample_weight,
+    drop_intermediate,
+    with_strings,
+    constructor_name,
+    default_name,
+):
+    """Check the overall plotting behaviour."""
+    X, y = data_binary
+
+    pos_label = None
+    if with_strings:
+        y = np.array(["c", "b"])[y]
+        pos_label = "c"
+
+    if with_sample_weight:
+        rng = np.random.RandomState(42)
+        sample_weight = rng.randint(1, 4, size=(X.shape[0]))
+    else:
+        sample_weight = None
+
+    lr = LogisticRegression()
+    lr.fit(X, y)
+
+    y_pred = getattr(lr, response_method)(X)
+    y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, 1]
+
+    if constructor_name == "from_estimator":
+        display = RocCurveDisplay.from_estimator(
+            lr,
+            X,
+            y,
+            sample_weight=sample_weight,
+            drop_intermediate=drop_intermediate,
+            pos_label=pos_label,
+            alpha=0.8,
+        )
+    else:
+        display = RocCurveDisplay.from_predictions(
+            y,
+            y_pred,
+            sample_weight=sample_weight,
+            drop_intermediate=drop_intermediate,
+            pos_label=pos_label,
+            alpha=0.8,
+        )
+
+    fpr, tpr, _ = roc_curve(
+        y,
+        y_pred,
+        sample_weight=sample_weight,
+        drop_intermediate=drop_intermediate,
+        pos_label=pos_label,
+    )
+
+    assert_allclose(display.roc_auc, auc(fpr, tpr))
+    assert_allclose(display.fpr, fpr)
+    assert_allclose(display.tpr, tpr)
+
+    assert display.estimator_name == default_name
+
+    import matplotlib as mpl  # noqa
+
+    assert isinstance(display.line_, mpl.lines.Line2D)
+    assert display.line_.get_alpha() == 0.8
+    assert isinstance(display.ax_, mpl.axes.Axes)
+    assert isinstance(display.figure_, mpl.figure.Figure)
+
+    expected_label = f"{default_name} (AUC = {display.roc_auc:.2f})"
+    assert display.line_.get_label() == expected_label
+
+    expected_pos_label = 1 if pos_label is None else pos_label
+    expected_ylabel = f"True Positive Rate (Positive label: {expected_pos_label})"
+    expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})"
+
+    assert display.ax_.get_ylabel() == expected_ylabel
+    assert display.ax_.get_xlabel() == expected_xlabel
+
+
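+# Side note (our sketch, not upstream code): the `auc` helper used above is
+# the trapezoidal rule, so on roc_curve output it reduces to integrating the
+# true positive rate over the false positive rate.
+def _auc_trapezoid_sketch(fpr, tpr):
+    return np.trapz(tpr, fpr)
+
+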
+@pytest.mark.parametrize(
+    "clf",
+    [
+        LogisticRegression(),
+        make_pipeline(StandardScaler(), LogisticRegression()),
+        make_pipeline(
+            make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
+        ),
+    ],
+)
+@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
+def test_roc_curve_display_complex_pipeline(pyplot, data_binary, clf, constructor_name):
+    """Check the behaviour with complex pipeline."""
+    X, y = data_binary
+
+    if constructor_name == "from_estimator":
+        with pytest.raises(NotFittedError):
+            RocCurveDisplay.from_estimator(clf, X, y)
+
+    clf.fit(X, y)
+
+    if constructor_name == "from_estimator":
+        display = RocCurveDisplay.from_estimator(clf, X, y)
+        name = clf.__class__.__name__
+    else:
+        display = RocCurveDisplay.from_predictions(y, y)
+        name = "Classifier"
+
+    assert name in display.line_.get_label()
+    assert display.estimator_name == name
+
+
+@pytest.mark.parametrize(
+    "roc_auc, estimator_name, expected_label",
+    [
+        (0.9, None, "AUC = 0.90"),
+        (None, "my_est", "my_est"),
+        (0.8, "my_est2", "my_est2 (AUC = 0.80)"),
+    ],
+)
+def test_roc_curve_display_default_labels(
+    pyplot, roc_auc, estimator_name, expected_label
+):
+    """Check the default labels used in the display."""
+    fpr = np.array([0, 0.5, 1])
+    tpr = np.array([0, 0.5, 1])
+    disp = RocCurveDisplay(
+        fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=estimator_name
+    ).plot()
+    assert disp.line_.get_label() == expected_label
+
+
+@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
+@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
+def test_plot_roc_curve_pos_label(pyplot, response_method, constructor_name):
+    # check that we can provide the positive label and display the proper
+    # statistics
+    X, y = load_breast_cancer(return_X_y=True)
+    # create a highly imbalanced version of the breast cancer dataset
+    idx_positive = np.flatnonzero(y == 1)
+    idx_negative = np.flatnonzero(y == 0)
+    idx_selected = np.hstack([idx_negative, idx_positive[:25]])
+    X, y = X[idx_selected], y[idx_selected]
+    X, y = shuffle(X, y, random_state=42)
+    # only use 2 features to make the problem even harder
+    X = X[:, :2]
+    y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X,
+        y,
+        stratify=y,
+        random_state=0,
+    )
+
+    classifier = LogisticRegression()
+    classifier.fit(X_train, y_train)
+
+    # sanity check: the positive class is classes_[0], so relying on the
+    # default pos_label would be betrayed by the class imbalance
+    assert classifier.classes_.tolist() == ["cancer", "not cancer"]
+
+    y_pred = getattr(classifier, response_method)(X_test)
+    # select the corresponding probability column, or negate the decision
+    # function (which is oriented towards classes_[1]) otherwise
+    y_pred_cancer = -1 * y_pred if y_pred.ndim == 1 else y_pred[:, 0]
+    y_pred_not_cancer = y_pred if y_pred.ndim == 1 else y_pred[:, 1]
+
+    if constructor_name == "from_estimator":
+        display = RocCurveDisplay.from_estimator(
+            classifier,
+            X_test,
+            y_test,
+            pos_label="cancer",
+            response_method=response_method,
+        )
+    else:
+        display = RocCurveDisplay.from_predictions(
+            y_test,
+            y_pred_cancer,
+            pos_label="cancer",
+        )
+
+    roc_auc_limit = 0.95679
+
+    assert display.roc_auc == pytest.approx(roc_auc_limit)
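+    # integrating tpr over fpr directly should reproduce the reported AUC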
+    assert np.trapz(display.tpr, display.fpr) == pytest.approx(roc_auc_limit)
+
+    if constructor_name == "from_estimator":
+        display = RocCurveDisplay.from_estimator(
+            classifier,
+            X_test,
+            y_test,
+            response_method=response_method,
+            pos_label="not cancer",
+        )
+    else:
+        display = RocCurveDisplay.from_predictions(
+            y_test,
+            y_pred_not_cancer,
+            pos_label="not cancer",
+        )
+
+    assert display.roc_auc == pytest.approx(roc_auc_limit)
+    assert np.trapz(display.tpr, display.fpr) == pytest.approx(roc_auc_limit)
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d2144e6ff8a975a8dcc3289a2118d5539ce2bcc
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc differ
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so b/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..42e80151d62bd5f89ce3ac675571a8a171807002
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dcff059e9dbe8a6d6828cf0c6e0aca190de19cf3928cc02d8e7ea9da63c7014
+size 121433
diff --git a/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc b/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7469e9d72e535e32074dde1369cca60baf9fed24
Binary files /dev/null and b/mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc differ