Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/__init__.py +29 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd +81 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/glm_distribution.py +373 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/link.py +261 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/loss.py +1027 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_glm_distribution.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_glm_distribution.py +123 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py +109 -0
- mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py +1161 -0
- mgm/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py +1089 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_arff.py +1107 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_lobpcg.py +991 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py +90 -0
- mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py +535 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/__init__.py +24 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/_base.py +1071 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/_iterative.py +889 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/_knn.py +391 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1087,3 +1087,4 @@ mgm/lib/python3.10/site-packages/sympy/physics/continuum_mechanics/__pycache__/b
|
|
| 1087 |
vila/lib/python3.10/site-packages/opencv_python.libs/libavcodec-402e4b05.so.59.37.100 filter=lfs diff=lfs merge=lfs -text
|
| 1088 |
mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1089 |
videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1087 |
vila/lib/python3.10/site-packages/opencv_python.libs/libavcodec-402e4b05.so.59.37.100 filter=lfs diff=lfs merge=lfs -text
|
| 1088 |
mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1089 |
videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
|
| 1090 |
+
mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.29 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/_config.cpython-310.pyc
ADDED
|
Binary file (9.29 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/_distributor_init.cpython-310.pyc
ADDED
|
Binary file (518 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/_min_dependencies.cpython-310.pyc
ADDED
|
Binary file (1.94 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (33.2 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc
ADDED
|
Binary file (42.3 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/conftest.cpython-310.pyc
ADDED
|
Binary file (6.53 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc
ADDED
|
Binary file (32 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc
ADDED
|
Binary file (20.2 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/exceptions.cpython-310.pyc
ADDED
|
Binary file (4.93 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc
ADDED
|
Binary file (13.5 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_approximation.cpython-310.pyc
ADDED
|
Binary file (34.4 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc
ADDED
|
Binary file (9 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc
ADDED
|
Binary file (35 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc
ADDED
|
Binary file (33.9 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/naive_bayes.cpython-310.pyc
ADDED
|
Binary file (50.3 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc
ADDED
|
Binary file (46.4 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc
ADDED
|
Binary file (26.1 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/_loss/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The :mod:`sklearn._loss` module includes loss function classes suitable for
|
| 3 |
+
fitting classification and regression tasks.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from .loss import (
|
| 7 |
+
HalfSquaredError,
|
| 8 |
+
AbsoluteError,
|
| 9 |
+
PinballLoss,
|
| 10 |
+
HalfPoissonLoss,
|
| 11 |
+
HalfGammaLoss,
|
| 12 |
+
HalfTweedieLoss,
|
| 13 |
+
HalfTweedieLossIdentity,
|
| 14 |
+
HalfBinomialLoss,
|
| 15 |
+
HalfMultinomialLoss,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"HalfSquaredError",
|
| 21 |
+
"AbsoluteError",
|
| 22 |
+
"PinballLoss",
|
| 23 |
+
"HalfPoissonLoss",
|
| 24 |
+
"HalfGammaLoss",
|
| 25 |
+
"HalfTweedieLoss",
|
| 26 |
+
"HalfTweedieLossIdentity",
|
| 27 |
+
"HalfBinomialLoss",
|
| 28 |
+
"HalfMultinomialLoss",
|
| 29 |
+
]
|
mgm/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
cimport numpy as cnp
|
| 4 |
+
|
| 5 |
+
cnp.import_array()
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Fused types for y_true, y_pred, raw_prediction
|
| 9 |
+
ctypedef fused Y_DTYPE_C:
|
| 10 |
+
cnp.npy_float64
|
| 11 |
+
cnp.npy_float32
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Fused types for gradient and hessian
|
| 15 |
+
ctypedef fused G_DTYPE_C:
|
| 16 |
+
cnp.npy_float64
|
| 17 |
+
cnp.npy_float32
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Struct to return 2 doubles
|
| 21 |
+
ctypedef struct double_pair:
|
| 22 |
+
double val1
|
| 23 |
+
double val2
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# C base class for loss functions
|
| 27 |
+
cdef class CyLossFunction:
|
| 28 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 29 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 30 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
cdef class CyHalfSquaredError(CyLossFunction):
|
| 34 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 35 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 36 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
cdef class CyAbsoluteError(CyLossFunction):
|
| 40 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 41 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 42 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
cdef class CyPinballLoss(CyLossFunction):
|
| 46 |
+
cdef readonly double quantile # readonly makes it accessible from Python
|
| 47 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 48 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 49 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
cdef class CyHalfPoissonLoss(CyLossFunction):
|
| 53 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 54 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 55 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
cdef class CyHalfGammaLoss(CyLossFunction):
|
| 59 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 60 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 61 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
cdef class CyHalfTweedieLoss(CyLossFunction):
|
| 65 |
+
cdef readonly double power # readonly makes it accessible from Python
|
| 66 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 67 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 68 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
cdef class CyHalfTweedieLossIdentity(CyLossFunction):
|
| 72 |
+
cdef readonly double power # readonly makes it accessible from Python
|
| 73 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 74 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 75 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
cdef class CyHalfBinomialLoss(CyLossFunction):
|
| 79 |
+
cdef double cy_loss(self, double y_true, double raw_prediction) nogil
|
| 80 |
+
cdef double cy_gradient(self, double y_true, double raw_prediction) nogil
|
| 81 |
+
cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) nogil
|
mgm/lib/python3.10/site-packages/sklearn/_loss/glm_distribution.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Distribution functions used in GLM
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Author: Christian Lorentzen <lorentzen.ch@googlemail.com>
|
| 6 |
+
# License: BSD 3 clause
|
| 7 |
+
#
|
| 8 |
+
# TODO(1.3): remove file
|
| 9 |
+
# This is only used for backward compatibility in _GeneralizedLinearRegressor
|
| 10 |
+
# for the deprecated family attribute.
|
| 11 |
+
|
| 12 |
+
from abc import ABCMeta, abstractmethod
|
| 13 |
+
from collections import namedtuple
|
| 14 |
+
import numbers
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
from scipy.special import xlogy
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
DistributionBoundary = namedtuple("DistributionBoundary", ("value", "inclusive"))
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class ExponentialDispersionModel(metaclass=ABCMeta):
|
| 24 |
+
r"""Base class for reproductive Exponential Dispersion Models (EDM).
|
| 25 |
+
|
| 26 |
+
The pdf of :math:`Y\sim \mathrm{EDM}(y_\textrm{pred}, \phi)` is given by
|
| 27 |
+
|
| 28 |
+
.. math:: p(y| \theta, \phi) = c(y, \phi)
|
| 29 |
+
\exp\left(\frac{\theta y-A(\theta)}{\phi}\right)
|
| 30 |
+
= \tilde{c}(y, \phi)
|
| 31 |
+
\exp\left(-\frac{d(y, y_\textrm{pred})}{2\phi}\right)
|
| 32 |
+
|
| 33 |
+
with mean :math:`\mathrm{E}[Y] = A'(\theta) = y_\textrm{pred}`,
|
| 34 |
+
variance :math:`\mathrm{Var}[Y] = \phi \cdot v(y_\textrm{pred})`,
|
| 35 |
+
unit variance :math:`v(y_\textrm{pred})` and
|
| 36 |
+
unit deviance :math:`d(y,y_\textrm{pred})`.
|
| 37 |
+
|
| 38 |
+
Methods
|
| 39 |
+
-------
|
| 40 |
+
deviance
|
| 41 |
+
deviance_derivative
|
| 42 |
+
in_y_range
|
| 43 |
+
unit_deviance
|
| 44 |
+
unit_deviance_derivative
|
| 45 |
+
unit_variance
|
| 46 |
+
|
| 47 |
+
References
|
| 48 |
+
----------
|
| 49 |
+
https://en.wikipedia.org/wiki/Exponential_dispersion_model.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
def in_y_range(self, y):
|
| 53 |
+
"""Returns ``True`` if y is in the valid range of Y~EDM.
|
| 54 |
+
|
| 55 |
+
Parameters
|
| 56 |
+
----------
|
| 57 |
+
y : array of shape (n_samples,)
|
| 58 |
+
Target values.
|
| 59 |
+
"""
|
| 60 |
+
# Note that currently supported distributions have +inf upper bound
|
| 61 |
+
|
| 62 |
+
if not isinstance(self._lower_bound, DistributionBoundary):
|
| 63 |
+
raise TypeError(
|
| 64 |
+
"_lower_bound attribute must be of type DistributionBoundary"
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
if self._lower_bound.inclusive:
|
| 68 |
+
return np.greater_equal(y, self._lower_bound.value)
|
| 69 |
+
else:
|
| 70 |
+
return np.greater(y, self._lower_bound.value)
|
| 71 |
+
|
| 72 |
+
@abstractmethod
|
| 73 |
+
def unit_variance(self, y_pred):
|
| 74 |
+
r"""Compute the unit variance function.
|
| 75 |
+
|
| 76 |
+
The unit variance :math:`v(y_\textrm{pred})` determines the variance as
|
| 77 |
+
a function of the mean :math:`y_\textrm{pred}` by
|
| 78 |
+
:math:`\mathrm{Var}[Y_i] = \phi/s_i*v(y_\textrm{pred}_i)`.
|
| 79 |
+
It can also be derived from the unit deviance
|
| 80 |
+
:math:`d(y,y_\textrm{pred})` as
|
| 81 |
+
|
| 82 |
+
.. math:: v(y_\textrm{pred}) = \frac{2}{
|
| 83 |
+
\frac{\partial^2 d(y,y_\textrm{pred})}{
|
| 84 |
+
\partialy_\textrm{pred}^2}}\big|_{y=y_\textrm{pred}}
|
| 85 |
+
|
| 86 |
+
See also :func:`variance`.
|
| 87 |
+
|
| 88 |
+
Parameters
|
| 89 |
+
----------
|
| 90 |
+
y_pred : array of shape (n_samples,)
|
| 91 |
+
Predicted mean.
|
| 92 |
+
"""
|
| 93 |
+
|
| 94 |
+
@abstractmethod
|
| 95 |
+
def unit_deviance(self, y, y_pred, check_input=False):
|
| 96 |
+
r"""Compute the unit deviance.
|
| 97 |
+
|
| 98 |
+
The unit_deviance :math:`d(y,y_\textrm{pred})` can be defined by the
|
| 99 |
+
log-likelihood as
|
| 100 |
+
:math:`d(y,y_\textrm{pred}) = -2\phi\cdot
|
| 101 |
+
\left(loglike(y,y_\textrm{pred},\phi) - loglike(y,y,\phi)\right).`
|
| 102 |
+
|
| 103 |
+
Parameters
|
| 104 |
+
----------
|
| 105 |
+
y : array of shape (n_samples,)
|
| 106 |
+
Target values.
|
| 107 |
+
|
| 108 |
+
y_pred : array of shape (n_samples,)
|
| 109 |
+
Predicted mean.
|
| 110 |
+
|
| 111 |
+
check_input : bool, default=False
|
| 112 |
+
If True raise an exception on invalid y or y_pred values, otherwise
|
| 113 |
+
they will be propagated as NaN.
|
| 114 |
+
Returns
|
| 115 |
+
-------
|
| 116 |
+
deviance: array of shape (n_samples,)
|
| 117 |
+
Computed deviance
|
| 118 |
+
"""
|
| 119 |
+
|
| 120 |
+
def unit_deviance_derivative(self, y, y_pred):
|
| 121 |
+
r"""Compute the derivative of the unit deviance w.r.t. y_pred.
|
| 122 |
+
|
| 123 |
+
The derivative of the unit deviance is given by
|
| 124 |
+
:math:`\frac{\partial}{\partialy_\textrm{pred}}d(y,y_\textrm{pred})
|
| 125 |
+
= -2\frac{y-y_\textrm{pred}}{v(y_\textrm{pred})}`
|
| 126 |
+
with unit variance :math:`v(y_\textrm{pred})`.
|
| 127 |
+
|
| 128 |
+
Parameters
|
| 129 |
+
----------
|
| 130 |
+
y : array of shape (n_samples,)
|
| 131 |
+
Target values.
|
| 132 |
+
|
| 133 |
+
y_pred : array of shape (n_samples,)
|
| 134 |
+
Predicted mean.
|
| 135 |
+
"""
|
| 136 |
+
return -2 * (y - y_pred) / self.unit_variance(y_pred)
|
| 137 |
+
|
| 138 |
+
def deviance(self, y, y_pred, weights=1):
|
| 139 |
+
r"""Compute the deviance.
|
| 140 |
+
|
| 141 |
+
The deviance is a weighted sum of the per sample unit deviances,
|
| 142 |
+
:math:`D = \sum_i s_i \cdot d(y_i, y_\textrm{pred}_i)`
|
| 143 |
+
with weights :math:`s_i` and unit deviance
|
| 144 |
+
:math:`d(y,y_\textrm{pred})`.
|
| 145 |
+
In terms of the log-likelihood it is :math:`D = -2\phi\cdot
|
| 146 |
+
\left(loglike(y,y_\textrm{pred},\frac{phi}{s})
|
| 147 |
+
- loglike(y,y,\frac{phi}{s})\right)`.
|
| 148 |
+
|
| 149 |
+
Parameters
|
| 150 |
+
----------
|
| 151 |
+
y : array of shape (n_samples,)
|
| 152 |
+
Target values.
|
| 153 |
+
|
| 154 |
+
y_pred : array of shape (n_samples,)
|
| 155 |
+
Predicted mean.
|
| 156 |
+
|
| 157 |
+
weights : {int, array of shape (n_samples,)}, default=1
|
| 158 |
+
Weights or exposure to which variance is inverse proportional.
|
| 159 |
+
"""
|
| 160 |
+
return np.sum(weights * self.unit_deviance(y, y_pred))
|
| 161 |
+
|
| 162 |
+
def deviance_derivative(self, y, y_pred, weights=1):
|
| 163 |
+
r"""Compute the derivative of the deviance w.r.t. y_pred.
|
| 164 |
+
|
| 165 |
+
It gives :math:`\frac{\partial}{\partial y_\textrm{pred}}
|
| 166 |
+
D(y, \y_\textrm{pred}; weights)`.
|
| 167 |
+
|
| 168 |
+
Parameters
|
| 169 |
+
----------
|
| 170 |
+
y : array, shape (n_samples,)
|
| 171 |
+
Target values.
|
| 172 |
+
|
| 173 |
+
y_pred : array, shape (n_samples,)
|
| 174 |
+
Predicted mean.
|
| 175 |
+
|
| 176 |
+
weights : {int, array of shape (n_samples,)}, default=1
|
| 177 |
+
Weights or exposure to which variance is inverse proportional.
|
| 178 |
+
"""
|
| 179 |
+
return weights * self.unit_deviance_derivative(y, y_pred)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class TweedieDistribution(ExponentialDispersionModel):
|
| 183 |
+
r"""A class for the Tweedie distribution.
|
| 184 |
+
|
| 185 |
+
A Tweedie distribution with mean :math:`y_\textrm{pred}=\mathrm{E}[Y]`
|
| 186 |
+
is uniquely defined by it's mean-variance relationship
|
| 187 |
+
:math:`\mathrm{Var}[Y] \propto y_\textrm{pred}^power`.
|
| 188 |
+
|
| 189 |
+
Special cases are:
|
| 190 |
+
|
| 191 |
+
===== ================
|
| 192 |
+
Power Distribution
|
| 193 |
+
===== ================
|
| 194 |
+
0 Normal
|
| 195 |
+
1 Poisson
|
| 196 |
+
(1,2) Compound Poisson
|
| 197 |
+
2 Gamma
|
| 198 |
+
3 Inverse Gaussian
|
| 199 |
+
|
| 200 |
+
Parameters
|
| 201 |
+
----------
|
| 202 |
+
power : float, default=0
|
| 203 |
+
The variance power of the `unit_variance`
|
| 204 |
+
:math:`v(y_\textrm{pred}) = y_\textrm{pred}^{power}`.
|
| 205 |
+
For ``0<power<1``, no distribution exists.
|
| 206 |
+
"""
|
| 207 |
+
|
| 208 |
+
def __init__(self, power=0):
|
| 209 |
+
self.power = power
|
| 210 |
+
|
| 211 |
+
@property
|
| 212 |
+
def power(self):
|
| 213 |
+
return self._power
|
| 214 |
+
|
| 215 |
+
@power.setter
|
| 216 |
+
def power(self, power):
|
| 217 |
+
# We use a property with a setter, to update lower and
|
| 218 |
+
# upper bound when the power parameter is updated e.g. in grid
|
| 219 |
+
# search.
|
| 220 |
+
if not isinstance(power, numbers.Real):
|
| 221 |
+
raise TypeError("power must be a real number, input was {0}".format(power))
|
| 222 |
+
|
| 223 |
+
if power <= 0:
|
| 224 |
+
# Extreme Stable or Normal distribution
|
| 225 |
+
self._lower_bound = DistributionBoundary(-np.Inf, inclusive=False)
|
| 226 |
+
elif 0 < power < 1:
|
| 227 |
+
raise ValueError(
|
| 228 |
+
"Tweedie distribution is only defined for power<=0 and power>=1."
|
| 229 |
+
)
|
| 230 |
+
elif 1 <= power < 2:
|
| 231 |
+
# Poisson or Compound Poisson distribution
|
| 232 |
+
self._lower_bound = DistributionBoundary(0, inclusive=True)
|
| 233 |
+
elif power >= 2:
|
| 234 |
+
# Gamma, Positive Stable, Inverse Gaussian distributions
|
| 235 |
+
self._lower_bound = DistributionBoundary(0, inclusive=False)
|
| 236 |
+
else: # pragma: no cover
|
| 237 |
+
# this branch should be unreachable.
|
| 238 |
+
raise ValueError
|
| 239 |
+
|
| 240 |
+
self._power = power
|
| 241 |
+
|
| 242 |
+
def unit_variance(self, y_pred):
|
| 243 |
+
"""Compute the unit variance of a Tweedie distribution
|
| 244 |
+
v(y_\textrm{pred})=y_\textrm{pred}**power.
|
| 245 |
+
|
| 246 |
+
Parameters
|
| 247 |
+
----------
|
| 248 |
+
y_pred : array of shape (n_samples,)
|
| 249 |
+
Predicted mean.
|
| 250 |
+
"""
|
| 251 |
+
return np.power(y_pred, self.power)
|
| 252 |
+
|
| 253 |
+
def unit_deviance(self, y, y_pred, check_input=False):
|
| 254 |
+
r"""Compute the unit deviance.
|
| 255 |
+
|
| 256 |
+
The unit_deviance :math:`d(y,y_\textrm{pred})` can be defined by the
|
| 257 |
+
log-likelihood as
|
| 258 |
+
:math:`d(y,y_\textrm{pred}) = -2\phi\cdot
|
| 259 |
+
\left(loglike(y,y_\textrm{pred},\phi) - loglike(y,y,\phi)\right).`
|
| 260 |
+
|
| 261 |
+
Parameters
|
| 262 |
+
----------
|
| 263 |
+
y : array of shape (n_samples,)
|
| 264 |
+
Target values.
|
| 265 |
+
|
| 266 |
+
y_pred : array of shape (n_samples,)
|
| 267 |
+
Predicted mean.
|
| 268 |
+
|
| 269 |
+
check_input : bool, default=False
|
| 270 |
+
If True raise an exception on invalid y or y_pred values, otherwise
|
| 271 |
+
they will be propagated as NaN.
|
| 272 |
+
Returns
|
| 273 |
+
-------
|
| 274 |
+
deviance: array of shape (n_samples,)
|
| 275 |
+
Computed deviance
|
| 276 |
+
"""
|
| 277 |
+
p = self.power
|
| 278 |
+
|
| 279 |
+
if check_input:
|
| 280 |
+
message = (
|
| 281 |
+
"Mean Tweedie deviance error with power={} can only be used on ".format(
|
| 282 |
+
p
|
| 283 |
+
)
|
| 284 |
+
)
|
| 285 |
+
if p < 0:
|
| 286 |
+
# 'Extreme stable', y any real number, y_pred > 0
|
| 287 |
+
if (y_pred <= 0).any():
|
| 288 |
+
raise ValueError(message + "strictly positive y_pred.")
|
| 289 |
+
elif p == 0:
|
| 290 |
+
# Normal, y and y_pred can be any real number
|
| 291 |
+
pass
|
| 292 |
+
elif 0 < p < 1:
|
| 293 |
+
raise ValueError(
|
| 294 |
+
"Tweedie deviance is only defined for power<=0 and power>=1."
|
| 295 |
+
)
|
| 296 |
+
elif 1 <= p < 2:
|
| 297 |
+
# Poisson and compound Poisson distribution, y >= 0, y_pred > 0
|
| 298 |
+
if (y < 0).any() or (y_pred <= 0).any():
|
| 299 |
+
raise ValueError(
|
| 300 |
+
message + "non-negative y and strictly positive y_pred."
|
| 301 |
+
)
|
| 302 |
+
elif p >= 2:
|
| 303 |
+
# Gamma and Extreme stable distribution, y and y_pred > 0
|
| 304 |
+
if (y <= 0).any() or (y_pred <= 0).any():
|
| 305 |
+
raise ValueError(message + "strictly positive y and y_pred.")
|
| 306 |
+
else: # pragma: nocover
|
| 307 |
+
# Unreachable statement
|
| 308 |
+
raise ValueError
|
| 309 |
+
|
| 310 |
+
if p < 0:
|
| 311 |
+
# 'Extreme stable', y any real number, y_pred > 0
|
| 312 |
+
dev = 2 * (
|
| 313 |
+
np.power(np.maximum(y, 0), 2 - p) / ((1 - p) * (2 - p))
|
| 314 |
+
- y * np.power(y_pred, 1 - p) / (1 - p)
|
| 315 |
+
+ np.power(y_pred, 2 - p) / (2 - p)
|
| 316 |
+
)
|
| 317 |
+
|
| 318 |
+
elif p == 0:
|
| 319 |
+
# Normal distribution, y and y_pred any real number
|
| 320 |
+
dev = (y - y_pred) ** 2
|
| 321 |
+
elif p < 1:
|
| 322 |
+
raise ValueError(
|
| 323 |
+
"Tweedie deviance is only defined for power<=0 and power>=1."
|
| 324 |
+
)
|
| 325 |
+
elif p == 1:
|
| 326 |
+
# Poisson distribution
|
| 327 |
+
dev = 2 * (xlogy(y, y / y_pred) - y + y_pred)
|
| 328 |
+
elif p == 2:
|
| 329 |
+
# Gamma distribution
|
| 330 |
+
dev = 2 * (np.log(y_pred / y) + y / y_pred - 1)
|
| 331 |
+
else:
|
| 332 |
+
dev = 2 * (
|
| 333 |
+
np.power(y, 2 - p) / ((1 - p) * (2 - p))
|
| 334 |
+
- y * np.power(y_pred, 1 - p) / (1 - p)
|
| 335 |
+
+ np.power(y_pred, 2 - p) / (2 - p)
|
| 336 |
+
)
|
| 337 |
+
return dev
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
class NormalDistribution(TweedieDistribution):
|
| 341 |
+
"""Class for the Normal (aka Gaussian) distribution."""
|
| 342 |
+
|
| 343 |
+
def __init__(self):
|
| 344 |
+
super().__init__(power=0)
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
class PoissonDistribution(TweedieDistribution):
|
| 348 |
+
"""Class for the scaled Poisson distribution."""
|
| 349 |
+
|
| 350 |
+
def __init__(self):
|
| 351 |
+
super().__init__(power=1)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
class GammaDistribution(TweedieDistribution):
|
| 355 |
+
"""Class for the Gamma distribution."""
|
| 356 |
+
|
| 357 |
+
def __init__(self):
|
| 358 |
+
super().__init__(power=2)
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class InverseGaussianDistribution(TweedieDistribution):
|
| 362 |
+
"""Class for the scaled InverseGaussianDistribution distribution."""
|
| 363 |
+
|
| 364 |
+
def __init__(self):
|
| 365 |
+
super().__init__(power=3)
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
EDM_DISTRIBUTIONS = {
|
| 369 |
+
"normal": NormalDistribution,
|
| 370 |
+
"poisson": PoissonDistribution,
|
| 371 |
+
"gamma": GammaDistribution,
|
| 372 |
+
"inverse-gaussian": InverseGaussianDistribution,
|
| 373 |
+
}
|
mgm/lib/python3.10/site-packages/sklearn/_loss/link.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module contains classes for invertible (and differentiable) link functions.
|
| 3 |
+
"""
|
| 4 |
+
# Author: Christian Lorentzen <lorentzen.ch@gmail.com>
|
| 5 |
+
|
| 6 |
+
from abc import ABC, abstractmethod
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
from scipy.special import expit, logit
|
| 11 |
+
from scipy.stats import gmean
|
| 12 |
+
from ..utils.extmath import softmax
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclass
class Interval:
    # A real interval; inclusiveness of each endpoint is controlled by the
    # corresponding *_inclusive flag.
    low: float
    high: float
    low_inclusive: bool
    high_inclusive: bool

    def __post_init__(self):
        """Check that low <= high"""
        if self.low > self.high:
            raise ValueError(
                f"One must have low <= high; got low={self.low}, high={self.high}."
            )

    def includes(self, x):
        """Test whether all values of x are in interval range.

        Parameters
        ----------
        x : ndarray
            Array whose elements are tested to be in interval range.

        Returns
        -------
        result : bool
        """
        lower_ok = (
            np.greater_equal(x, self.low)
            if self.low_inclusive
            else np.greater(x, self.low)
        )
        if not np.all(lower_ok):
            return False

        upper_ok = (
            np.less_equal(x, self.high)
            if self.high_inclusive
            else np.less(x, self.high)
        )
        # np.all returns numpy.bool_; coerce to a builtin bool.
        return bool(np.all(upper_ok))
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _inclusive_low_high(interval, dtype=np.float64):
|
| 59 |
+
"""Generate values low and high to be within the interval range.
|
| 60 |
+
|
| 61 |
+
This is used in tests only.
|
| 62 |
+
|
| 63 |
+
Returns
|
| 64 |
+
-------
|
| 65 |
+
low, high : tuple
|
| 66 |
+
The returned values low and high lie within the interval.
|
| 67 |
+
"""
|
| 68 |
+
eps = 10 * np.finfo(dtype).eps
|
| 69 |
+
if interval.low == -np.inf:
|
| 70 |
+
low = -1e10
|
| 71 |
+
elif interval.low < 0:
|
| 72 |
+
low = interval.low * (1 - eps) + eps
|
| 73 |
+
else:
|
| 74 |
+
low = interval.low * (1 + eps) + eps
|
| 75 |
+
|
| 76 |
+
if interval.high == np.inf:
|
| 77 |
+
high = 1e10
|
| 78 |
+
elif interval.high < 0:
|
| 79 |
+
high = interval.high * (1 + eps) - eps
|
| 80 |
+
else:
|
| 81 |
+
high = interval.high * (1 - eps) - eps
|
| 82 |
+
|
| 83 |
+
return low, high
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class BaseLink(ABC):
    """Abstract base class for differentiable, invertible link functions.

    Convention:
        - link function g: raw_prediction = g(y_pred)
        - inverse link h: y_pred = h(raw_prediction)

    For (generalized) linear models, `raw_prediction = X @ coef` is the so
    called linear predictor, and `y_pred = h(raw_prediction)` is the predicted
    conditional (on X) expected value of the target `y_true`.

    The methods are not implemented as staticmethods in case a link function
    needs parameters.
    """

    is_multiclass = False  # used for testing only

    # Usually, raw_prediction may be any real number and y_pred is an open
    # interval.
    # interval_raw_prediction = Interval(-np.inf, np.inf, False, False)
    interval_y_pred = Interval(-np.inf, np.inf, False, False)

    @abstractmethod
    def link(self, y_pred, out=None):
        """Compute the link function g(y_pred).

        The link function maps (predicted) target values to raw predictions,
        i.e. `g(y_pred) = raw_prediction`.

        Parameters
        ----------
        y_pred : array
            Predicted target values.
        out : array
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or None,
            a freshly-allocated array is returned.

        Returns
        -------
        out : array
            Output array, element-wise link function.
        """

    @abstractmethod
    def inverse(self, raw_prediction, out=None):
        """Compute the inverse link function h(raw_prediction).

        The inverse link function maps raw predictions to predicted target
        values, i.e. `h(raw_prediction) = y_pred`.

        Parameters
        ----------
        raw_prediction : array
            Raw prediction values (in link space).
        out : array
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or None,
            a freshly-allocated array is returned.

        Returns
        -------
        out : array
            Output array, element-wise inverse link function.
        """
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class IdentityLink(BaseLink):
    """The identity link function g(x)=x."""

    def link(self, y_pred, out=None):
        # With no output buffer, identity is a no-op; otherwise copy into it.
        if out is None:
            return y_pred
        np.copyto(out, y_pred)
        return out

    # The identity is its own inverse.
    inverse = link
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class LogLink(BaseLink):
    """The log link function g(x)=log(x)."""

    # log requires strictly positive predictions.
    interval_y_pred = Interval(0, np.inf, False, False)

    def link(self, y_pred, out=None):
        # g(x) = log(x)
        return np.log(y_pred, out=out)

    def inverse(self, raw_prediction, out=None):
        # h(x) = exp(x)
        return np.exp(raw_prediction, out=out)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class LogitLink(BaseLink):
    """The logit link function g(x)=logit(x)."""

    # Probabilities strictly inside (0, 1).
    interval_y_pred = Interval(0, 1, False, False)

    def link(self, y_pred, out=None):
        # g(x) = log(x / (1 - x))
        return logit(y_pred, out=out)

    def inverse(self, raw_prediction, out=None):
        # h(x) = 1 / (1 + exp(-x)), the sigmoid
        return expit(raw_prediction, out=out)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class MultinomialLogit(BaseLink):
    """The symmetric multinomial logit function.

    Convention:
        - y_pred.shape = raw_prediction.shape = (n_samples, n_classes)

    Notes:
        - The inverse link h is the softmax function.
        - The sum is over the second axis, i.e. axis=1 (n_classes).

    We have to choose additional constraints in order to make

        y_pred[k] = exp(raw_pred[k]) / sum(exp(raw_pred[k]), k=0..n_classes-1)

    for n_classes classes identifiable and invertible.
    We choose the symmetric side constraint where the geometric mean response
    is set as reference category, see [2]:

    The symmetric multinomial logit link function for a single data point is
    then defined as

        raw_prediction[k] = g(y_pred[k]) = log(y_pred[k]/gmean(y_pred))
        = log(y_pred[k]) - mean(log(y_pred)).

    Note that this is equivalent to the definition in [1] and implies mean
    centered raw predictions:

        sum(raw_prediction[k], k=0..n_classes-1) = 0.

    For linear models with raw_prediction = X @ coef, this corresponds to
    sum(coef[k], k=0..n_classes-1) = 0, i.e. the sum over classes for every
    feature is zero.

    Reference
    ---------
    .. [1] Friedman, Jerome; Hastie, Trevor; Tibshirani, Robert. "Additive
        logistic regression: a statistical view of boosting" Ann. Statist.
        28 (2000), no. 2, 337--407. doi:10.1214/aos/1016218223.
        https://projecteuclid.org/euclid.aos/1016218223

    .. [2] Zahid, Faisal Maqbool and Gerhard Tutz. "Ridge estimation for
        multinomial logit models with symmetric side constraints."
        Computational Statistics 28 (2013): 1017-1034.
        http://epub.ub.uni-muenchen.de/11001/1/tr067.pdf
    """

    is_multiclass = True
    interval_y_pred = Interval(0, 1, False, False)

    def symmetrize_raw_prediction(self, raw_prediction):
        # Enforce the per-sample zero-sum (mean centered) side constraint.
        row_means = np.mean(raw_prediction, axis=1)[:, np.newaxis]
        return raw_prediction - row_means

    def link(self, y_pred, out=None):
        # geometric mean as reference category
        reference = gmean(y_pred, axis=1)[:, np.newaxis]
        return np.log(y_pred / reference, out=out)

    def inverse(self, raw_prediction, out=None):
        if out is None:
            return softmax(raw_prediction, copy=True)
        # Fill the caller-provided buffer and apply softmax in place.
        np.copyto(out, raw_prediction)
        softmax(out, copy=False)
        return out
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
# Mapping from user-facing link name to its implementing class.
_LINKS = dict(
    identity=IdentityLink,
    log=LogLink,
    logit=LogitLink,
    multinomial_logit=MultinomialLogit,
)
|
mgm/lib/python3.10/site-packages/sklearn/_loss/loss.py
ADDED
|
@@ -0,0 +1,1027 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains loss classes suitable for fitting.
|
| 3 |
+
|
| 4 |
+
It is not part of the public API.
|
| 5 |
+
Specific losses are used for regression, binary classification or multiclass
|
| 6 |
+
classification.
|
| 7 |
+
"""
|
| 8 |
+
# Goals:
|
| 9 |
+
# - Provide a common private module for loss functions/classes.
|
| 10 |
+
# - To be used in:
|
| 11 |
+
# - LogisticRegression
|
| 12 |
+
# - PoissonRegressor, GammaRegressor, TweedieRegressor
|
| 13 |
+
# - HistGradientBoostingRegressor, HistGradientBoostingClassifier
|
| 14 |
+
# - GradientBoostingRegressor, GradientBoostingClassifier
|
| 15 |
+
# - SGDRegressor, SGDClassifier
|
| 16 |
+
# - Replace link module of GLMs.
|
| 17 |
+
|
| 18 |
+
import numbers
|
| 19 |
+
import numpy as np
|
| 20 |
+
from scipy.special import xlogy
|
| 21 |
+
from ._loss import (
|
| 22 |
+
CyHalfSquaredError,
|
| 23 |
+
CyAbsoluteError,
|
| 24 |
+
CyPinballLoss,
|
| 25 |
+
CyHalfPoissonLoss,
|
| 26 |
+
CyHalfGammaLoss,
|
| 27 |
+
CyHalfTweedieLoss,
|
| 28 |
+
CyHalfTweedieLossIdentity,
|
| 29 |
+
CyHalfBinomialLoss,
|
| 30 |
+
CyHalfMultinomialLoss,
|
| 31 |
+
)
|
| 32 |
+
from .link import (
|
| 33 |
+
Interval,
|
| 34 |
+
IdentityLink,
|
| 35 |
+
LogLink,
|
| 36 |
+
LogitLink,
|
| 37 |
+
MultinomialLogit,
|
| 38 |
+
)
|
| 39 |
+
from ..utils import check_scalar
|
| 40 |
+
from ..utils._readonly_array_wrapper import ReadonlyArrayWrapper
|
| 41 |
+
from ..utils.stats import _weighted_percentile
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Note: The shape of raw_prediction for multiclass classifications are
|
| 45 |
+
# - GradientBoostingClassifier: (n_samples, n_classes)
|
| 46 |
+
# - HistGradientBoostingClassifier: (n_classes, n_samples)
|
| 47 |
+
#
|
| 48 |
+
# Note: Instead of inheritance like
|
| 49 |
+
#
|
| 50 |
+
# class BaseLoss(BaseLink, CyLossFunction):
|
| 51 |
+
# ...
|
| 52 |
+
#
|
| 53 |
+
# # Note: Naturally, we would inherit in the following order
|
| 54 |
+
# # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
|
| 55 |
+
# # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as
|
| 56 |
+
# # the last one. This, of course, changes the MRO.
|
| 57 |
+
# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss):
|
| 58 |
+
#
|
| 59 |
+
# we use composition. This way we improve maintainability by avoiding the above
|
| 60 |
+
# mentioned Cython edge case and have easier to understand code (which method calls
|
| 61 |
+
# which code).
|
| 62 |
+
class BaseLoss:
|
| 63 |
+
"""Base class for a loss function of 1-dimensional targets.
|
| 64 |
+
|
| 65 |
+
Conventions:
|
| 66 |
+
|
| 67 |
+
- y_true.shape = sample_weight.shape = (n_samples,)
|
| 68 |
+
- y_pred.shape = raw_prediction.shape = (n_samples,)
|
| 69 |
+
- If is_multiclass is true (multiclass classification), then
|
| 70 |
+
y_pred.shape = raw_prediction.shape = (n_samples, n_classes)
|
| 71 |
+
Note that this corresponds to the return value of decision_function.
|
| 72 |
+
|
| 73 |
+
y_true, y_pred, sample_weight and raw_prediction must either be all float64
|
| 74 |
+
or all float32.
|
| 75 |
+
gradient and hessian must be either both float64 or both float32.
|
| 76 |
+
|
| 77 |
+
Note that y_pred = link.inverse(raw_prediction).
|
| 78 |
+
|
| 79 |
+
Specific loss classes can inherit specific link classes to satisfy
|
| 80 |
+
BaseLink's abstractmethods.
|
| 81 |
+
|
| 82 |
+
Parameters
|
| 83 |
+
----------
|
| 84 |
+
sample_weight : {None, ndarray}
|
| 85 |
+
If sample_weight is None, the hessian might be constant.
|
| 86 |
+
n_classes : {None, int}
|
| 87 |
+
The number of classes for classification, else None.
|
| 88 |
+
|
| 89 |
+
Attributes
|
| 90 |
+
----------
|
| 91 |
+
closs: CyLossFunction
|
| 92 |
+
link : BaseLink
|
| 93 |
+
interval_y_true : Interval
|
| 94 |
+
Valid interval for y_true
|
| 95 |
+
interval_y_pred : Interval
|
| 96 |
+
Valid Interval for y_pred
|
| 97 |
+
differentiable : bool
|
| 98 |
+
Indicates whether or not loss function is differentiable in
|
| 99 |
+
raw_prediction everywhere.
|
| 100 |
+
need_update_leaves_values : bool
|
| 101 |
+
Indicates whether decision trees in gradient boosting need to update
|
| 102 |
+
leave values after having been fit to the (negative) gradients.
|
| 103 |
+
approx_hessian : bool
|
| 104 |
+
Indicates whether the hessian is approximated or exact. If,
|
| 105 |
+
approximated, it should be larger or equal to the exact one.
|
| 106 |
+
constant_hessian : bool
|
| 107 |
+
Indicates whether the hessian is one for this loss.
|
| 108 |
+
is_multiclass : bool
|
| 109 |
+
Indicates whether n_classes > 2 is allowed.
|
| 110 |
+
"""
|
| 111 |
+
|
| 112 |
+
# For decision trees:
|
| 113 |
+
# This variable indicates whether the loss requires the leaves values to
|
| 114 |
+
# be updated once the tree has been trained. The trees are trained to
|
| 115 |
+
# predict a Newton-Raphson step (see grower._finalize_leaf()). But for
|
| 116 |
+
# some losses (e.g. least absolute deviation) we need to adjust the tree
|
| 117 |
+
# values to account for the "line search" of the gradient descent
|
| 118 |
+
# procedure. See the original paper Greedy Function Approximation: A
|
| 119 |
+
# Gradient Boosting Machine by Friedman
|
| 120 |
+
# (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.
|
| 121 |
+
need_update_leaves_values = False
|
| 122 |
+
differentiable = True
|
| 123 |
+
is_multiclass = False
|
| 124 |
+
|
| 125 |
+
def __init__(self, closs, link, n_classes=None):
|
| 126 |
+
self.closs = closs
|
| 127 |
+
self.link = link
|
| 128 |
+
self.approx_hessian = False
|
| 129 |
+
self.constant_hessian = False
|
| 130 |
+
self.n_classes = n_classes
|
| 131 |
+
self.interval_y_true = Interval(-np.inf, np.inf, False, False)
|
| 132 |
+
self.interval_y_pred = self.link.interval_y_pred
|
| 133 |
+
|
| 134 |
+
def in_y_true_range(self, y):
|
| 135 |
+
"""Return True if y is in the valid range of y_true.
|
| 136 |
+
|
| 137 |
+
Parameters
|
| 138 |
+
----------
|
| 139 |
+
y : ndarray
|
| 140 |
+
"""
|
| 141 |
+
return self.interval_y_true.includes(y)
|
| 142 |
+
|
| 143 |
+
def in_y_pred_range(self, y):
|
| 144 |
+
"""Return True if y is in the valid range of y_pred.
|
| 145 |
+
|
| 146 |
+
Parameters
|
| 147 |
+
----------
|
| 148 |
+
y : ndarray
|
| 149 |
+
"""
|
| 150 |
+
return self.interval_y_pred.includes(y)
|
| 151 |
+
|
| 152 |
+
def loss(
|
| 153 |
+
self,
|
| 154 |
+
y_true,
|
| 155 |
+
raw_prediction,
|
| 156 |
+
sample_weight=None,
|
| 157 |
+
loss_out=None,
|
| 158 |
+
n_threads=1,
|
| 159 |
+
):
|
| 160 |
+
"""Compute the pointwise loss value for each input.
|
| 161 |
+
|
| 162 |
+
Parameters
|
| 163 |
+
----------
|
| 164 |
+
y_true : C-contiguous array of shape (n_samples,)
|
| 165 |
+
Observed, true target values.
|
| 166 |
+
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
|
| 167 |
+
shape (n_samples, n_classes)
|
| 168 |
+
Raw prediction values (in link space).
|
| 169 |
+
sample_weight : None or C-contiguous array of shape (n_samples,)
|
| 170 |
+
Sample weights.
|
| 171 |
+
loss_out : None or C-contiguous array of shape (n_samples,)
|
| 172 |
+
A location into which the result is stored. If None, a new array
|
| 173 |
+
might be created.
|
| 174 |
+
n_threads : int, default=1
|
| 175 |
+
Might use openmp thread parallelism.
|
| 176 |
+
|
| 177 |
+
Returns
|
| 178 |
+
-------
|
| 179 |
+
loss : array of shape (n_samples,)
|
| 180 |
+
Element-wise loss function.
|
| 181 |
+
"""
|
| 182 |
+
if loss_out is None:
|
| 183 |
+
loss_out = np.empty_like(y_true)
|
| 184 |
+
# Be graceful to shape (n_samples, 1) -> (n_samples,)
|
| 185 |
+
if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
|
| 186 |
+
raw_prediction = raw_prediction.squeeze(1)
|
| 187 |
+
|
| 188 |
+
y_true = ReadonlyArrayWrapper(y_true)
|
| 189 |
+
raw_prediction = ReadonlyArrayWrapper(raw_prediction)
|
| 190 |
+
if sample_weight is not None:
|
| 191 |
+
sample_weight = ReadonlyArrayWrapper(sample_weight)
|
| 192 |
+
return self.closs.loss(
|
| 193 |
+
y_true=y_true,
|
| 194 |
+
raw_prediction=raw_prediction,
|
| 195 |
+
sample_weight=sample_weight,
|
| 196 |
+
loss_out=loss_out,
|
| 197 |
+
n_threads=n_threads,
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
def loss_gradient(
|
| 201 |
+
self,
|
| 202 |
+
y_true,
|
| 203 |
+
raw_prediction,
|
| 204 |
+
sample_weight=None,
|
| 205 |
+
loss_out=None,
|
| 206 |
+
gradient_out=None,
|
| 207 |
+
n_threads=1,
|
| 208 |
+
):
|
| 209 |
+
"""Compute loss and gradient w.r.t. raw_prediction for each input.
|
| 210 |
+
|
| 211 |
+
Parameters
|
| 212 |
+
----------
|
| 213 |
+
y_true : C-contiguous array of shape (n_samples,)
|
| 214 |
+
Observed, true target values.
|
| 215 |
+
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
|
| 216 |
+
shape (n_samples, n_classes)
|
| 217 |
+
Raw prediction values (in link space).
|
| 218 |
+
sample_weight : None or C-contiguous array of shape (n_samples,)
|
| 219 |
+
Sample weights.
|
| 220 |
+
loss_out : None or C-contiguous array of shape (n_samples,)
|
| 221 |
+
A location into which the loss is stored. If None, a new array
|
| 222 |
+
might be created.
|
| 223 |
+
gradient_out : None or C-contiguous array of shape (n_samples,) or array \
|
| 224 |
+
of shape (n_samples, n_classes)
|
| 225 |
+
A location into which the gradient is stored. If None, a new array
|
| 226 |
+
might be created.
|
| 227 |
+
n_threads : int, default=1
|
| 228 |
+
Might use openmp thread parallelism.
|
| 229 |
+
|
| 230 |
+
Returns
|
| 231 |
+
-------
|
| 232 |
+
loss : array of shape (n_samples,)
|
| 233 |
+
Element-wise loss function.
|
| 234 |
+
|
| 235 |
+
gradient : array of shape (n_samples,) or (n_samples, n_classes)
|
| 236 |
+
Element-wise gradients.
|
| 237 |
+
"""
|
| 238 |
+
if loss_out is None:
|
| 239 |
+
if gradient_out is None:
|
| 240 |
+
loss_out = np.empty_like(y_true)
|
| 241 |
+
gradient_out = np.empty_like(raw_prediction)
|
| 242 |
+
else:
|
| 243 |
+
loss_out = np.empty_like(y_true, dtype=gradient_out.dtype)
|
| 244 |
+
elif gradient_out is None:
|
| 245 |
+
gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype)
|
| 246 |
+
|
| 247 |
+
# Be graceful to shape (n_samples, 1) -> (n_samples,)
|
| 248 |
+
if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
|
| 249 |
+
raw_prediction = raw_prediction.squeeze(1)
|
| 250 |
+
if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
|
| 251 |
+
gradient_out = gradient_out.squeeze(1)
|
| 252 |
+
|
| 253 |
+
y_true = ReadonlyArrayWrapper(y_true)
|
| 254 |
+
raw_prediction = ReadonlyArrayWrapper(raw_prediction)
|
| 255 |
+
if sample_weight is not None:
|
| 256 |
+
sample_weight = ReadonlyArrayWrapper(sample_weight)
|
| 257 |
+
return self.closs.loss_gradient(
|
| 258 |
+
y_true=y_true,
|
| 259 |
+
raw_prediction=raw_prediction,
|
| 260 |
+
sample_weight=sample_weight,
|
| 261 |
+
loss_out=loss_out,
|
| 262 |
+
gradient_out=gradient_out,
|
| 263 |
+
n_threads=n_threads,
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
def gradient(
|
| 267 |
+
self,
|
| 268 |
+
y_true,
|
| 269 |
+
raw_prediction,
|
| 270 |
+
sample_weight=None,
|
| 271 |
+
gradient_out=None,
|
| 272 |
+
n_threads=1,
|
| 273 |
+
):
|
| 274 |
+
"""Compute gradient of loss w.r.t raw_prediction for each input.
|
| 275 |
+
|
| 276 |
+
Parameters
|
| 277 |
+
----------
|
| 278 |
+
y_true : C-contiguous array of shape (n_samples,)
|
| 279 |
+
Observed, true target values.
|
| 280 |
+
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
|
| 281 |
+
shape (n_samples, n_classes)
|
| 282 |
+
Raw prediction values (in link space).
|
| 283 |
+
sample_weight : None or C-contiguous array of shape (n_samples,)
|
| 284 |
+
Sample weights.
|
| 285 |
+
gradient_out : None or C-contiguous array of shape (n_samples,) or array \
|
| 286 |
+
of shape (n_samples, n_classes)
|
| 287 |
+
A location into which the result is stored. If None, a new array
|
| 288 |
+
might be created.
|
| 289 |
+
n_threads : int, default=1
|
| 290 |
+
Might use openmp thread parallelism.
|
| 291 |
+
|
| 292 |
+
Returns
|
| 293 |
+
-------
|
| 294 |
+
gradient : array of shape (n_samples,) or (n_samples, n_classes)
|
| 295 |
+
Element-wise gradients.
|
| 296 |
+
"""
|
| 297 |
+
if gradient_out is None:
|
| 298 |
+
gradient_out = np.empty_like(raw_prediction)
|
| 299 |
+
|
| 300 |
+
# Be graceful to shape (n_samples, 1) -> (n_samples,)
|
| 301 |
+
if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
|
| 302 |
+
raw_prediction = raw_prediction.squeeze(1)
|
| 303 |
+
if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
|
| 304 |
+
gradient_out = gradient_out.squeeze(1)
|
| 305 |
+
|
| 306 |
+
y_true = ReadonlyArrayWrapper(y_true)
|
| 307 |
+
raw_prediction = ReadonlyArrayWrapper(raw_prediction)
|
| 308 |
+
if sample_weight is not None:
|
| 309 |
+
sample_weight = ReadonlyArrayWrapper(sample_weight)
|
| 310 |
+
return self.closs.gradient(
|
| 311 |
+
y_true=y_true,
|
| 312 |
+
raw_prediction=raw_prediction,
|
| 313 |
+
sample_weight=sample_weight,
|
| 314 |
+
gradient_out=gradient_out,
|
| 315 |
+
n_threads=n_threads,
|
| 316 |
+
)
|
| 317 |
+
|
| 318 |
+
def gradient_hessian(
|
| 319 |
+
self,
|
| 320 |
+
y_true,
|
| 321 |
+
raw_prediction,
|
| 322 |
+
sample_weight=None,
|
| 323 |
+
gradient_out=None,
|
| 324 |
+
hessian_out=None,
|
| 325 |
+
n_threads=1,
|
| 326 |
+
):
|
| 327 |
+
"""Compute gradient and hessian of loss w.r.t raw_prediction.
|
| 328 |
+
|
| 329 |
+
Parameters
|
| 330 |
+
----------
|
| 331 |
+
y_true : C-contiguous array of shape (n_samples,)
|
| 332 |
+
Observed, true target values.
|
| 333 |
+
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
|
| 334 |
+
shape (n_samples, n_classes)
|
| 335 |
+
Raw prediction values (in link space).
|
| 336 |
+
sample_weight : None or C-contiguous array of shape (n_samples,)
|
| 337 |
+
Sample weights.
|
| 338 |
+
gradient_out : None or C-contiguous array of shape (n_samples,) or array \
|
| 339 |
+
of shape (n_samples, n_classes)
|
| 340 |
+
A location into which the gradient is stored. If None, a new array
|
| 341 |
+
might be created.
|
| 342 |
+
hessian_out : None or C-contiguous array of shape (n_samples,) or array \
|
| 343 |
+
of shape (n_samples, n_classes)
|
| 344 |
+
A location into which the hessian is stored. If None, a new array
|
| 345 |
+
might be created.
|
| 346 |
+
n_threads : int, default=1
|
| 347 |
+
Might use openmp thread parallelism.
|
| 348 |
+
|
| 349 |
+
Returns
|
| 350 |
+
-------
|
| 351 |
+
gradient : arrays of shape (n_samples,) or (n_samples, n_classes)
|
| 352 |
+
Element-wise gradients.
|
| 353 |
+
|
| 354 |
+
hessian : arrays of shape (n_samples,) or (n_samples, n_classes)
|
| 355 |
+
Element-wise hessians.
|
| 356 |
+
"""
|
| 357 |
+
if gradient_out is None:
|
| 358 |
+
if hessian_out is None:
|
| 359 |
+
gradient_out = np.empty_like(raw_prediction)
|
| 360 |
+
hessian_out = np.empty_like(raw_prediction)
|
| 361 |
+
else:
|
| 362 |
+
gradient_out = np.empty_like(hessian_out)
|
| 363 |
+
elif hessian_out is None:
|
| 364 |
+
hessian_out = np.empty_like(gradient_out)
|
| 365 |
+
|
| 366 |
+
# Be graceful to shape (n_samples, 1) -> (n_samples,)
|
| 367 |
+
if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
|
| 368 |
+
raw_prediction = raw_prediction.squeeze(1)
|
| 369 |
+
if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
|
| 370 |
+
gradient_out = gradient_out.squeeze(1)
|
| 371 |
+
if hessian_out.ndim == 2 and hessian_out.shape[1] == 1:
|
| 372 |
+
hessian_out = hessian_out.squeeze(1)
|
| 373 |
+
|
| 374 |
+
y_true = ReadonlyArrayWrapper(y_true)
|
| 375 |
+
raw_prediction = ReadonlyArrayWrapper(raw_prediction)
|
| 376 |
+
if sample_weight is not None:
|
| 377 |
+
sample_weight = ReadonlyArrayWrapper(sample_weight)
|
| 378 |
+
return self.closs.gradient_hessian(
|
| 379 |
+
y_true=y_true,
|
| 380 |
+
raw_prediction=raw_prediction,
|
| 381 |
+
sample_weight=sample_weight,
|
| 382 |
+
gradient_out=gradient_out,
|
| 383 |
+
hessian_out=hessian_out,
|
| 384 |
+
n_threads=n_threads,
|
| 385 |
+
)
|
| 386 |
+
|
| 387 |
+
def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1):
|
| 388 |
+
"""Compute the weighted average loss.
|
| 389 |
+
|
| 390 |
+
Parameters
|
| 391 |
+
----------
|
| 392 |
+
y_true : C-contiguous array of shape (n_samples,)
|
| 393 |
+
Observed, true target values.
|
| 394 |
+
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
|
| 395 |
+
shape (n_samples, n_classes)
|
| 396 |
+
Raw prediction values (in link space).
|
| 397 |
+
sample_weight : None or C-contiguous array of shape (n_samples,)
|
| 398 |
+
Sample weights.
|
| 399 |
+
n_threads : int, default=1
|
| 400 |
+
Might use openmp thread parallelism.
|
| 401 |
+
|
| 402 |
+
Returns
|
| 403 |
+
-------
|
| 404 |
+
loss : float
|
| 405 |
+
Mean or averaged loss function.
|
| 406 |
+
"""
|
| 407 |
+
return np.average(
|
| 408 |
+
self.loss(
|
| 409 |
+
y_true=y_true,
|
| 410 |
+
raw_prediction=raw_prediction,
|
| 411 |
+
sample_weight=None,
|
| 412 |
+
loss_out=None,
|
| 413 |
+
n_threads=n_threads,
|
| 414 |
+
),
|
| 415 |
+
weights=sample_weight,
|
| 416 |
+
)
|
| 417 |
+
|
| 418 |
+
def fit_intercept_only(self, y_true, sample_weight=None):
    """Compute raw_prediction of an intercept-only model.

    This can be used as initial estimates of predictions, i.e. before the
    first iteration in fit.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Observed, true target values.
    sample_weight : None or array of shape (n_samples,)
        Sample weights.

    Returns
    -------
    raw_prediction : numpy scalar or array of shape (n_classes,)
        Raw predictions of an intercept-only model.
    """
    # Default strategy: weighted mean of the target over the samples axis,
    # clipped into the valid y_pred range, then mapped to link scale.
    y_pred = np.average(y_true, weights=sample_weight, axis=0)
    eps = 10 * np.finfo(y_pred.dtype).eps
    interval = self.interval_y_pred

    if interval.low == -np.inf:
        lower = None
    else:
        # Open boundary: nudge inside by eps so the link stays finite.
        lower = interval.low if interval.low_inclusive else interval.low + eps

    if interval.high == np.inf:
        upper = None
    else:
        upper = interval.high if interval.high_inclusive else interval.high - eps

    if lower is None and upper is None:
        return self.link.link(y_pred)
    return self.link.link(np.clip(y_pred, lower, upper))
|
| 459 |
+
|
| 460 |
+
def constant_to_optimal_zero(self, y_true, sample_weight=None):
    """Calculate term dropped in loss.

    With this term added, the loss of perfect predictions is zero.
    """
    # Base implementation: nothing was dropped from the loss, so the
    # correction term is zero for every sample. Subclasses that skip
    # constant terms override this.
    return np.zeros_like(y_true)
|
| 466 |
+
|
| 467 |
+
def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order="F"):
    """Initialize arrays for gradients and hessians.

    Unless hessians are constant, arrays are initialized with undefined values.

    Parameters
    ----------
    n_samples : int
        The number of samples, usually passed to `fit()`.
    dtype : {np.float64, np.float32}, default=np.float64
        The dtype of the arrays gradient and hessian.
    order : {'C', 'F'}, default='F'
        Order of the arrays gradient and hessian. The default 'F' makes the
        arrays contiguous along samples.

    Returns
    -------
    gradient : C-contiguous array of shape (n_samples,) or array of shape \
        (n_samples, n_classes)
        Empty array (allocated but not initialized) to be used as argument
        gradient_out.
    hessian : C-contiguous array of shape (n_samples,), array of shape
        (n_samples, n_classes) or shape (1,)
        Empty (allocated but not initialized) array to be used as argument
        hessian_out.
        If constant_hessian is True (e.g. `HalfSquaredError`), the array is
        initialized to ``1``.
    """
    if dtype not in (np.float32, np.float64):
        raise ValueError(
            "Valid options for 'dtype' are np.float32 and np.float64. "
            f"Got dtype={dtype} instead."
        )

    # Multiclass losses need one gradient column per class.
    shape = (n_samples, self.n_classes) if self.is_multiclass else (n_samples,)
    gradient = np.empty(shape=shape, dtype=dtype, order=order)

    if self.constant_hessian:
        # Constant hessians are represented by a single-element array of 1:
        # - this is exact for HalfSquaredError,
        # - for AbsoluteError hessians are really 0, but they are always
        #   ignored anyway.
        hessian = np.ones(shape=(1,), dtype=dtype)
    else:
        hessian = np.empty(shape=shape, dtype=dtype, order=order)

    return gradient, hessian
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
# Note: Naturally, we would inherit in the following order
|
| 520 |
+
# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
|
| 521 |
+
# But because of https://github.com/cython/cython/issues/4350 we
|
| 522 |
+
# set BaseLoss as the last one. This, of course, changes the MRO.
|
| 523 |
+
class HalfSquaredError(BaseLoss):
    """Half squared error with identity link, for regression.

    Domain:
    y_true and y_pred all real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, half squared error is defined as::

        loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2

    The factor of 0.5 simplifies the computation of gradients and results in a
    unit hessian (and is consistent with what is done in LightGBM). It is also
    half the Normal distribution deviance.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfSquaredError(), link=IdentityLink())
        # The hessian of half squared error is 1, hence constant — unless
        # per-sample weights rescale it, in which case it varies per sample.
        self.constant_hessian = sample_weight is None
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
class AbsoluteError(BaseLoss):
    """Absolute error with identity link, for regression.

    Domain:
    y_true and y_pred all real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, the absolute error is defined as::

        loss(x_i) = |y_true_i - raw_prediction_i|
    """

    # Not differentiable at y_true == raw_prediction, therefore gradient
    # boosting needs to recompute leaf values after building each tree.
    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyAbsoluteError(), link=IdentityLink())
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the weighted median of the target, i.e. over the samples
        axis=0.
        """
        if sample_weight is not None:
            return _weighted_percentile(y_true, sample_weight, 50)
        return np.median(y_true, axis=0)
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
class PinballLoss(BaseLoss):
    """Quantile loss aka pinball loss, for regression.

    Domain:
    y_true and y_pred all real numbers
    quantile in (0, 1)

    Link:
    y_pred = raw_prediction

    For a given sample x_i, the pinball loss is defined as::

        loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i)

        rho_{quantile}(u) = u * (quantile - 1_{u<0})
                          = -u *(1 - quantile)  if u < 0
                             u * quantile       if u >= 0

    Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError().

    Additional Attributes
    ---------------------
    quantile : float
        The quantile to be estimated. Must be in range (0, 1).
    """

    # The pinball loss has a kink at zero residual, so leaf values must be
    # refitted after each tree in gradient boosting.
    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None, quantile=0.5):
        # Reject quantiles outside the open interval (0, 1) before building
        # the Cython loss object.
        check_scalar(
            quantile,
            "quantile",
            target_type=numbers.Real,
            min_val=0,
            max_val=1,
            include_boundaries="neither",
        )
        super().__init__(
            closs=CyPinballLoss(quantile=float(quantile)),
            link=IdentityLink(),
        )
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the (weighted) quantile of the target at `self.closs.quantile`,
        taken over the samples axis=0.
        """
        percentile_level = 100 * self.closs.quantile
        if sample_weight is None:
            return np.percentile(y_true, percentile_level, axis=0)
        return _weighted_percentile(y_true, sample_weight, percentile_level)
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
class HalfPoissonLoss(BaseLoss):
    """Half Poisson deviance loss with log-link, for regression.

    Domain:
    y_true in non-negative real numbers
    y_pred in positive real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, half the Poisson deviance is defined as::

        loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i))
                    - y_true_i + exp(raw_prediction_i)

    Half the Poisson deviance is actually the negative log-likelihood up to
    constant terms (not involving raw_prediction) and simplifies the
    computation of the gradients.
    We also skip the constant term `y_true_i * log(y_true_i) - y_true_i`.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfPoissonLoss(), link=LogLink())
        # Targets may be exactly 0 (inclusive low) but never negative.
        self.interval_y_true = Interval(0, np.inf, True, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # xlogy handles y_true == 0 gracefully (0 * log(0) == 0).
        term = xlogy(y_true, y_true) - y_true
        return term if sample_weight is None else term * sample_weight
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
class HalfGammaLoss(BaseLoss):
    """Half Gamma deviance loss with log-link, for regression.

    Domain:
    y_true and y_pred in positive real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, half Gamma deviance loss is defined as::

        loss(x_i) = log(exp(raw_prediction_i)/y_true_i)
                    + y_true/exp(raw_prediction_i) - 1

    Half the Gamma deviance is actually proportional to the negative log-
    likelihood up to constant terms (not involving raw_prediction) and
    simplifies the computation of the gradients.
    We also skip the constant term `-log(y_true_i) - 1`.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfGammaLoss(), link=LogLink())
        # Strictly positive targets: both interval bounds are exclusive.
        self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # Constant term skipped in the loss; adding it makes the loss of a
        # perfect prediction zero.
        term = -np.log(y_true) - 1
        return term if sample_weight is None else term * sample_weight
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
class HalfTweedieLoss(BaseLoss):
    """Half Tweedie deviance loss with log-link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers
    power in real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, half Tweedie deviance loss with p=power is defined
    as::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p)
                    + exp(raw_prediction_i)**(2-p) / (2-p)

    Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link,
    HalfPoissonLoss and HalfGammaLoss.

    We also skip constant terms, but those are different for p=0, 1, 2.
    Therefore, the loss is not continuous in `power`.

    Note furthermore that although no Tweedie distribution exists for
    0 < power < 1, it still gives a strictly consistent scoring function for
    the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLoss(power=float(power)),
            link=LogLink(),
        )
        # The valid y_true range depends on the Tweedie power parameter.
        p = self.closs.power
        if p <= 0:
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif p < 2:
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # The limiting powers 0, 1 and 2 correspond to squared error, Poisson
        # and Gamma deviance; delegate to those losses for their skipped
        # constant terms.
        special_cases = {0: HalfSquaredError, 1: HalfPoissonLoss, 2: HalfGammaLoss}
        power = self.closs.power
        if power in special_cases:
            return special_cases[power]().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        term = np.power(np.maximum(y_true, 0), 2 - power) / (1 - power) / (2 - power)
        if sample_weight is not None:
            term *= sample_weight
        return term
|
| 764 |
+
|
| 765 |
+
|
| 766 |
+
class HalfTweedieLossIdentity(BaseLoss):
    """Half Tweedie deviance loss with identity link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers for power != 0
    y_pred in real numbers for power = 0
    power in real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, half Tweedie deviance loss with p=power is defined
    as::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * raw_prediction_i**(1-p) / (1-p)
                    + raw_prediction_i**(2-p) / (2-p)

    Note that the minimum value of this loss is 0.

    Note furthermore that although no Tweedie distribution exists for
    0 < power < 1, it still gives a strictly consistent scoring function for
    the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLossIdentity(power=float(power)),
            link=IdentityLink(),
        )
        # Both valid ranges below depend on the Tweedie power parameter.
        p = self.closs.power
        if p <= 0:
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif p < 2:
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

        if p == 0:
            # Squared-error limit: predictions may be any real number.
            self.interval_y_pred = Interval(-np.inf, np.inf, False, False)
        else:
            self.interval_y_pred = Interval(0, np.inf, False, False)
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
class HalfBinomialLoss(BaseLoss):
    """Half Binomial deviance loss with logit link, for binary classification.

    This is also know as binary cross entropy, log-loss and logistic loss.

    Domain:
    y_true in [0, 1], i.e. regression on the unit interval
    y_pred in (0, 1), i.e. boundaries excluded

    Link:
    y_pred = expit(raw_prediction)

    For a given sample x_i, half Binomial deviance is defined as the negative
    log-likelihood of the Binomial/Bernoulli distribution and can be expressed
    as::

        loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i

    See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman,
    section 4.4.1 (about logistic regression).

    Note that the formulation works for classification, y = {0, 1}, as well as
    logistic regression, y = [0, 1].
    If you add `constant_to_optimal_zero` to the loss, you get half the
    Bernoulli/binomial deviance.
    """

    def __init__(self, sample_weight=None):
        super().__init__(
            closs=CyHalfBinomialLoss(),
            link=LogitLink(),
            n_classes=2,
        )
        # Targets live on the closed unit interval [0, 1].
        self.interval_y_true = Interval(0, 1, True, True)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # Entropy-like term; non-zero only for y_true strictly inside (0, 1).
        term = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true)
        return term if sample_weight is None else term * sample_weight

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples,) or (n_samples, 1)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, 2)
            Element-wise class probabilities.
        """
        # Accept column vectors by flattening (n_samples, 1) -> (n_samples,).
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        n_samples = raw_prediction.shape[0]
        proba = np.empty((n_samples, 2), dtype=raw_prediction.dtype)
        proba[:, 1] = self.link.inverse(raw_prediction)
        proba[:, 0] = 1 - proba[:, 1]
        return proba
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
class HalfMultinomialLoss(BaseLoss):
    """Categorical cross-entropy loss, for multiclass classification.

    Domain:
    y_true in {0, 1, 2, 3, .., n_classes - 1}
    y_pred has n_classes elements, each element in (0, 1)

    Link:
    y_pred = softmax(raw_prediction)

    Note: We assume y_true to be already label encoded. The inverse link is
    softmax. But the full link function is the symmetric multinomial logit
    function.

    For a given sample x_i, the categorical cross-entropy loss is defined as
    the negative log-likelihood of the multinomial distribution, it
    generalizes the binary cross-entropy to more than 2 classes::

        loss_i = log(sum(exp(raw_pred_{i, k}), k=0..n_classes-1))
                - sum(y_true_{i, k} * raw_pred_{i, k}, k=0..n_classes-1)

    See [1].

    Note that for the hessian, we calculate only the diagonal part in the
    classes: If the full hessian for classes k and l and sample i is H_i_k_l,
    we calculate H_i_k_k, i.e. k=l.

    Reference
    ---------
    .. [1] :arxiv:`Simon, Noah, J. Friedman and T. Hastie.
        "A Blockwise Descent Algorithm for Group-penalized Multiresponse and
        Multinomial Regression".
        <1311.6529>`
    """

    is_multiclass = True

    def __init__(self, sample_weight=None, n_classes=3):
        super().__init__(
            closs=CyHalfMultinomialLoss(),
            link=MultinomialLogit(),
            n_classes=n_classes,
        )
        # Targets are label-encoded class indices: non-negative integers.
        self.interval_y_true = Interval(0, np.inf, True, False)
        self.interval_y_pred = Interval(0, 1, False, False)

    def in_y_true_range(self, y):
        """Return True if y is in the valid range of y_true.

        Parameters
        ----------
        y : ndarray
        """
        # Valid targets are non-negative AND integer-valued (label encoded).
        return self.interval_y_true.includes(y) and np.all(y.astype(int) == y)

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the softmax of the weighted average of the target, i.e. over
        the samples axis=0.
        """
        out = np.zeros(self.n_classes, dtype=y_true.dtype)
        eps = np.finfo(y_true.dtype).eps
        for k in range(self.n_classes):
            # Weighted empirical frequency of class k, clipped away from 0
            # and 1 so that the link (multinomial logit) stays finite.
            out[k] = np.average(y_true == k, weights=sample_weight, axis=0)
            out[k] = np.clip(out[k], eps, 1 - eps)
        # link expects a 2d array of shape (n_samples, n_classes); wrap the
        # class frequencies as a single "sample" and flatten the result.
        return self.link.link(out[None, :]).reshape(-1)

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        return self.link.inverse(raw_prediction)

    def gradient_proba(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        proba_out=None,
        n_threads=1,
    ):
        """Compute gradient and class probabilities for raw_prediction.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or array of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        proba_out : None or array of shape (n_samples, n_classes)
            A location into which the class probabilities are stored. If None,
            a new array might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : array of shape (n_samples, n_classes)
            Element-wise gradients.

        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        # Allocate whichever output buffer the caller did not supply, sized
        # and typed after whatever array is available (raw_prediction or the
        # sibling output buffer).
        if gradient_out is None:
            if proba_out is None:
                gradient_out = np.empty_like(raw_prediction)
                proba_out = np.empty_like(raw_prediction)
            else:
                gradient_out = np.empty_like(proba_out)
        elif proba_out is None:
            proba_out = np.empty_like(gradient_out)

        # Wrap inputs so the Cython routine accepts read-only arrays.
        y_true = ReadonlyArrayWrapper(y_true)
        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
        if sample_weight is not None:
            sample_weight = ReadonlyArrayWrapper(sample_weight)
        return self.closs.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            proba_out=proba_out,
            n_threads=n_threads,
        )
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
# Mapping from public loss-name strings to their loss classes. Estimators
# resolve their `loss` parameter through this registry.
_LOSSES = {
    "squared_error": HalfSquaredError,
    "absolute_error": AbsoluteError,
    "pinball_loss": PinballLoss,
    "poisson_loss": HalfPoissonLoss,
    "gamma_loss": HalfGammaLoss,
    "tweedie_loss": HalfTweedieLoss,
    "binomial_loss": HalfBinomialLoss,
    "multinomial_loss": HalfMultinomialLoss,
}
|
mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py
ADDED
|
File without changes
|
mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_glm_distribution.cpython-310.pyc
ADDED
|
Binary file (3.48 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc
ADDED
|
Binary file (23.9 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_glm_distribution.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: Christian Lorentzen <lorentzen.ch@gmail.com>
|
| 2 |
+
#
|
| 3 |
+
# License: BSD 3 clause
|
| 4 |
+
#
|
| 5 |
+
# TODO(1.3): remove file
|
| 6 |
+
import numpy as np
|
| 7 |
+
from numpy.testing import (
|
| 8 |
+
assert_allclose,
|
| 9 |
+
assert_array_equal,
|
| 10 |
+
)
|
| 11 |
+
from scipy.optimize import check_grad
|
| 12 |
+
import pytest
|
| 13 |
+
|
| 14 |
+
from sklearn._loss.glm_distribution import (
|
| 15 |
+
TweedieDistribution,
|
| 16 |
+
NormalDistribution,
|
| 17 |
+
PoissonDistribution,
|
| 18 |
+
GammaDistribution,
|
| 19 |
+
InverseGaussianDistribution,
|
| 20 |
+
DistributionBoundary,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@pytest.mark.parametrize(
    "family, expected",
    [
        (NormalDistribution(), [True, True, True]),
        (PoissonDistribution(), [False, True, True]),
        (TweedieDistribution(power=1.5), [False, True, True]),
        (GammaDistribution(), [False, False, True]),
        (InverseGaussianDistribution(), [False, False, True]),
        (TweedieDistribution(power=4.5), [False, False, True]),
    ],
)
def test_family_bounds(family, expected):
    """Test the valid range of distributions at -1, 0, 1."""
    # Each `expected` triple states whether -1, 0 and 1 are valid targets
    # for the given distribution family.
    result = family.in_y_range([-1, 0, 1])
    assert_array_equal(result, expected)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_invalid_distribution_bound():
    """A corrupted `_lower_bound` (not a DistributionBoundary) must raise."""
    dist = TweedieDistribution()
    # Overwrite the internal boundary with a plain number to trigger the
    # defensive type check inside in_y_range.
    dist._lower_bound = 0
    with pytest.raises(TypeError, match="must be of type DistributionBoundary"):
        dist.in_y_range([-1, 0, 1])
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def test_tweedie_distribution_power():
    """Validate the `power` parameter of TweedieDistribution."""
    # No Tweedie distribution exists for 0 < power < 1.
    msg = "distribution is only defined for power<=0 and power>=1"
    with pytest.raises(ValueError, match=msg):
        TweedieDistribution(power=0.5)

    with pytest.raises(TypeError, match="must be a real number"):
        TweedieDistribution(power=1j)

    with pytest.raises(TypeError, match="must be a real number"):
        dist = TweedieDistribution()
        dist.power = 1j

    dist = TweedieDistribution()
    assert isinstance(dist._lower_bound, DistributionBoundary)

    # Default power has an exclusive lower bound; power=1 (Poisson) makes
    # the lower bound 0 inclusive.
    assert dist._lower_bound.inclusive is False
    dist.power = 1
    assert dist._lower_bound.value == 0.0
    assert dist._lower_bound.inclusive is True
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@pytest.mark.parametrize(
    "family, chk_values",
    [
        (NormalDistribution(), [-1.5, -0.1, 0.1, 2.5]),
        (PoissonDistribution(), [0.1, 1.5]),
        (GammaDistribution(), [0.1, 1.5]),
        (InverseGaussianDistribution(), [0.1, 1.5]),
        (TweedieDistribution(power=-2.5), [0.1, 1.5]),
        (TweedieDistribution(power=-1), [0.1, 1.5]),
        (TweedieDistribution(power=1.5), [0.1, 1.5]),
        (TweedieDistribution(power=2.5), [0.1, 1.5]),
        (TweedieDistribution(power=-4), [0.1, 1.5]),
    ],
)
def test_deviance_zero(family, chk_values):
    """Test deviance(y,y) = 0 for different families."""
    # A perfect prediction (y_pred == y_true) must have zero deviance at
    # every checked value within the family's domain.
    for x in chk_values:
        assert_allclose(family.deviance(x, x), 0, atol=1e-9)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@pytest.mark.parametrize(
    "family",
    [
        NormalDistribution(),
        PoissonDistribution(),
        GammaDistribution(),
        InverseGaussianDistribution(),
        TweedieDistribution(power=-2.5),
        TweedieDistribution(power=-1),
        TweedieDistribution(power=1.5),
        TweedieDistribution(power=2.5),
        TweedieDistribution(power=-4),
    ],
    ids=lambda x: x.__class__.__name__,
)
def test_deviance_derivative(family, global_random_seed):
    """Test deviance derivative for different families."""
    rng = np.random.RandomState(global_random_seed)
    y_true = rng.rand(10)
    # make data positive
    y_true += np.abs(y_true.min()) + 1e-2

    # y_pred >= y_true > 0 keeps both inside every family's domain.
    y_pred = y_true + np.fmax(rng.rand(10), 0.0)

    dev = family.deviance(y_true, y_pred)
    assert isinstance(dev, float)
    dev_derivative = family.deviance_derivative(y_true, y_pred)
    assert dev_derivative.shape == y_pred.shape

    # Compare the analytic derivative against a finite-difference estimate;
    # the error is normalized by the derivative's magnitude.
    err = check_grad(
        lambda y_pred: family.deviance(y_true, y_pred),
        lambda y_pred: family.deviance_derivative(y_true, y_pred),
        y_pred,
    ) / np.linalg.norm(dev_derivative)
    assert abs(err) < 3e-6
|
mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_allclose, assert_array_equal
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
from sklearn._loss.link import (
|
| 6 |
+
_LINKS,
|
| 7 |
+
_inclusive_low_high,
|
| 8 |
+
MultinomialLogit,
|
| 9 |
+
Interval,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
LINK_FUNCTIONS = list(_LINKS.values())
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def test_interval_raises():
    """Test that interval with low > high raises ValueError."""
    with pytest.raises(
        ValueError, match="One must have low <= high; got low=1, high=0."
    ):
        Interval(1, 0, False, False)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@pytest.mark.parametrize(
    "interval",
    [
        # All combinations of bounds and inclusiveness, same order as
        # enumerating (low_inclusive, high_inclusive) in binary.
        Interval(low, high, low_inclusive, high_inclusive)
        for (low, high) in [(0, 1), (-np.inf, np.inf), (-10, -1)]
        for low_inclusive in (False, True)
        for high_inclusive in (False, True)
    ],
)
def test_is_in_range(interval):
    """Interval.includes must honour open/closed boundaries."""
    # _inclusive_low_high returns bounds guaranteed to lie inside the
    # interval, so the linspace grid is strictly in range.
    low, high = _inclusive_low_high(interval)
    inside = np.linspace(low, high, num=10)
    assert interval.includes(inside)

    # Appending the lower bound: accepted iff that bound is inclusive.
    assert interval.includes(np.r_[inside, interval.low]) == interval.low_inclusive

    # Appending the upper bound: accepted iff that bound is inclusive.
    assert interval.includes(np.r_[inside, interval.high]) == interval.high_inclusive

    # Appending both bounds: accepted iff both are inclusive.
    both_bounds = np.r_[inside, interval.low, interval.high]
    assert interval.includes(both_bounds) == (
        interval.low_inclusive and interval.high_inclusive
    )
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_inverse_identity(link, global_random_seed):
    """Check that link(inverse(x)) == x and inverse(link(y)) == y."""
    rng = np.random.RandomState(global_random_seed)
    link = link()
    n_samples = 100
    # Keep raw_prediction inside [-20, 20]: for LogitLink, expit(x) gets
    # numerically indistinguishable from 1 for large positive x, which
    # loses precision in the round trip.
    if link.is_multiclass:
        n_classes = 10
        raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples, n_classes))
        if isinstance(link, MultinomialLogit):
            # MultinomialLogit requires symmetrized raw predictions.
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    else:
        raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples))

    # Round trip in both directions.
    assert_allclose(link.link(link.inverse(raw_prediction)), raw_prediction)
    y_pred = link.inverse(raw_prediction)
    assert_allclose(link.inverse(link.link(y_pred)), y_pred)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_out_argument(link):
    """The ``out`` argument must receive the result and be returned."""
    rng = np.random.RandomState(42)
    link = link()
    n_samples = 100
    if link.is_multiclass:
        raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples, 10))
        if isinstance(link, MultinomialLogit):
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    else:
        # All single-output links accept any real raw_prediction, i.e. the
        # valid interval is (-inf, inf), so no clipping is needed.
        raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples))

    # inverse(): result must be written into `out`, and the returned array
    # must be `out` itself (same memory).
    y_pred = link.inverse(raw_prediction, out=None)
    out = np.empty_like(raw_prediction)
    y_pred_2 = link.inverse(raw_prediction, out=out)
    assert_allclose(y_pred, out)
    assert_array_equal(out, y_pred_2)
    assert np.shares_memory(out, y_pred_2)

    # Same contract for link().
    out = np.empty_like(y_pred)
    raw_prediction_2 = link.link(y_pred, out=out)
    assert_allclose(raw_prediction, out)
    assert_array_equal(out, raw_prediction_2)
    assert np.shares_memory(out, raw_prediction_2)
|
mgm/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py
ADDED
|
@@ -0,0 +1,1161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pickle
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import assert_allclose, assert_array_equal
|
| 5 |
+
import pytest
|
| 6 |
+
from pytest import approx
|
| 7 |
+
from scipy.optimize import (
|
| 8 |
+
minimize,
|
| 9 |
+
minimize_scalar,
|
| 10 |
+
newton,
|
| 11 |
+
LinearConstraint,
|
| 12 |
+
)
|
| 13 |
+
from scipy.special import logsumexp
|
| 14 |
+
|
| 15 |
+
from sklearn._loss.link import _inclusive_low_high, IdentityLink
|
| 16 |
+
from sklearn._loss.loss import (
|
| 17 |
+
_LOSSES,
|
| 18 |
+
BaseLoss,
|
| 19 |
+
AbsoluteError,
|
| 20 |
+
HalfBinomialLoss,
|
| 21 |
+
HalfGammaLoss,
|
| 22 |
+
HalfMultinomialLoss,
|
| 23 |
+
HalfPoissonLoss,
|
| 24 |
+
HalfSquaredError,
|
| 25 |
+
HalfTweedieLoss,
|
| 26 |
+
HalfTweedieLossIdentity,
|
| 27 |
+
PinballLoss,
|
| 28 |
+
)
|
| 29 |
+
from sklearn.utils import assert_all_finite
|
| 30 |
+
from sklearn.utils._testing import create_memmap_backed_data, skip_if_32bit
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# All concrete loss classes registered in the _LOSSES mapping.
ALL_LOSSES = list(_LOSSES.values())

# A default-constructed instance of every loss, plus extra instances with
# non-default parameters to cover the parametrized losses.
LOSS_INSTANCES = [loss() for loss in ALL_LOSSES]
# HalfTweedieLoss(power=1.5) is already there as default
LOSS_INSTANCES += [
    PinballLoss(quantile=0.25),
    HalfTweedieLoss(power=-1.5),
    HalfTweedieLoss(power=0),
    HalfTweedieLoss(power=1),
    HalfTweedieLoss(power=2),
    HalfTweedieLoss(power=3.0),
    HalfTweedieLossIdentity(power=0),
    HalfTweedieLossIdentity(power=1),
    HalfTweedieLossIdentity(power=2),
    HalfTweedieLossIdentity(power=3.0),
]
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def loss_instance_name(param):
    """Build a readable pytest id for a loss-instance parameter.

    Non-loss parameters fall back to their plain string representation.
    """
    if not isinstance(param, BaseLoss):
        return str(param)
    name = param.__class__.__name__
    # Distinguish parametrized losses by their defining parameter.
    if hasattr(param, "quantile"):
        return name + f"(quantile={param.closs.quantile})"
    if hasattr(param, "power"):
        return name + f"(power={param.closs.power})"
    return name
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def random_y_true_raw_prediction(
    loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42
):
    """Draw random y_true and raw_prediction inside the loss's valid ranges."""
    rng = np.random.RandomState(seed)
    if loss.is_multiclass:
        raw_prediction = np.empty((n_samples, loss.n_classes))
        raw_prediction.flat[:] = rng.uniform(
            low=raw_bound[0],
            high=raw_bound[1],
            size=n_samples * loss.n_classes,
        )
        # Cycle deterministically through all class labels 0..n_classes-1.
        y_true = np.arange(n_samples).astype(float) % loss.n_classes
    else:
        if isinstance(loss.link, IdentityLink):
            # With an identity link, raw_prediction equals y_pred, so the
            # raw bounds must be clipped to the valid y_pred interval.
            lo, hi = _inclusive_low_high(loss.interval_y_pred)
            lo = np.amax([lo, raw_bound[0]])
            hi = np.amin([hi, raw_bound[1]])
            raw_bound = (lo, hi)
        raw_prediction = rng.uniform(
            low=raw_bound[0], high=raw_bound[1], size=n_samples
        )
        # Draw y_true from the intersection of its valid range and y_bound.
        lo, hi = _inclusive_low_high(loss.interval_y_true)
        lo = max(lo, y_bound[0])
        hi = min(hi, y_bound[1])
        y_true = rng.uniform(lo, hi, size=n_samples)
        # Place a few values exactly on the special inclusive boundaries.
        if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive:
            y_true[:: (n_samples // 3)] = 0
        if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive:
            y_true[1 :: (n_samples // 3)] = 1

    return y_true, raw_prediction
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def numerical_derivative(func, x, eps):
    """Approximate the first derivative of ``func`` at ``x``.

    Uses a central finite difference of accuracy order 4, see
    https://en.wikipedia.org/wiki/Numerical_differentiation and
    https://en.wikipedia.org/wiki/Finite_difference_coefficient
    """
    step = np.full_like(x, fill_value=eps)
    # Evaluate func on the four stencil points around x.
    stencil = [func(x + k * step) for k in (-2, -1, 1, 2)]
    # Accuracy-4 central coefficients: (1, -8, 8, -1) / 12.
    return (stencil[0] - 8 * stencil[1] + 8 * stencil[2] - stencil[3]) / (12.0 * eps)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_loss_boundary(loss):
    """Test interval ranges of y_true and y_pred in losses."""
    if loss.is_multiclass:
        # Class labels 0.0 .. 9.0.
        y_true = np.arange(10, dtype=float)
    else:
        # _inclusive_low_high keeps linspace strictly inside the interval.
        low, high = _inclusive_low_high(loss.interval_y_true)
        y_true = np.linspace(low, high, num=10)

    # Explicitly append boundary values when they belong to the interval.
    if loss.interval_y_true.low_inclusive:
        y_true = np.r_[y_true, loss.interval_y_true.low]
    if loss.interval_y_true.high_inclusive:
        y_true = np.r_[y_true, loss.interval_y_true.high]

    assert loss.in_y_true_range(y_true)

    n = y_true.shape[0]
    low, high = _inclusive_low_high(loss.interval_y_pred)
    if loss.is_multiclass:
        # Build valid probability rows: the three columns sum to one.
        y_pred = np.empty((n, 3))
        y_pred[:, 0] = np.linspace(low, high, num=n)
        y_pred[:, 1] = y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0])
    else:
        y_pred = np.linspace(low, high, num=n)

    assert loss.in_y_pred_range(y_pred)

    # Evaluating the loss on these in-range values must not fail.
    raw_prediction = loss.link.link(y_pred)
    loss.loss(y_true=y_true, raw_prediction=raw_prediction)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
# Fixture to test valid value ranges.
# Values valid (or invalid) for BOTH y_true and y_pred of each loss.
Y_COMMON_PARAMS = [
    # (loss, [y success], [y fail])
    (HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]),
    (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]),
]
# y_pred and y_true do not always have the same domain (valid value range).
# Hence, we define extra sets of parameters for each of them.
Y_TRUE_PARAMS = [  # type: ignore
    # (loss, [y success], [y fail])
    (HalfPoissonLoss(), [0], []),
    (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLoss(power=0), [-100, 0], []),
    (HalfTweedieLoss(power=1.5), [0], []),
    (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLossIdentity(power=0), [-100, 0], []),
    (HalfTweedieLossIdentity(power=1.5), [0], []),
    (HalfBinomialLoss(), [0, 1], []),
    (HalfMultinomialLoss(), [0.0, 1.0, 2], []),
]
Y_PRED_PARAMS = [
    # (loss, [y success], [y fail])
    (HalfPoissonLoss(), [], [0]),
    (HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]),
    (HalfTweedieLoss(power=0), [], [-3, -0.1, 0]),
    (HalfTweedieLoss(power=1.5), [], [0]),
    (HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]),
    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []),
    (HalfTweedieLossIdentity(power=1.5), [], [0]),
    (HalfBinomialLoss(), [], [0, 1]),
    (HalfMultinomialLoss(), [0.1, 0.5], [0, 1]),
]
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@pytest.mark.parametrize(
    "loss, y_true_success, y_true_fail", Y_COMMON_PARAMS + Y_TRUE_PARAMS
)
def test_loss_boundary_y_true(loss, y_true_success, y_true_fail):
    """Test boundaries of y_true for loss functions."""
    assert all(loss.in_y_true_range(np.array([v])) for v in y_true_success)
    assert not any(loss.in_y_true_range(np.array([v])) for v in y_true_fail)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@pytest.mark.parametrize(
    "loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS  # type: ignore
)
def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail):
    """Test boundaries of y_pred for loss functions."""
    assert all(loss.in_y_pred_range(np.array([v])) for v in y_pred_success)
    assert not any(loss.in_y_pred_range(np.array([v])) for v in y_pred_fail)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
@pytest.mark.parametrize(
    "loss, y_true, raw_prediction, loss_true",
    [
        # Each expected value is the closed-form (half) deviance/loss at the
        # given point, hand-derived from the loss definition.
        (HalfSquaredError(), 1.0, 5.0, 8),
        (AbsoluteError(), 1.0, 5.0, 4),
        (PinballLoss(quantile=0.5), 1.0, 5.0, 2),
        (PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25)),
        (PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25),
        # For log-link losses, raw_prediction = log(y_pred).
        (HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4)),
        (HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4),
        (HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2),
        (HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2)),
        (HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2),
        (HalfTweedieLossIdentity(power=3), 2.0, 4.0, -1 / 4 + 1 / 4**2 + 1 / 2 / 2),
        (HalfBinomialLoss(), 0.25, np.log(4), np.log(5) - 0.25 * np.log(4)),
        # Multinomial: loss = logsumexp(raw) - raw[y_true] for each class.
        (
            HalfMultinomialLoss(n_classes=3),
            0.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.2,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            1.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.5,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            2.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.3,
        ),
    ],
    ids=loss_instance_name,
)
def test_loss_on_specific_values(loss, y_true, raw_prediction, loss_true):
    """Test losses at specific values."""
    assert loss(
        y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
    ) == approx(loss_true, rel=1e-11, abs=1e-12)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize("readonly_memmap", [False, True])
@pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_out", [np.float32, np.float64])
# `1` below is a placeholder flag: it is replaced by a real array in the body.
@pytest.mark.parametrize("sample_weight", [None, 1])
@pytest.mark.parametrize("out1", [None, 1])
@pytest.mark.parametrize("out2", [None, 1])
@pytest.mark.parametrize("n_threads", [1, 2])
def test_loss_dtype(
    loss, readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads
):
    """Test acceptance of dtypes, readonly and writeable arrays in loss functions.

    Check that loss accepts if all input arrays are either all float32 or all
    float64, and all output arrays are either all float32 or all float64.

    Also check that input arrays can be readonly, e.g. memory mapped.

    This is a smoke test: only the absence of exceptions is checked, not the
    numerical results.
    """
    loss = loss()
    # generate a y_true and raw_prediction in valid range
    n_samples = 5
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=n_samples,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )
    y_true = y_true.astype(dtype_in)
    raw_prediction = raw_prediction.astype(dtype_in)

    # Replace the `1` flags from the parametrization with concrete arrays.
    if sample_weight is not None:
        sample_weight = np.array([2.0] * n_samples, dtype=dtype_in)
    if out1 is not None:
        out1 = np.empty_like(y_true, dtype=dtype_out)
    if out2 is not None:
        out2 = np.empty_like(raw_prediction, dtype=dtype_out)

    if readonly_memmap:
        # Memory-mapped copies are read-only; losses must still accept them
        # as inputs.
        y_true = create_memmap_backed_data(y_true, aligned=True)
        raw_prediction = create_memmap_backed_data(raw_prediction, aligned=True)
        if sample_weight is not None:
            sample_weight = create_memmap_backed_data(sample_weight, aligned=True)

    loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out1,
        n_threads=n_threads,
    )
    loss.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out2,
        n_threads=n_threads,
    )
    loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out1,
        gradient_out=out2,
        n_threads=n_threads,
    )
    # For multiclass losses, gradient_hessian needs a 2d gradient_out, so the
    # 1d out1 buffer above must be replaced.
    if out1 is not None and loss.is_multiclass:
        out1 = np.empty_like(raw_prediction, dtype=dtype_out)
    loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out1,
        hessian_out=out2,
        n_threads=n_threads,
    )
    loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight)
    loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
    loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)
    # The proba methods only exist on classification losses.
    if hasattr(loss, "predict_proba"):
        loss.predict_proba(raw_prediction=raw_prediction)
    if hasattr(loss, "gradient_proba"):
        loss.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=out1,
            proba_out=out2,
            n_threads=n_threads,
        )
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_same_as_C_functions(loss, sample_weight):
    """Test that Python and Cython functions return same results.

    Each Python-level method on the loss is compared against the
    corresponding method on ``loss.closs`` (the Cython implementation).
    """
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=20,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )
    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])

    out_l1 = np.empty_like(y_true)
    out_l2 = np.empty_like(y_true)
    out_g1 = np.empty_like(raw_prediction)
    out_g2 = np.empty_like(raw_prediction)
    out_h1 = np.empty_like(raw_prediction)
    out_h2 = np.empty_like(raw_prediction)
    assert_allclose(
        loss.loss(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=out_l1,
        ),
        loss.closs.loss(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=out_l2,
        ),
    )
    assert_allclose(
        loss.gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=out_g1,
        ),
        loss.closs.gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=out_g2,
        ),
    )
    # BUG FIX: the first call must go through the Python wrapper, not
    # loss.closs, otherwise the Cython implementation is compared with
    # itself and loss_gradient is never actually cross-checked.
    loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l1,
        gradient_out=out_g1,
    )
    loss.closs.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l2,
        gradient_out=out_g2,
    )
    assert_allclose(out_l1, out_l2)
    assert_allclose(out_g1, out_g2)
    loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g1,
        hessian_out=out_h1,
    )
    loss.closs.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g2,
        hessian_out=out_h2,
    )
    assert_allclose(out_g1, out_g2)
    assert_allclose(out_h1, out_h2)
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed):
    """Test that loss and gradient are the same across different functions.

    Also test that output arguments contain correct results.
    """
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=20,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=global_random_seed,
    )
    if sample_weight == "range":
        # Deterministic non-uniform weights 1..n_samples.
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])

    # Pre-allocated output buffers; each method must both fill its buffer
    # and return it (checked via shares_memory below).
    out_l1 = np.empty_like(y_true)
    out_l2 = np.empty_like(y_true)
    out_g1 = np.empty_like(raw_prediction)
    out_g2 = np.empty_like(raw_prediction)
    out_g3 = np.empty_like(raw_prediction)
    out_h3 = np.empty_like(raw_prediction)

    l1 = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l1,
    )
    g1 = loss.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g1,
    )
    l2, g2 = loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l2,
        gradient_out=out_g2,
    )
    g3, h3 = loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g3,
        hessian_out=out_h3,
    )
    # Losses and gradients must agree across the different entry points,
    # and each returned array must be the provided output buffer.
    assert_allclose(l1, l2)
    assert_array_equal(l1, out_l1)
    assert np.shares_memory(l1, out_l1)
    assert_array_equal(l2, out_l2)
    assert np.shares_memory(l2, out_l2)
    assert_allclose(g1, g2)
    assert_allclose(g1, g3)
    assert_array_equal(g1, out_g1)
    assert np.shares_memory(g1, out_g1)
    assert_array_equal(g2, out_g2)
    assert np.shares_memory(g2, out_g2)
    assert_array_equal(g3, out_g3)
    assert np.shares_memory(g3, out_g3)

    if hasattr(loss, "gradient_proba"):
        assert loss.is_multiclass  # only for HalfMultinomialLoss
        out_g4 = np.empty_like(raw_prediction)
        out_proba = np.empty_like(raw_prediction)
        g4, proba = loss.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=out_g4,
            proba_out=out_proba,
        )
        assert_allclose(g1, out_g4)
        assert_allclose(g1, g4)
        assert_allclose(proba, out_proba)
        # Predicted probabilities must sum to one per sample.
        assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11)
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
| 520 |
+
@pytest.mark.parametrize("sample_weight", ["ones", "random"])
|
| 521 |
+
def test_sample_weight_multiplies(loss, sample_weight, global_random_seed):
|
| 522 |
+
"""Test sample weights in loss, gradients and hessians.
|
| 523 |
+
|
| 524 |
+
Make sure that passing sample weights to loss, gradient and hessian
|
| 525 |
+
computation methods is equivalent to multiplying by the weights.
|
| 526 |
+
"""
|
| 527 |
+
n_samples = 100
|
| 528 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
| 529 |
+
loss=loss,
|
| 530 |
+
n_samples=n_samples,
|
| 531 |
+
y_bound=(-100, 100),
|
| 532 |
+
raw_bound=(-5, 5),
|
| 533 |
+
seed=global_random_seed,
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
if sample_weight == "ones":
|
| 537 |
+
sample_weight = np.ones(shape=n_samples, dtype=np.float64)
|
| 538 |
+
else:
|
| 539 |
+
rng = np.random.RandomState(global_random_seed)
|
| 540 |
+
sample_weight = rng.normal(size=n_samples).astype(np.float64)
|
| 541 |
+
|
| 542 |
+
assert_allclose(
|
| 543 |
+
loss.loss(
|
| 544 |
+
y_true=y_true,
|
| 545 |
+
raw_prediction=raw_prediction,
|
| 546 |
+
sample_weight=sample_weight,
|
| 547 |
+
),
|
| 548 |
+
sample_weight
|
| 549 |
+
* loss.loss(
|
| 550 |
+
y_true=y_true,
|
| 551 |
+
raw_prediction=raw_prediction,
|
| 552 |
+
sample_weight=None,
|
| 553 |
+
),
|
| 554 |
+
)
|
| 555 |
+
|
| 556 |
+
losses, gradient = loss.loss_gradient(
|
| 557 |
+
y_true=y_true,
|
| 558 |
+
raw_prediction=raw_prediction,
|
| 559 |
+
sample_weight=None,
|
| 560 |
+
)
|
| 561 |
+
losses_sw, gradient_sw = loss.loss_gradient(
|
| 562 |
+
y_true=y_true,
|
| 563 |
+
raw_prediction=raw_prediction,
|
| 564 |
+
sample_weight=sample_weight,
|
| 565 |
+
)
|
| 566 |
+
assert_allclose(losses * sample_weight, losses_sw)
|
| 567 |
+
if not loss.is_multiclass:
|
| 568 |
+
assert_allclose(gradient * sample_weight, gradient_sw)
|
| 569 |
+
else:
|
| 570 |
+
assert_allclose(gradient * sample_weight[:, None], gradient_sw)
|
| 571 |
+
|
| 572 |
+
gradient, hessian = loss.gradient_hessian(
|
| 573 |
+
y_true=y_true,
|
| 574 |
+
raw_prediction=raw_prediction,
|
| 575 |
+
sample_weight=None,
|
| 576 |
+
)
|
| 577 |
+
gradient_sw, hessian_sw = loss.gradient_hessian(
|
| 578 |
+
y_true=y_true,
|
| 579 |
+
raw_prediction=raw_prediction,
|
| 580 |
+
sample_weight=sample_weight,
|
| 581 |
+
)
|
| 582 |
+
if not loss.is_multiclass:
|
| 583 |
+
assert_allclose(gradient * sample_weight, gradient_sw)
|
| 584 |
+
assert_allclose(hessian * sample_weight, hessian_sw)
|
| 585 |
+
else:
|
| 586 |
+
assert_allclose(gradient * sample_weight[:, None], gradient_sw)
|
| 587 |
+
assert_allclose(hessian * sample_weight[:, None], hessian_sw)
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
| 591 |
+
def test_graceful_squeezing(loss):
|
| 592 |
+
"""Test that reshaped raw_prediction gives same results."""
|
| 593 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
| 594 |
+
loss=loss,
|
| 595 |
+
n_samples=20,
|
| 596 |
+
y_bound=(-100, 100),
|
| 597 |
+
raw_bound=(-10, 10),
|
| 598 |
+
seed=42,
|
| 599 |
+
)
|
| 600 |
+
|
| 601 |
+
if raw_prediction.ndim == 1:
|
| 602 |
+
raw_prediction_2d = raw_prediction[:, None]
|
| 603 |
+
assert_allclose(
|
| 604 |
+
loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d),
|
| 605 |
+
loss.loss(y_true=y_true, raw_prediction=raw_prediction),
|
| 606 |
+
)
|
| 607 |
+
assert_allclose(
|
| 608 |
+
loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
|
| 609 |
+
loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction),
|
| 610 |
+
)
|
| 611 |
+
assert_allclose(
|
| 612 |
+
loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
|
| 613 |
+
loss.gradient(y_true=y_true, raw_prediction=raw_prediction),
|
| 614 |
+
)
|
| 615 |
+
assert_allclose(
|
| 616 |
+
loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d),
|
| 617 |
+
loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction),
|
| 618 |
+
)
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
| 622 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
| 623 |
+
def test_loss_of_perfect_prediction(loss, sample_weight):
|
| 624 |
+
"""Test value of perfect predictions.
|
| 625 |
+
|
| 626 |
+
Loss of y_pred = y_true plus constant_to_optimal_zero should sums up to
|
| 627 |
+
zero.
|
| 628 |
+
"""
|
| 629 |
+
if not loss.is_multiclass:
|
| 630 |
+
# Use small values such that exp(value) is not nan.
|
| 631 |
+
raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])
|
| 632 |
+
# If link is identity, we must respect the interval of y_pred:
|
| 633 |
+
if isinstance(loss.link, IdentityLink):
|
| 634 |
+
eps = 1e-10
|
| 635 |
+
low = loss.interval_y_pred.low
|
| 636 |
+
if not loss.interval_y_pred.low_inclusive:
|
| 637 |
+
low = low + eps
|
| 638 |
+
high = loss.interval_y_pred.high
|
| 639 |
+
if not loss.interval_y_pred.high_inclusive:
|
| 640 |
+
high = high - eps
|
| 641 |
+
raw_prediction = np.clip(raw_prediction, low, high)
|
| 642 |
+
y_true = loss.link.inverse(raw_prediction)
|
| 643 |
+
else:
|
| 644 |
+
# HalfMultinomialLoss
|
| 645 |
+
y_true = np.arange(loss.n_classes).astype(float)
|
| 646 |
+
# raw_prediction with entries -exp(10), but +exp(10) on the diagonal
|
| 647 |
+
# this is close enough to np.inf which would produce nan
|
| 648 |
+
raw_prediction = np.full(
|
| 649 |
+
shape=(loss.n_classes, loss.n_classes),
|
| 650 |
+
fill_value=-np.exp(10),
|
| 651 |
+
dtype=float,
|
| 652 |
+
)
|
| 653 |
+
raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)
|
| 654 |
+
|
| 655 |
+
if sample_weight == "range":
|
| 656 |
+
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
|
| 657 |
+
|
| 658 |
+
loss_value = loss.loss(
|
| 659 |
+
y_true=y_true,
|
| 660 |
+
raw_prediction=raw_prediction,
|
| 661 |
+
sample_weight=sample_weight,
|
| 662 |
+
)
|
| 663 |
+
constant_term = loss.constant_to_optimal_zero(
|
| 664 |
+
y_true=y_true, sample_weight=sample_weight
|
| 665 |
+
)
|
| 666 |
+
# Comparing loss_value + constant_term to zero would result in large
|
| 667 |
+
# round-off errors.
|
| 668 |
+
assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
| 672 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
| 673 |
+
def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed):
|
| 674 |
+
"""Test gradients and hessians with numerical derivatives.
|
| 675 |
+
|
| 676 |
+
Gradient should equal the numerical derivatives of the loss function.
|
| 677 |
+
Hessians should equal the numerical derivatives of gradients.
|
| 678 |
+
"""
|
| 679 |
+
n_samples = 20
|
| 680 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
| 681 |
+
loss=loss,
|
| 682 |
+
n_samples=n_samples,
|
| 683 |
+
y_bound=(-100, 100),
|
| 684 |
+
raw_bound=(-5, 5),
|
| 685 |
+
seed=global_random_seed,
|
| 686 |
+
)
|
| 687 |
+
|
| 688 |
+
if sample_weight == "range":
|
| 689 |
+
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
|
| 690 |
+
|
| 691 |
+
g, h = loss.gradient_hessian(
|
| 692 |
+
y_true=y_true,
|
| 693 |
+
raw_prediction=raw_prediction,
|
| 694 |
+
sample_weight=sample_weight,
|
| 695 |
+
)
|
| 696 |
+
|
| 697 |
+
assert g.shape == raw_prediction.shape
|
| 698 |
+
assert h.shape == raw_prediction.shape
|
| 699 |
+
|
| 700 |
+
if not loss.is_multiclass:
|
| 701 |
+
|
| 702 |
+
def loss_func(x):
|
| 703 |
+
return loss.loss(
|
| 704 |
+
y_true=y_true,
|
| 705 |
+
raw_prediction=x,
|
| 706 |
+
sample_weight=sample_weight,
|
| 707 |
+
)
|
| 708 |
+
|
| 709 |
+
g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6)
|
| 710 |
+
assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10)
|
| 711 |
+
|
| 712 |
+
def grad_func(x):
|
| 713 |
+
return loss.gradient(
|
| 714 |
+
y_true=y_true,
|
| 715 |
+
raw_prediction=x,
|
| 716 |
+
sample_weight=sample_weight,
|
| 717 |
+
)
|
| 718 |
+
|
| 719 |
+
h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6)
|
| 720 |
+
if loss.approx_hessian:
|
| 721 |
+
# TODO: What could we test if loss.approx_hessian?
|
| 722 |
+
pass
|
| 723 |
+
else:
|
| 724 |
+
assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10)
|
| 725 |
+
else:
|
| 726 |
+
# For multiclass loss, we should only change the predictions of the
|
| 727 |
+
# class for which the derivative is taken for, e.g. offset[:, k] = eps
|
| 728 |
+
# for class k.
|
| 729 |
+
# As a softmax is computed, offsetting the whole array by a constant
|
| 730 |
+
# would have no effect on the probabilities, and thus on the loss.
|
| 731 |
+
for k in range(loss.n_classes):
|
| 732 |
+
|
| 733 |
+
def loss_func(x):
|
| 734 |
+
raw = raw_prediction.copy()
|
| 735 |
+
raw[:, k] = x
|
| 736 |
+
return loss.loss(
|
| 737 |
+
y_true=y_true,
|
| 738 |
+
raw_prediction=raw,
|
| 739 |
+
sample_weight=sample_weight,
|
| 740 |
+
)
|
| 741 |
+
|
| 742 |
+
g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5)
|
| 743 |
+
assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10)
|
| 744 |
+
|
| 745 |
+
def grad_func(x):
|
| 746 |
+
raw = raw_prediction.copy()
|
| 747 |
+
raw[:, k] = x
|
| 748 |
+
return loss.gradient(
|
| 749 |
+
y_true=y_true,
|
| 750 |
+
raw_prediction=raw,
|
| 751 |
+
sample_weight=sample_weight,
|
| 752 |
+
)[:, k]
|
| 753 |
+
|
| 754 |
+
h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6)
|
| 755 |
+
if loss.approx_hessian:
|
| 756 |
+
# TODO: What could we test if loss.approx_hessian?
|
| 757 |
+
pass
|
| 758 |
+
else:
|
| 759 |
+
assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10)
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
@pytest.mark.parametrize(
|
| 763 |
+
"loss, x0, y_true",
|
| 764 |
+
[
|
| 765 |
+
("squared_error", -2.0, 42),
|
| 766 |
+
("squared_error", 117.0, 1.05),
|
| 767 |
+
("squared_error", 0.0, 0.0),
|
| 768 |
+
# The argmin of binomial_loss for y_true=0 and y_true=1 is resp.
|
| 769 |
+
# -inf and +inf due to logit, cf. "complete separation". Therefore, we
|
| 770 |
+
# use 0 < y_true < 1.
|
| 771 |
+
("binomial_loss", 0.3, 0.1),
|
| 772 |
+
("binomial_loss", -12, 0.2),
|
| 773 |
+
("binomial_loss", 30, 0.9),
|
| 774 |
+
("poisson_loss", 12.0, 1.0),
|
| 775 |
+
("poisson_loss", 0.0, 2.0),
|
| 776 |
+
("poisson_loss", -22.0, 10.0),
|
| 777 |
+
],
|
| 778 |
+
)
|
| 779 |
+
@skip_if_32bit
|
| 780 |
+
def test_derivatives(loss, x0, y_true):
|
| 781 |
+
"""Test that gradients are zero at the minimum of the loss.
|
| 782 |
+
|
| 783 |
+
We check this on a single value/sample using Halley's method with the
|
| 784 |
+
first and second order derivatives computed by the Loss instance.
|
| 785 |
+
Note that methods of Loss instances operate on arrays while the newton
|
| 786 |
+
root finder expects a scalar or a one-element array for this purpose.
|
| 787 |
+
"""
|
| 788 |
+
loss = _LOSSES[loss](sample_weight=None)
|
| 789 |
+
y_true = np.array([y_true], dtype=np.float64)
|
| 790 |
+
x0 = np.array([x0], dtype=np.float64)
|
| 791 |
+
|
| 792 |
+
def func(x: np.ndarray) -> np.ndarray:
|
| 793 |
+
"""Compute loss plus constant term.
|
| 794 |
+
|
| 795 |
+
The constant term is such that the minimum function value is zero,
|
| 796 |
+
which is required by the Newton method.
|
| 797 |
+
"""
|
| 798 |
+
return loss.loss(
|
| 799 |
+
y_true=y_true, raw_prediction=x
|
| 800 |
+
) + loss.constant_to_optimal_zero(y_true=y_true)
|
| 801 |
+
|
| 802 |
+
def fprime(x: np.ndarray) -> np.ndarray:
|
| 803 |
+
return loss.gradient(y_true=y_true, raw_prediction=x)
|
| 804 |
+
|
| 805 |
+
def fprime2(x: np.ndarray) -> np.ndarray:
|
| 806 |
+
return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1]
|
| 807 |
+
|
| 808 |
+
optimum = newton(
|
| 809 |
+
func,
|
| 810 |
+
x0=x0,
|
| 811 |
+
fprime=fprime,
|
| 812 |
+
fprime2=fprime2,
|
| 813 |
+
maxiter=100,
|
| 814 |
+
tol=5e-8,
|
| 815 |
+
)
|
| 816 |
+
|
| 817 |
+
# Need to ravel arrays because assert_allclose requires matching
|
| 818 |
+
# dimensions.
|
| 819 |
+
y_true = y_true.ravel()
|
| 820 |
+
optimum = optimum.ravel()
|
| 821 |
+
assert_allclose(loss.link.inverse(optimum), y_true)
|
| 822 |
+
assert_allclose(func(optimum), 0, atol=1e-14)
|
| 823 |
+
assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7)
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
| 827 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
| 828 |
+
def test_loss_intercept_only(loss, sample_weight):
|
| 829 |
+
"""Test that fit_intercept_only returns the argmin of the loss.
|
| 830 |
+
|
| 831 |
+
Also test that the gradient is zero at the minimum.
|
| 832 |
+
"""
|
| 833 |
+
n_samples = 50
|
| 834 |
+
if not loss.is_multiclass:
|
| 835 |
+
y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples))
|
| 836 |
+
else:
|
| 837 |
+
y_true = np.arange(n_samples).astype(np.float64) % loss.n_classes
|
| 838 |
+
y_true[::5] = 0 # exceedance of class 0
|
| 839 |
+
|
| 840 |
+
if sample_weight == "range":
|
| 841 |
+
sample_weight = np.linspace(0.1, 2, num=n_samples)
|
| 842 |
+
|
| 843 |
+
a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
|
| 844 |
+
|
| 845 |
+
# find minimum by optimization
|
| 846 |
+
def fun(x):
|
| 847 |
+
if not loss.is_multiclass:
|
| 848 |
+
raw_prediction = np.full(shape=(n_samples), fill_value=x)
|
| 849 |
+
else:
|
| 850 |
+
raw_prediction = np.ascontiguousarray(
|
| 851 |
+
np.broadcast_to(x, shape=(n_samples, loss.n_classes))
|
| 852 |
+
)
|
| 853 |
+
return loss(
|
| 854 |
+
y_true=y_true,
|
| 855 |
+
raw_prediction=raw_prediction,
|
| 856 |
+
sample_weight=sample_weight,
|
| 857 |
+
)
|
| 858 |
+
|
| 859 |
+
if not loss.is_multiclass:
|
| 860 |
+
opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100})
|
| 861 |
+
grad = loss.gradient(
|
| 862 |
+
y_true=y_true,
|
| 863 |
+
raw_prediction=np.full_like(y_true, a),
|
| 864 |
+
sample_weight=sample_weight,
|
| 865 |
+
)
|
| 866 |
+
assert a.shape == tuple() # scalar
|
| 867 |
+
assert a.dtype == y_true.dtype
|
| 868 |
+
assert_all_finite(a)
|
| 869 |
+
a == approx(opt.x, rel=1e-7)
|
| 870 |
+
grad.sum() == approx(0, abs=1e-12)
|
| 871 |
+
else:
|
| 872 |
+
# The constraint corresponds to sum(raw_prediction) = 0. Without it, we would
|
| 873 |
+
# need to apply loss.symmetrize_raw_prediction to opt.x before comparing.
|
| 874 |
+
opt = minimize(
|
| 875 |
+
fun,
|
| 876 |
+
np.zeros((loss.n_classes)),
|
| 877 |
+
tol=1e-13,
|
| 878 |
+
options={"maxiter": 100},
|
| 879 |
+
method="SLSQP",
|
| 880 |
+
constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0),
|
| 881 |
+
)
|
| 882 |
+
grad = loss.gradient(
|
| 883 |
+
y_true=y_true,
|
| 884 |
+
raw_prediction=np.tile(a, (n_samples, 1)),
|
| 885 |
+
sample_weight=sample_weight,
|
| 886 |
+
)
|
| 887 |
+
assert a.dtype == y_true.dtype
|
| 888 |
+
assert_all_finite(a)
|
| 889 |
+
assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12)
|
| 890 |
+
assert_allclose(grad.sum(axis=0), 0, atol=1e-12)
|
| 891 |
+
|
| 892 |
+
|
| 893 |
+
@pytest.mark.parametrize(
|
| 894 |
+
"loss, func, random_dist",
|
| 895 |
+
[
|
| 896 |
+
(HalfSquaredError(), np.mean, "normal"),
|
| 897 |
+
(AbsoluteError(), np.median, "normal"),
|
| 898 |
+
(PinballLoss(quantile=0.25), lambda x: np.percentile(x, q=25), "normal"),
|
| 899 |
+
(HalfPoissonLoss(), np.mean, "poisson"),
|
| 900 |
+
(HalfGammaLoss(), np.mean, "exponential"),
|
| 901 |
+
(HalfTweedieLoss(), np.mean, "exponential"),
|
| 902 |
+
(HalfBinomialLoss(), np.mean, "binomial"),
|
| 903 |
+
],
|
| 904 |
+
)
|
| 905 |
+
def test_specific_fit_intercept_only(loss, func, random_dist, global_random_seed):
|
| 906 |
+
"""Test that fit_intercept_only returns the correct functional.
|
| 907 |
+
|
| 908 |
+
We test the functional for specific, meaningful distributions, e.g.
|
| 909 |
+
squared error estimates the expectation of a probability distribution.
|
| 910 |
+
"""
|
| 911 |
+
rng = np.random.RandomState(global_random_seed)
|
| 912 |
+
if random_dist == "binomial":
|
| 913 |
+
y_train = rng.binomial(1, 0.5, size=100)
|
| 914 |
+
else:
|
| 915 |
+
y_train = getattr(rng, random_dist)(size=100)
|
| 916 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
| 917 |
+
# Make sure baseline prediction is the expected functional=func, e.g. mean
|
| 918 |
+
# or median.
|
| 919 |
+
assert_all_finite(baseline_prediction)
|
| 920 |
+
assert baseline_prediction == approx(loss.link.link(func(y_train)))
|
| 921 |
+
assert loss.link.inverse(baseline_prediction) == approx(func(y_train))
|
| 922 |
+
if isinstance(loss, IdentityLink):
|
| 923 |
+
assert_allclose(loss.link.inverse(baseline_prediction), baseline_prediction)
|
| 924 |
+
|
| 925 |
+
# Test baseline at boundary
|
| 926 |
+
if loss.interval_y_true.low_inclusive:
|
| 927 |
+
y_train.fill(loss.interval_y_true.low)
|
| 928 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
| 929 |
+
assert_all_finite(baseline_prediction)
|
| 930 |
+
if loss.interval_y_true.high_inclusive:
|
| 931 |
+
y_train.fill(loss.interval_y_true.high)
|
| 932 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
| 933 |
+
assert_all_finite(baseline_prediction)
|
| 934 |
+
|
| 935 |
+
|
| 936 |
+
def test_multinomial_loss_fit_intercept_only():
|
| 937 |
+
"""Test that fit_intercept_only returns the mean functional for CCE."""
|
| 938 |
+
rng = np.random.RandomState(0)
|
| 939 |
+
n_classes = 4
|
| 940 |
+
loss = HalfMultinomialLoss(n_classes=n_classes)
|
| 941 |
+
# Same logic as test_specific_fit_intercept_only. Here inverse link
|
| 942 |
+
# function = softmax and link function = log - symmetry term.
|
| 943 |
+
y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64)
|
| 944 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
| 945 |
+
assert baseline_prediction.shape == (n_classes,)
|
| 946 |
+
p = np.zeros(n_classes, dtype=y_train.dtype)
|
| 947 |
+
for k in range(n_classes):
|
| 948 |
+
p[k] = (y_train == k).mean()
|
| 949 |
+
assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p)))
|
| 950 |
+
assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :]))
|
| 951 |
+
|
| 952 |
+
for y_train in (np.zeros(shape=10), np.ones(shape=10)):
|
| 953 |
+
y_train = y_train.astype(np.float64)
|
| 954 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
| 955 |
+
assert baseline_prediction.dtype == y_train.dtype
|
| 956 |
+
assert_all_finite(baseline_prediction)
|
| 957 |
+
|
| 958 |
+
|
| 959 |
+
def test_binomial_and_multinomial_loss(global_random_seed):
|
| 960 |
+
"""Test that multinomial loss with n_classes = 2 is the same as binomial loss."""
|
| 961 |
+
rng = np.random.RandomState(global_random_seed)
|
| 962 |
+
n_samples = 20
|
| 963 |
+
binom = HalfBinomialLoss()
|
| 964 |
+
multinom = HalfMultinomialLoss(n_classes=2)
|
| 965 |
+
y_train = rng.randint(0, 2, size=n_samples).astype(np.float64)
|
| 966 |
+
raw_prediction = rng.normal(size=n_samples)
|
| 967 |
+
raw_multinom = np.empty((n_samples, 2))
|
| 968 |
+
raw_multinom[:, 0] = -0.5 * raw_prediction
|
| 969 |
+
raw_multinom[:, 1] = 0.5 * raw_prediction
|
| 970 |
+
assert_allclose(
|
| 971 |
+
binom.loss(y_true=y_train, raw_prediction=raw_prediction),
|
| 972 |
+
multinom.loss(y_true=y_train, raw_prediction=raw_multinom),
|
| 973 |
+
)
|
| 974 |
+
|
| 975 |
+
|
| 976 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
| 977 |
+
def test_predict_proba(loss, global_random_seed):
|
| 978 |
+
"""Test that predict_proba and gradient_proba work as expected."""
|
| 979 |
+
n_samples = 20
|
| 980 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
| 981 |
+
loss=loss,
|
| 982 |
+
n_samples=n_samples,
|
| 983 |
+
y_bound=(-100, 100),
|
| 984 |
+
raw_bound=(-5, 5),
|
| 985 |
+
seed=global_random_seed,
|
| 986 |
+
)
|
| 987 |
+
|
| 988 |
+
if hasattr(loss, "predict_proba"):
|
| 989 |
+
proba = loss.predict_proba(raw_prediction)
|
| 990 |
+
assert proba.shape == (n_samples, loss.n_classes)
|
| 991 |
+
assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
|
| 992 |
+
|
| 993 |
+
if hasattr(loss, "gradient_proba"):
|
| 994 |
+
for grad, proba in (
|
| 995 |
+
(None, None),
|
| 996 |
+
(None, np.empty_like(raw_prediction)),
|
| 997 |
+
(np.empty_like(raw_prediction), None),
|
| 998 |
+
(np.empty_like(raw_prediction), np.empty_like(raw_prediction)),
|
| 999 |
+
):
|
| 1000 |
+
grad, proba = loss.gradient_proba(
|
| 1001 |
+
y_true=y_true,
|
| 1002 |
+
raw_prediction=raw_prediction,
|
| 1003 |
+
sample_weight=None,
|
| 1004 |
+
gradient_out=grad,
|
| 1005 |
+
proba_out=proba,
|
| 1006 |
+
)
|
| 1007 |
+
assert proba.shape == (n_samples, loss.n_classes)
|
| 1008 |
+
assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
|
| 1009 |
+
assert_allclose(
|
| 1010 |
+
grad,
|
| 1011 |
+
loss.gradient(
|
| 1012 |
+
y_true=y_true,
|
| 1013 |
+
raw_prediction=raw_prediction,
|
| 1014 |
+
sample_weight=None,
|
| 1015 |
+
gradient_out=None,
|
| 1016 |
+
),
|
| 1017 |
+
)
|
| 1018 |
+
|
| 1019 |
+
|
| 1020 |
+
@pytest.mark.parametrize("loss", ALL_LOSSES)
|
| 1021 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
| 1022 |
+
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
|
| 1023 |
+
@pytest.mark.parametrize("order", ("C", "F"))
|
| 1024 |
+
def test_init_gradient_and_hessians(loss, sample_weight, dtype, order):
|
| 1025 |
+
"""Test that init_gradient_and_hessian works as expected.
|
| 1026 |
+
|
| 1027 |
+
passing sample_weight to a loss correctly influences the constant_hessian
|
| 1028 |
+
attribute, and consequently the shape of the hessian array.
|
| 1029 |
+
"""
|
| 1030 |
+
n_samples = 5
|
| 1031 |
+
if sample_weight == "range":
|
| 1032 |
+
sample_weight = np.ones(n_samples)
|
| 1033 |
+
loss = loss(sample_weight=sample_weight)
|
| 1034 |
+
gradient, hessian = loss.init_gradient_and_hessian(
|
| 1035 |
+
n_samples=n_samples,
|
| 1036 |
+
dtype=dtype,
|
| 1037 |
+
order=order,
|
| 1038 |
+
)
|
| 1039 |
+
if loss.constant_hessian:
|
| 1040 |
+
assert gradient.shape == (n_samples,)
|
| 1041 |
+
assert hessian.shape == (1,)
|
| 1042 |
+
elif loss.is_multiclass:
|
| 1043 |
+
assert gradient.shape == (n_samples, loss.n_classes)
|
| 1044 |
+
assert hessian.shape == (n_samples, loss.n_classes)
|
| 1045 |
+
else:
|
| 1046 |
+
assert hessian.shape == (n_samples,)
|
| 1047 |
+
assert hessian.shape == (n_samples,)
|
| 1048 |
+
|
| 1049 |
+
assert gradient.dtype == dtype
|
| 1050 |
+
assert hessian.dtype == dtype
|
| 1051 |
+
|
| 1052 |
+
if order == "C":
|
| 1053 |
+
assert gradient.flags.c_contiguous
|
| 1054 |
+
assert hessian.flags.c_contiguous
|
| 1055 |
+
else:
|
| 1056 |
+
assert gradient.flags.f_contiguous
|
| 1057 |
+
assert hessian.flags.f_contiguous
|
| 1058 |
+
|
| 1059 |
+
|
| 1060 |
+
@pytest.mark.parametrize("loss", ALL_LOSSES)
|
| 1061 |
+
@pytest.mark.parametrize(
|
| 1062 |
+
"params, err_msg",
|
| 1063 |
+
[
|
| 1064 |
+
(
|
| 1065 |
+
{"dtype": np.int64},
|
| 1066 |
+
f"Valid options for 'dtype' are .* Got dtype={np.int64} instead.",
|
| 1067 |
+
),
|
| 1068 |
+
],
|
| 1069 |
+
)
|
| 1070 |
+
def test_init_gradient_and_hessian_raises(loss, params, err_msg):
|
| 1071 |
+
"""Test that init_gradient_and_hessian raises errors for invalid input."""
|
| 1072 |
+
loss = loss()
|
| 1073 |
+
with pytest.raises((ValueError, TypeError), match=err_msg):
|
| 1074 |
+
gradient, hessian = loss.init_gradient_and_hessian(n_samples=5, **params)
|
| 1075 |
+
|
| 1076 |
+
|
| 1077 |
+
@pytest.mark.parametrize(
|
| 1078 |
+
"loss, params, err_type, err_msg",
|
| 1079 |
+
[
|
| 1080 |
+
(
|
| 1081 |
+
PinballLoss,
|
| 1082 |
+
{"quantile": None},
|
| 1083 |
+
TypeError,
|
| 1084 |
+
"quantile must be an instance of float, not NoneType.",
|
| 1085 |
+
),
|
| 1086 |
+
(
|
| 1087 |
+
PinballLoss,
|
| 1088 |
+
{"quantile": 0},
|
| 1089 |
+
ValueError,
|
| 1090 |
+
"quantile == 0, must be > 0.",
|
| 1091 |
+
),
|
| 1092 |
+
(PinballLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."),
|
| 1093 |
+
],
|
| 1094 |
+
)
|
| 1095 |
+
def test_loss_init_parameter_validation(loss, params, err_type, err_msg):
|
| 1096 |
+
"""Test that loss raises errors for invalid input."""
|
| 1097 |
+
with pytest.raises(err_type, match=err_msg):
|
| 1098 |
+
loss(**params)
|
| 1099 |
+
|
| 1100 |
+
|
| 1101 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
| 1102 |
+
def test_loss_pickle(loss):
|
| 1103 |
+
"""Test that losses can be pickled."""
|
| 1104 |
+
n_samples = 20
|
| 1105 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
| 1106 |
+
loss=loss,
|
| 1107 |
+
n_samples=n_samples,
|
| 1108 |
+
y_bound=(-100, 100),
|
| 1109 |
+
raw_bound=(-5, 5),
|
| 1110 |
+
seed=42,
|
| 1111 |
+
)
|
| 1112 |
+
pickled_loss = pickle.dumps(loss)
|
| 1113 |
+
unpickled_loss = pickle.loads(pickled_loss)
|
| 1114 |
+
assert loss(y_true=y_true, raw_prediction=raw_prediction) == approx(
|
| 1115 |
+
unpickled_loss(y_true=y_true, raw_prediction=raw_prediction)
|
| 1116 |
+
)
|
| 1117 |
+
|
| 1118 |
+
|
| 1119 |
+
@pytest.mark.parametrize("p", [-1.5, 0, 1, 1.5, 2, 3])
|
| 1120 |
+
def test_tweedie_log_identity_consistency(p):
|
| 1121 |
+
"""Test for identical losses when only the link function is different."""
|
| 1122 |
+
half_tweedie_log = HalfTweedieLoss(power=p)
|
| 1123 |
+
half_tweedie_identity = HalfTweedieLossIdentity(power=p)
|
| 1124 |
+
n_samples = 10
|
| 1125 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
| 1126 |
+
loss=half_tweedie_log, n_samples=n_samples, seed=42
|
| 1127 |
+
)
|
| 1128 |
+
y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)
|
| 1129 |
+
|
| 1130 |
+
# Let's compare the loss values, up to some constant term that is dropped
|
| 1131 |
+
# in HalfTweedieLoss but not in HalfTweedieLossIdentity.
|
| 1132 |
+
loss_log = half_tweedie_log.loss(
|
| 1133 |
+
y_true=y_true, raw_prediction=raw_prediction
|
| 1134 |
+
) + half_tweedie_log.constant_to_optimal_zero(y_true)
|
| 1135 |
+
loss_identity = half_tweedie_identity.loss(
|
| 1136 |
+
y_true=y_true, raw_prediction=y_pred
|
| 1137 |
+
) + half_tweedie_identity.constant_to_optimal_zero(y_true)
|
| 1138 |
+
# Note that HalfTweedieLoss ignores different constant terms than
|
| 1139 |
+
# HalfTweedieLossIdentity. Constant terms means terms not depending on
|
| 1140 |
+
# raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
|
| 1141 |
+
# give the same values.
|
| 1142 |
+
assert_allclose(loss_log, loss_identity)
|
| 1143 |
+
|
| 1144 |
+
# For gradients and hessians, the constant terms do not matter. We have, however,
|
| 1145 |
+
# to account for the chain rule, i.e. with x=raw_prediction
|
| 1146 |
+
# gradient_log(x) = d/dx loss_log(x)
|
| 1147 |
+
# = d/dx loss_identity(exp(x))
|
| 1148 |
+
# = exp(x) * gradient_identity(exp(x))
|
| 1149 |
+
# Similarly,
|
| 1150 |
+
# hessian_log(x) = exp(x) * gradient_identity(exp(x))
|
| 1151 |
+
# + exp(x)**2 * hessian_identity(x)
|
| 1152 |
+
gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
|
| 1153 |
+
y_true=y_true, raw_prediction=raw_prediction
|
| 1154 |
+
)
|
| 1155 |
+
gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
|
| 1156 |
+
y_true=y_true, raw_prediction=y_pred
|
| 1157 |
+
)
|
| 1158 |
+
assert_allclose(gradient_log, y_pred * gradient_identity)
|
| 1159 |
+
assert_allclose(
|
| 1160 |
+
hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
|
| 1161 |
+
)
|
mgm/lib/python3.10/site-packages/sklearn/cross_decomposition/_pls.py
ADDED
|
@@ -0,0 +1,1089 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
|
| 6 |
+
# License: BSD 3 clause
|
| 7 |
+
|
| 8 |
+
from numbers import Integral, Real
|
| 9 |
+
|
| 10 |
+
import warnings
|
| 11 |
+
from abc import ABCMeta, abstractmethod
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
from scipy.linalg import svd
|
| 15 |
+
|
| 16 |
+
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
|
| 17 |
+
from ..base import MultiOutputMixin
|
| 18 |
+
from ..base import ClassNamePrefixFeaturesOutMixin
|
| 19 |
+
from ..utils import check_array, check_consistent_length
|
| 20 |
+
from ..utils.fixes import sp_version
|
| 21 |
+
from ..utils.fixes import parse_version
|
| 22 |
+
from ..utils.extmath import svd_flip
|
| 23 |
+
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
|
| 24 |
+
from ..utils._param_validation import Interval, StrOptions
|
| 25 |
+
from ..exceptions import ConvergenceWarning
|
| 26 |
+
|
| 27 |
+
__all__ = ["PLSCanonical", "PLSRegression", "PLSSVD"]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
if sp_version >= parse_version("1.7"):
|
| 31 |
+
# Starting in scipy 1.7 pinv2 was deprecated in favor of pinv.
|
| 32 |
+
# pinv now uses the svd to compute the pseudo-inverse.
|
| 33 |
+
from scipy.linalg import pinv as pinv2
|
| 34 |
+
else:
|
| 35 |
+
from scipy.linalg import pinv2
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _pinv2_old(a):
|
| 39 |
+
# Used previous scipy pinv2 that was updated in:
|
| 40 |
+
# https://github.com/scipy/scipy/pull/10067
|
| 41 |
+
# We can not set `cond` or `rcond` for pinv2 in scipy >= 1.3 to keep the
|
| 42 |
+
# same behavior of pinv2 for scipy < 1.3, because the condition used to
|
| 43 |
+
# determine the rank is dependent on the output of svd.
|
| 44 |
+
u, s, vh = svd(a, full_matrices=False, check_finite=False)
|
| 45 |
+
|
| 46 |
+
t = u.dtype.char.lower()
|
| 47 |
+
factor = {"f": 1e3, "d": 1e6}
|
| 48 |
+
cond = np.max(s) * factor[t] * np.finfo(t).eps
|
| 49 |
+
rank = np.sum(s > cond)
|
| 50 |
+
|
| 51 |
+
u = u[:, :rank]
|
| 52 |
+
u /= s[:rank]
|
| 53 |
+
return np.transpose(np.conjugate(np.dot(u, vh[:rank])))
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _get_first_singular_vectors_power_method(
    X, Y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False
):
    """Return the first left and right singular vectors of X'Y.

    Provides an alternative to the svd(X'Y) and uses the power method instead.
    With norm_y_weights to True and in mode A, this corresponds to the
    algorithm section 11.3 of the Wegelin's review, except this starts at the
    "update saliences" part.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Left block (already centered/scaled by the caller).

    Y : ndarray of shape (n_samples, n_targets)
        Right block (already centered/scaled by the caller).

    mode : {'A', 'B'}, default='A'
        In mode 'B' (used by CCA) the weights are computed against the
        pseudo-inverses of X and Y instead of X and Y themselves.

    max_iter : int, default=500
        Maximum number of power iterations.

    tol : float, default=1e-06
        Convergence threshold on the squared norm of the change of
        `x_weights` between two consecutive iterations.

    norm_y_weights : bool, default=False
        Whether to normalize `y_weights` to unit norm at each iteration.

    Returns
    -------
    x_weights : ndarray of shape (n_features,)
        Approximation of the first left singular vector of X'Y.

    y_weights : ndarray of shape (n_targets,)
        Approximation of the first right singular vector of X'Y.

    n_iter : int
        Number of power iterations actually run.

    Raises
    ------
    StopIteration
        If every column of Y is numerically constant (all entries below
        machine epsilon in absolute value), so no starting score exists.
    """

    eps = np.finfo(X.dtype).eps
    try:
        # Initialize the y score with the first Y column that is not
        # (numerically) all zeros.
        y_score = next(col for col in Y.T if np.any(np.abs(col) > eps))
    except StopIteration as e:
        raise StopIteration("Y residual is constant") from e

    x_weights_old = 100  # init to big value for first convergence check

    if mode == "B":
        # Precompute pseudo inverse matrices
        # Basically: X_pinv = (X.T X)^-1 X.T
        # Which requires inverting a (n_features, n_features) matrix.
        # As a result, and as detailed in the Wegelin's review, CCA (i.e. mode
        # B) will be unstable if n_features > n_samples or n_targets >
        # n_samples
        X_pinv, Y_pinv = _pinv2_old(X), _pinv2_old(Y)

    for i in range(max_iter):
        if mode == "B":
            x_weights = np.dot(X_pinv, y_score)
        else:
            x_weights = np.dot(X.T, y_score) / np.dot(y_score, y_score)

        # Normalize to unit norm; eps guards against division by zero.
        x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps
        x_score = np.dot(X, x_weights)

        if mode == "B":
            y_weights = np.dot(Y_pinv, x_score)
        else:
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)

        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps

        y_score = np.dot(Y, y_weights) / (np.dot(y_weights, y_weights) + eps)

        # Convergence is checked on x_weights only; with a single target
        # column the solution is exact after one iteration.
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        x_weights_old = x_weights

    n_iter = i + 1
    if n_iter == max_iter:
        warnings.warn("Maximum number of iterations reached", ConvergenceWarning)

    return x_weights, y_weights, n_iter
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _get_first_singular_vectors_svd(X, Y):
|
| 116 |
+
"""Return the first left and right singular vectors of X'Y.
|
| 117 |
+
|
| 118 |
+
Here the whole SVD is computed.
|
| 119 |
+
"""
|
| 120 |
+
C = np.dot(X.T, Y)
|
| 121 |
+
U, _, Vt = svd(C, full_matrices=False)
|
| 122 |
+
return U[:, 0], Vt[0, :]
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _center_scale_xy(X, Y, scale=True):
|
| 126 |
+
"""Center X, Y and scale if the scale parameter==True
|
| 127 |
+
|
| 128 |
+
Returns
|
| 129 |
+
-------
|
| 130 |
+
X, Y, x_mean, y_mean, x_std, y_std
|
| 131 |
+
"""
|
| 132 |
+
# center
|
| 133 |
+
x_mean = X.mean(axis=0)
|
| 134 |
+
X -= x_mean
|
| 135 |
+
y_mean = Y.mean(axis=0)
|
| 136 |
+
Y -= y_mean
|
| 137 |
+
# scale
|
| 138 |
+
if scale:
|
| 139 |
+
x_std = X.std(axis=0, ddof=1)
|
| 140 |
+
x_std[x_std == 0.0] = 1.0
|
| 141 |
+
X /= x_std
|
| 142 |
+
y_std = Y.std(axis=0, ddof=1)
|
| 143 |
+
y_std[y_std == 0.0] = 1.0
|
| 144 |
+
Y /= y_std
|
| 145 |
+
else:
|
| 146 |
+
x_std = np.ones(X.shape[1])
|
| 147 |
+
y_std = np.ones(Y.shape[1])
|
| 148 |
+
return X, Y, x_mean, y_mean, x_std, y_std
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def _svd_flip_1d(u, v):
|
| 152 |
+
"""Same as svd_flip but works on 1d arrays, and is inplace"""
|
| 153 |
+
# svd_flip would force us to convert to 2d array and would also return 2d
|
| 154 |
+
# arrays. We don't want that.
|
| 155 |
+
biggest_abs_val_idx = np.argmax(np.abs(u))
|
| 156 |
+
sign = np.sign(u[biggest_abs_val_idx])
|
| 157 |
+
u *= sign
|
| 158 |
+
v *= sign
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
class _PLS(
    ClassNamePrefixFeaturesOutMixin,
    TransformerMixin,
    RegressorMixin,
    MultiOutputMixin,
    BaseEstimator,
    metaclass=ABCMeta,
):
    """Partial Least Squares (PLS)

    This class implements the generic PLS algorithm.

    Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods,
    with emphasis on the two-block case
    https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf
    """

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "scale": ["boolean"],
        "deflation_mode": [StrOptions({"regression", "canonical"})],
        "mode": [StrOptions({"A", "B"})],
        "algorithm": [StrOptions({"svd", "nipals"})],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left")],
        "copy": ["boolean"],
    }

    @abstractmethod
    def __init__(
        self,
        n_components=2,
        *,
        scale=True,
        deflation_mode="regression",
        mode="A",
        algorithm="nipals",
        max_iter=500,
        tol=1e-06,
        copy=True,
    ):
        self.n_components = n_components
        self.deflation_mode = deflation_mode
        self.mode = mode
        self.scale = scale
        self.algorithm = algorithm
        self.max_iter = max_iter
        self.tol = tol
        self.copy = copy

    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        """
        self._validate_params()

        check_consistent_length(X, Y)
        X = self._validate_data(
            X, dtype=np.float64, copy=self.copy, ensure_min_samples=2
        )
        Y = check_array(
            Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False
        )
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)

        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]

        n_components = self.n_components
        # With PLSRegression n_components is bounded by the rank of (X.T X) see
        # Wegelin page 25. With CCA and PLSCanonical, n_components is bounded
        # by the rank of X and the rank of Y: see Wegelin page 12
        rank_upper_bound = p if self.deflation_mode == "regression" else min(n, p, q)
        if n_components > rank_upper_bound:
            raise ValueError(
                f"`n_components` upper bound is {rank_upper_bound}. "
                f"Got {n_components} instead. Reduce `n_components`."
            )

        self._norm_y_weights = self.deflation_mode == "canonical"  # 1.1
        norm_y_weights = self._norm_y_weights

        # Scale (in place). Xk / Yk hold the residual (deflated) blocks and
        # are destructively updated at each component iteration below.
        Xk, Yk, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
            X, Y, self.scale
        )

        self.x_weights_ = np.zeros((p, n_components))  # U
        self.y_weights_ = np.zeros((q, n_components))  # V
        self._x_scores = np.zeros((n, n_components))  # Xi
        self._y_scores = np.zeros((n, n_components))  # Omega
        self.x_loadings_ = np.zeros((p, n_components))  # Gamma
        self.y_loadings_ = np.zeros((q, n_components))  # Delta
        self.n_iter_ = []

        # This whole thing corresponds to the algorithm in section 4.1 of the
        # review from Wegelin. See above for a notation mapping from code to
        # paper.
        Y_eps = np.finfo(Yk.dtype).eps
        for k in range(n_components):
            # Find first left and right singular vectors of the X.T.dot(Y)
            # cross-covariance matrix.
            if self.algorithm == "nipals":
                # Replace columns that are all close to zero with zeros
                Yk_mask = np.all(np.abs(Yk) < 10 * Y_eps, axis=0)
                Yk[:, Yk_mask] = 0.0

                try:
                    (
                        x_weights,
                        y_weights,
                        n_iter_,
                    ) = _get_first_singular_vectors_power_method(
                        Xk,
                        Yk,
                        mode=self.mode,
                        max_iter=self.max_iter,
                        tol=self.tol,
                        norm_y_weights=norm_y_weights,
                    )
                except StopIteration as e:
                    if str(e) != "Y residual is constant":
                        raise
                    # No signal left in Y: stop extracting components early.
                    warnings.warn(f"Y residual is constant at iteration {k}")
                    break

                self.n_iter_.append(n_iter_)

            elif self.algorithm == "svd":
                x_weights, y_weights = _get_first_singular_vectors_svd(Xk, Yk)

            # inplace sign flip for consistency across solvers and archs
            _svd_flip_1d(x_weights, y_weights)

            # compute scores, i.e. the projections of X and Y
            x_scores = np.dot(Xk, x_weights)
            if norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights, y_weights)
            y_scores = np.dot(Yk, y_weights) / y_ss

            # Deflation: subtract rank-one approx to obtain Xk+1 and Yk+1
            x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores)
            Xk -= np.outer(x_scores, x_loadings)

            if self.deflation_mode == "canonical":
                # regress Yk on y_score
                y_loadings = np.dot(y_scores, Yk) / np.dot(y_scores, y_scores)
                Yk -= np.outer(y_scores, y_loadings)
            if self.deflation_mode == "regression":
                # regress Yk on x_score
                y_loadings = np.dot(x_scores, Yk) / np.dot(x_scores, x_scores)
                Yk -= np.outer(x_scores, y_loadings)

            self.x_weights_[:, k] = x_weights
            self.y_weights_[:, k] = y_weights
            self._x_scores[:, k] = x_scores
            self._y_scores[:, k] = y_scores
            self.x_loadings_[:, k] = x_loadings
            self.y_loadings_[:, k] = y_loadings

        # X was approximated as Xi . Gamma.T + X_(R+1)
        # Xi . Gamma.T is a sum of n_components rank-1 matrices. X_(R+1) is
        # whatever is left to fully reconstruct X, and can be 0 if X is of rank
        # n_components.
        # Similarly, Y was approximated as Omega . Delta.T + Y_(R+1)

        # Compute transformation matrices (rotations_). See User Guide.
        self.x_rotations_ = np.dot(
            self.x_weights_,
            pinv2(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False),
        )
        self.y_rotations_ = np.dot(
            self.y_weights_,
            pinv2(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False),
        )
        # TODO(1.3): change `self._coef_` to `self.coef_`
        # `_coef_` maps *scaled* X to Y; multiplying by `_y_std` folds the
        # target scaling back in so `predict` only has to normalize X.
        self._coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
        self._coef_ = (self._coef_ * self._y_std).T
        self.intercept_ = self._y_mean
        self._n_features_out = self.x_rotations_.shape[1]
        return self

    def transform(self, X, Y=None, copy=True):
        """Apply the dimension reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to transform.

        Y : array-like of shape (n_samples, n_targets), default=None
            Target vectors.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        x_scores, y_scores : array-like or tuple of array-like
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        """
        check_is_fitted(self)
        X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
        # Normalize with the statistics learned during fit.
        X -= self._x_mean
        X /= self._x_std
        # Apply rotation
        x_scores = np.dot(X, self.x_rotations_)
        if Y is not None:
            Y = check_array(
                Y, input_name="Y", ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES
            )
            if Y.ndim == 1:
                Y = Y.reshape(-1, 1)
            Y -= self._y_mean
            Y /= self._y_std
            y_scores = np.dot(Y, self.y_rotations_)
            return x_scores, y_scores

        return x_scores

    def inverse_transform(self, X, Y=None):
        """Transform data back to its original space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Y : array-like of shape (n_samples, n_components)
            New target, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Returns
        -------
        X_reconstructed : ndarray of shape (n_samples, n_features)
            Return the reconstructed `X` data.

        Y_reconstructed : ndarray of shape (n_samples, n_targets)
            Return the reconstructed `X` target. Only returned when `Y` is given.

        Notes
        -----
        This transformation will only be exact if `n_components=n_features`.
        """
        check_is_fitted(self)
        X = check_array(X, input_name="X", dtype=FLOAT_DTYPES)
        # From pls space to original space
        X_reconstructed = np.matmul(X, self.x_loadings_.T)
        # Denormalize
        X_reconstructed *= self._x_std
        X_reconstructed += self._x_mean

        if Y is not None:
            Y = check_array(Y, input_name="Y", dtype=FLOAT_DTYPES)
            # From pls space to original space
            Y_reconstructed = np.matmul(Y, self.y_loadings_.T)
            # Denormalize
            Y_reconstructed *= self._y_std
            Y_reconstructed += self._y_mean
            return X_reconstructed, Y_reconstructed

        return X_reconstructed

    def predict(self, X, copy=True):
        """Predict targets of given samples.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples.

        copy : bool, default=True
            Whether to copy `X` and `Y`, or perform in-place normalization.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.

        Notes
        -----
        This call requires the estimation of a matrix of shape
        `(n_features, n_targets)`, which may be an issue in high dimensional
        space.
        """
        check_is_fitted(self)
        X = self._validate_data(X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
        # Normalize
        X -= self._x_mean
        X /= self._x_std
        # TODO(1.3): change `self._coef_` to `self.coef_`
        Ypred = X @ self._coef_.T
        return Ypred + self.intercept_

    def fit_transform(self, X, y=None):
        """Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : ndarray of shape (n_samples, n_components)
            Return `x_scores` if `Y` is not given, `(x_scores, y_scores)` otherwise.
        """
        return self.fit(X, y).transform(X, y)

    @property
    def coef_(self):
        """The coefficients of the linear model."""
        # TODO(1.3): remove and change `self._coef_` to `self.coef_`
        # remove catch warnings from `_get_feature_importances`
        # delete self._coef_no_warning
        # update the docstring of `coef_` and `intercept_` attribute
        if hasattr(self, "_coef_") and getattr(self, "_coef_warning", True):
            warnings.warn(
                "The attribute `coef_` will be transposed in version 1.3 to be "
                "consistent with other linear models in scikit-learn. Currently, "
                "`coef_` has a shape of (n_features, n_targets) and in the future it "
                "will have a shape of (n_targets, n_features).",
                FutureWarning,
            )
            # Only warn the first time
            self._coef_warning = False

        return self._coef_.T

    def _more_tags(self):
        return {"poor_score": True, "requires_y": False}
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
class PLSRegression(_PLS):
    """PLS regression.

    PLSRegression is also known as PLS2 or PLS1, depending on the number of
    targets.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, n_features]`
        (with the regression deflation mode, `fit` only bounds
        `n_components` by the number of features).

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in :term:`fit` before applying centering,
        and potentially scaling. If `False`, these operations will be done
        inplace, modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training samples.

    y_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training targets.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    PLSRegression()
    >>> Y_pred = pls2.predict(X)
    """

    # These parameters are fixed by __init__ below, so remove them from the
    # user-facing constraints.
    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode", "algorithm"):
        _parameter_constraints.pop(param)

    # This implementation provides the same results that 3 PLS packages
    # provided in the R language (R-project):
    # - "mixOmics" with function pls(X, Y, mode = "regression")
    # - "plspm " with function plsreg2(X, Y)
    # - "pls" with function oscorespls.fit(X, Y)

    def __init__(
        self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="regression",
            mode="A",
            algorithm="nipals",
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )

    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        Y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        """
        super().fit(X, Y)
        # expose the fitted attributes `x_scores_` and `y_scores_`
        self.x_scores_ = self._x_scores
        self.y_scores_ = self._y_scores
        return self
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
class PLSCanonical(_PLS):
    """Partial Least Squares transformer and regressor.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `Y`.

    algorithm : {'nipals', 'svd'}, default='nipals'
        The algorithm used to estimate the first singular vectors of the
        cross-covariance matrix. 'nipals' uses the power method while 'svd'
        will compute the whole SVD.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `Y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `Y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `Y`.

    coef_ : ndarray of shape (n_features, n_targets)
        The coefficients of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `Y` is approximated as
        `Y = X @ coef_ + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component. Empty if `algorithm='svd'`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    CCA : Canonical Correlation Analysis.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    PLSCanonical()
    >>> X_c, Y_c = plsca.transform(X, Y)
    """

    # These parameters are fixed by __init__ below, so remove them from the
    # user-facing constraints.
    _parameter_constraints: dict = {**_PLS._parameter_constraints}
    for param in ("deflation_mode", "mode"):
        _parameter_constraints.pop(param)

    # This implementation provides the same results that the "plspm" package
    # provided in the R language (R-project), using the function plsca(X, Y).
    # Results are equal or collinear with the function
    # ``pls(..., mode = "canonical")`` of the "mixOmics" package. The
    # difference relies in the fact that mixOmics implementation does not
    # exactly implement the Wold algorithm since it does not normalize
    # y_weights to one.

    def __init__(
        self,
        n_components=2,
        *,
        scale=True,
        algorithm="nipals",
        max_iter=500,
        tol=1e-06,
        copy=True,
    ):
        super().__init__(
            n_components=n_components,
            scale=scale,
            deflation_mode="canonical",
            mode="A",
            algorithm=algorithm,
            max_iter=max_iter,
            tol=tol,
            copy=copy,
        )
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
class CCA(_PLS):
|
| 798 |
+
"""Canonical Correlation Analysis, also known as "Mode B" PLS.
|
| 799 |
+
|
| 800 |
+
Read more in the :ref:`User Guide <cross_decomposition>`.
|
| 801 |
+
|
| 802 |
+
Parameters
|
| 803 |
+
----------
|
| 804 |
+
n_components : int, default=2
|
| 805 |
+
Number of components to keep. Should be in `[1, min(n_samples,
|
| 806 |
+
n_features, n_targets)]`.
|
| 807 |
+
|
| 808 |
+
scale : bool, default=True
|
| 809 |
+
Whether to scale `X` and `Y`.
|
| 810 |
+
|
| 811 |
+
max_iter : int, default=500
|
| 812 |
+
The maximum number of iterations of the power method.
|
| 813 |
+
|
| 814 |
+
tol : float, default=1e-06
|
| 815 |
+
The tolerance used as convergence criteria in the power method: the
|
| 816 |
+
algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
|
| 817 |
+
than `tol`, where `u` corresponds to the left singular vector.
|
| 818 |
+
|
| 819 |
+
copy : bool, default=True
|
| 820 |
+
Whether to copy `X` and `Y` in fit before applying centering, and
|
| 821 |
+
potentially scaling. If False, these operations will be done inplace,
|
| 822 |
+
modifying both arrays.
|
| 823 |
+
|
| 824 |
+
Attributes
|
| 825 |
+
----------
|
| 826 |
+
x_weights_ : ndarray of shape (n_features, n_components)
|
| 827 |
+
The left singular vectors of the cross-covariance matrices of each
|
| 828 |
+
iteration.
|
| 829 |
+
|
| 830 |
+
y_weights_ : ndarray of shape (n_targets, n_components)
|
| 831 |
+
The right singular vectors of the cross-covariance matrices of each
|
| 832 |
+
iteration.
|
| 833 |
+
|
| 834 |
+
x_loadings_ : ndarray of shape (n_features, n_components)
|
| 835 |
+
The loadings of `X`.
|
| 836 |
+
|
| 837 |
+
y_loadings_ : ndarray of shape (n_targets, n_components)
|
| 838 |
+
The loadings of `Y`.
|
| 839 |
+
|
| 840 |
+
x_rotations_ : ndarray of shape (n_features, n_components)
|
| 841 |
+
The projection matrix used to transform `X`.
|
| 842 |
+
|
| 843 |
+
y_rotations_ : ndarray of shape (n_features, n_components)
|
| 844 |
+
The projection matrix used to transform `Y`.
|
| 845 |
+
|
| 846 |
+
coef_ : ndarray of shape (n_features, n_targets)
|
| 847 |
+
The coefficients of the linear model such that `Y` is approximated as
|
| 848 |
+
`Y = X @ coef_ + intercept_`.
|
| 849 |
+
|
| 850 |
+
intercept_ : ndarray of shape (n_targets,)
|
| 851 |
+
The intercepts of the linear model such that `Y` is approximated as
|
| 852 |
+
`Y = X @ coef_ + intercept_`.
|
| 853 |
+
|
| 854 |
+
.. versionadded:: 1.1
|
| 855 |
+
|
| 856 |
+
n_iter_ : list of shape (n_components,)
|
| 857 |
+
Number of iterations of the power method, for each
|
| 858 |
+
component.
|
| 859 |
+
|
| 860 |
+
n_features_in_ : int
|
| 861 |
+
Number of features seen during :term:`fit`.
|
| 862 |
+
|
| 863 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 864 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 865 |
+
has feature names that are all strings.
|
| 866 |
+
|
| 867 |
+
.. versionadded:: 1.0
|
| 868 |
+
|
| 869 |
+
See Also
|
| 870 |
+
--------
|
| 871 |
+
PLSCanonical : Partial Least Squares transformer and regressor.
|
| 872 |
+
PLSSVD : Partial Least Square SVD.
|
| 873 |
+
|
| 874 |
+
Examples
|
| 875 |
+
--------
|
| 876 |
+
>>> from sklearn.cross_decomposition import CCA
|
| 877 |
+
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
|
| 878 |
+
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
|
| 879 |
+
>>> cca = CCA(n_components=1)
|
| 880 |
+
>>> cca.fit(X, Y)
|
| 881 |
+
CCA(n_components=1)
|
| 882 |
+
>>> X_c, Y_c = cca.transform(X, Y)
|
| 883 |
+
"""
|
| 884 |
+
|
| 885 |
+
_parameter_constraints: dict = {**_PLS._parameter_constraints}
|
| 886 |
+
for param in ("deflation_mode", "mode", "algorithm"):
|
| 887 |
+
_parameter_constraints.pop(param)
|
| 888 |
+
|
| 889 |
+
def __init__(
|
| 890 |
+
self, n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True
|
| 891 |
+
):
|
| 892 |
+
super().__init__(
|
| 893 |
+
n_components=n_components,
|
| 894 |
+
scale=scale,
|
| 895 |
+
deflation_mode="canonical",
|
| 896 |
+
mode="B",
|
| 897 |
+
algorithm="nipals",
|
| 898 |
+
max_iter=max_iter,
|
| 899 |
+
tol=tol,
|
| 900 |
+
copy=copy,
|
| 901 |
+
)
|
| 902 |
+
|
| 903 |
+
|
| 904 |
+
class PLSSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
|
| 905 |
+
"""Partial Least Square SVD.
|
| 906 |
+
|
| 907 |
+
This transformer simply performs a SVD on the cross-covariance matrix
|
| 908 |
+
`X'Y`. It is able to project both the training data `X` and the targets
|
| 909 |
+
`Y`. The training data `X` is projected on the left singular vectors, while
|
| 910 |
+
the targets are projected on the right singular vectors.
|
| 911 |
+
|
| 912 |
+
Read more in the :ref:`User Guide <cross_decomposition>`.
|
| 913 |
+
|
| 914 |
+
.. versionadded:: 0.8
|
| 915 |
+
|
| 916 |
+
Parameters
|
| 917 |
+
----------
|
| 918 |
+
n_components : int, default=2
|
| 919 |
+
The number of components to keep. Should be in `[1,
|
| 920 |
+
min(n_samples, n_features, n_targets)]`.
|
| 921 |
+
|
| 922 |
+
scale : bool, default=True
|
| 923 |
+
Whether to scale `X` and `Y`.
|
| 924 |
+
|
| 925 |
+
copy : bool, default=True
|
| 926 |
+
Whether to copy `X` and `Y` in fit before applying centering, and
|
| 927 |
+
potentially scaling. If `False`, these operations will be done inplace,
|
| 928 |
+
modifying both arrays.
|
| 929 |
+
|
| 930 |
+
Attributes
|
| 931 |
+
----------
|
| 932 |
+
x_weights_ : ndarray of shape (n_features, n_components)
|
| 933 |
+
The left singular vectors of the SVD of the cross-covariance matrix.
|
| 934 |
+
Used to project `X` in :meth:`transform`.
|
| 935 |
+
|
| 936 |
+
y_weights_ : ndarray of (n_targets, n_components)
|
| 937 |
+
The right singular vectors of the SVD of the cross-covariance matrix.
|
| 938 |
+
Used to project `X` in :meth:`transform`.
|
| 939 |
+
|
| 940 |
+
n_features_in_ : int
|
| 941 |
+
Number of features seen during :term:`fit`.
|
| 942 |
+
|
| 943 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 944 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 945 |
+
has feature names that are all strings.
|
| 946 |
+
|
| 947 |
+
.. versionadded:: 1.0
|
| 948 |
+
|
| 949 |
+
See Also
|
| 950 |
+
--------
|
| 951 |
+
PLSCanonical : Partial Least Squares transformer and regressor.
|
| 952 |
+
CCA : Canonical Correlation Analysis.
|
| 953 |
+
|
| 954 |
+
Examples
|
| 955 |
+
--------
|
| 956 |
+
>>> import numpy as np
|
| 957 |
+
>>> from sklearn.cross_decomposition import PLSSVD
|
| 958 |
+
>>> X = np.array([[0., 0., 1.],
|
| 959 |
+
... [1., 0., 0.],
|
| 960 |
+
... [2., 2., 2.],
|
| 961 |
+
... [2., 5., 4.]])
|
| 962 |
+
>>> Y = np.array([[0.1, -0.2],
|
| 963 |
+
... [0.9, 1.1],
|
| 964 |
+
... [6.2, 5.9],
|
| 965 |
+
... [11.9, 12.3]])
|
| 966 |
+
>>> pls = PLSSVD(n_components=2).fit(X, Y)
|
| 967 |
+
>>> X_c, Y_c = pls.transform(X, Y)
|
| 968 |
+
>>> X_c.shape, Y_c.shape
|
| 969 |
+
((4, 2), (4, 2))
|
| 970 |
+
"""
|
| 971 |
+
|
| 972 |
+
_parameter_constraints: dict = {
|
| 973 |
+
"n_components": [Interval(Integral, 1, None, closed="left")],
|
| 974 |
+
"scale": ["boolean"],
|
| 975 |
+
"copy": ["boolean"],
|
| 976 |
+
}
|
| 977 |
+
|
| 978 |
+
def __init__(self, n_components=2, *, scale=True, copy=True):
|
| 979 |
+
self.n_components = n_components
|
| 980 |
+
self.scale = scale
|
| 981 |
+
self.copy = copy
|
| 982 |
+
|
| 983 |
+
def fit(self, X, Y):
|
| 984 |
+
"""Fit model to data.
|
| 985 |
+
|
| 986 |
+
Parameters
|
| 987 |
+
----------
|
| 988 |
+
X : array-like of shape (n_samples, n_features)
|
| 989 |
+
Training samples.
|
| 990 |
+
|
| 991 |
+
Y : array-like of shape (n_samples,) or (n_samples, n_targets)
|
| 992 |
+
Targets.
|
| 993 |
+
|
| 994 |
+
Returns
|
| 995 |
+
-------
|
| 996 |
+
self : object
|
| 997 |
+
Fitted estimator.
|
| 998 |
+
"""
|
| 999 |
+
self._validate_params()
|
| 1000 |
+
|
| 1001 |
+
check_consistent_length(X, Y)
|
| 1002 |
+
X = self._validate_data(
|
| 1003 |
+
X, dtype=np.float64, copy=self.copy, ensure_min_samples=2
|
| 1004 |
+
)
|
| 1005 |
+
Y = check_array(
|
| 1006 |
+
Y, input_name="Y", dtype=np.float64, copy=self.copy, ensure_2d=False
|
| 1007 |
+
)
|
| 1008 |
+
if Y.ndim == 1:
|
| 1009 |
+
Y = Y.reshape(-1, 1)
|
| 1010 |
+
|
| 1011 |
+
# we'll compute the SVD of the cross-covariance matrix = X.T.dot(Y)
|
| 1012 |
+
# This matrix rank is at most min(n_samples, n_features, n_targets) so
|
| 1013 |
+
# n_components cannot be bigger than that.
|
| 1014 |
+
n_components = self.n_components
|
| 1015 |
+
rank_upper_bound = min(X.shape[0], X.shape[1], Y.shape[1])
|
| 1016 |
+
if n_components > rank_upper_bound:
|
| 1017 |
+
raise ValueError(
|
| 1018 |
+
f"`n_components` upper bound is {rank_upper_bound}. "
|
| 1019 |
+
f"Got {n_components} instead. Reduce `n_components`."
|
| 1020 |
+
)
|
| 1021 |
+
|
| 1022 |
+
X, Y, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(
|
| 1023 |
+
X, Y, self.scale
|
| 1024 |
+
)
|
| 1025 |
+
|
| 1026 |
+
# Compute SVD of cross-covariance matrix
|
| 1027 |
+
C = np.dot(X.T, Y)
|
| 1028 |
+
U, s, Vt = svd(C, full_matrices=False)
|
| 1029 |
+
U = U[:, :n_components]
|
| 1030 |
+
Vt = Vt[:n_components]
|
| 1031 |
+
U, Vt = svd_flip(U, Vt)
|
| 1032 |
+
V = Vt.T
|
| 1033 |
+
|
| 1034 |
+
self.x_weights_ = U
|
| 1035 |
+
self.y_weights_ = V
|
| 1036 |
+
self._n_features_out = self.x_weights_.shape[1]
|
| 1037 |
+
return self
|
| 1038 |
+
|
| 1039 |
+
def transform(self, X, Y=None):
|
| 1040 |
+
"""
|
| 1041 |
+
Apply the dimensionality reduction.
|
| 1042 |
+
|
| 1043 |
+
Parameters
|
| 1044 |
+
----------
|
| 1045 |
+
X : array-like of shape (n_samples, n_features)
|
| 1046 |
+
Samples to be transformed.
|
| 1047 |
+
|
| 1048 |
+
Y : array-like of shape (n_samples,) or (n_samples, n_targets), \
|
| 1049 |
+
default=None
|
| 1050 |
+
Targets.
|
| 1051 |
+
|
| 1052 |
+
Returns
|
| 1053 |
+
-------
|
| 1054 |
+
x_scores : array-like or tuple of array-like
|
| 1055 |
+
The transformed data `X_transformed` if `Y is not None`,
|
| 1056 |
+
`(X_transformed, Y_transformed)` otherwise.
|
| 1057 |
+
"""
|
| 1058 |
+
check_is_fitted(self)
|
| 1059 |
+
X = self._validate_data(X, dtype=np.float64, reset=False)
|
| 1060 |
+
Xr = (X - self._x_mean) / self._x_std
|
| 1061 |
+
x_scores = np.dot(Xr, self.x_weights_)
|
| 1062 |
+
if Y is not None:
|
| 1063 |
+
Y = check_array(Y, input_name="Y", ensure_2d=False, dtype=np.float64)
|
| 1064 |
+
if Y.ndim == 1:
|
| 1065 |
+
Y = Y.reshape(-1, 1)
|
| 1066 |
+
Yr = (Y - self._y_mean) / self._y_std
|
| 1067 |
+
y_scores = np.dot(Yr, self.y_weights_)
|
| 1068 |
+
return x_scores, y_scores
|
| 1069 |
+
return x_scores
|
| 1070 |
+
|
| 1071 |
+
def fit_transform(self, X, y=None):
|
| 1072 |
+
"""Learn and apply the dimensionality reduction.
|
| 1073 |
+
|
| 1074 |
+
Parameters
|
| 1075 |
+
----------
|
| 1076 |
+
X : array-like of shape (n_samples, n_features)
|
| 1077 |
+
Training samples.
|
| 1078 |
+
|
| 1079 |
+
y : array-like of shape (n_samples,) or (n_samples, n_targets), \
|
| 1080 |
+
default=None
|
| 1081 |
+
Targets.
|
| 1082 |
+
|
| 1083 |
+
Returns
|
| 1084 |
+
-------
|
| 1085 |
+
out : array-like or tuple of array-like
|
| 1086 |
+
The transformed data `X_transformed` if `Y is not None`,
|
| 1087 |
+
`(X_transformed, Y_transformed)` otherwise.
|
| 1088 |
+
"""
|
| 1089 |
+
return self.fit(X, y).transform(X, y)
|
mgm/lib/python3.10/site-packages/sklearn/externals/__pycache__/_arff.cpython-310.pyc
ADDED
|
Binary file (33.3 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/externals/_arff.py
ADDED
|
@@ -0,0 +1,1107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# =============================================================================
|
| 2 |
+
# Federal University of Rio Grande do Sul (UFRGS)
|
| 3 |
+
# Connectionist Artificial Intelligence Laboratory (LIAC)
|
| 4 |
+
# Renato de Pontes Pereira - rppereira@inf.ufrgs.br
|
| 5 |
+
# =============================================================================
|
| 6 |
+
# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
|
| 7 |
+
#
|
| 8 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 9 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 10 |
+
# in the Software without restriction, including without limitation the rights
|
| 11 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 12 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 13 |
+
# furnished to do so, subject to the following conditions:
|
| 14 |
+
#
|
| 15 |
+
# The above copyright notice and this permission notice shall be included in
|
| 16 |
+
# all copies or substantial portions of the Software.
|
| 17 |
+
#
|
| 18 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 19 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 20 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 21 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 22 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 23 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 24 |
+
# SOFTWARE.
|
| 25 |
+
# =============================================================================
|
| 26 |
+
|
| 27 |
+
'''
|
| 28 |
+
The liac-arff module implements functions to read and write ARFF files in
|
| 29 |
+
Python. It was created in the Connectionist Artificial Intelligence Laboratory
|
| 30 |
+
(LIAC), which takes place at the Federal University of Rio Grande do Sul
|
| 31 |
+
(UFRGS), in Brazil.
|
| 32 |
+
|
| 33 |
+
ARFF (Attribute-Relation File Format) is an file format specially created for
|
| 34 |
+
describe datasets which are commonly used for machine learning experiments and
|
| 35 |
+
software. This file format was created to be used in Weka, the best
|
| 36 |
+
representative software for machine learning automated experiments.
|
| 37 |
+
|
| 38 |
+
An ARFF file can be divided into two sections: header and data. The Header
|
| 39 |
+
describes the metadata of the dataset, including a general description of the
|
| 40 |
+
dataset, its name and its attributes. The source below is an example of a
|
| 41 |
+
header section in a XOR dataset::
|
| 42 |
+
|
| 43 |
+
%
|
| 44 |
+
% XOR Dataset
|
| 45 |
+
%
|
| 46 |
+
% Created by Renato Pereira
|
| 47 |
+
% rppereira@inf.ufrgs.br
|
| 48 |
+
% http://inf.ufrgs.br/~rppereira
|
| 49 |
+
%
|
| 50 |
+
%
|
| 51 |
+
@RELATION XOR
|
| 52 |
+
|
| 53 |
+
@ATTRIBUTE input1 REAL
|
| 54 |
+
@ATTRIBUTE input2 REAL
|
| 55 |
+
@ATTRIBUTE y REAL
|
| 56 |
+
|
| 57 |
+
The Data section of an ARFF file describes the observations of the dataset, in
|
| 58 |
+
the case of XOR dataset::
|
| 59 |
+
|
| 60 |
+
@DATA
|
| 61 |
+
0.0,0.0,0.0
|
| 62 |
+
0.0,1.0,1.0
|
| 63 |
+
1.0,0.0,1.0
|
| 64 |
+
1.0,1.0,0.0
|
| 65 |
+
%
|
| 66 |
+
%
|
| 67 |
+
%
|
| 68 |
+
|
| 69 |
+
Notice that several lines are starting with an ``%`` symbol, denoting a
|
| 70 |
+
comment, thus, lines with ``%`` at the beginning will be ignored, except by the
|
| 71 |
+
description part at the beginning of the file. The declarations ``@RELATION``,
|
| 72 |
+
``@ATTRIBUTE``, and ``@DATA`` are all case insensitive and obligatory.
|
| 73 |
+
|
| 74 |
+
For more information and details about the ARFF file description, consult
|
| 75 |
+
http://www.cs.waikato.ac.nz/~ml/weka/arff.html
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
ARFF Files in Python
|
| 79 |
+
~~~~~~~~~~~~~~~~~~~~
|
| 80 |
+
|
| 81 |
+
This module uses built-ins python objects to represent a deserialized ARFF
|
| 82 |
+
file. A dictionary is used as the container of the data and metadata of ARFF,
|
| 83 |
+
and have the following keys:
|
| 84 |
+
|
| 85 |
+
- **description**: (OPTIONAL) a string with the description of the dataset.
|
| 86 |
+
- **relation**: (OBLIGATORY) a string with the name of the dataset.
|
| 87 |
+
- **attributes**: (OBLIGATORY) a list of attributes with the following
|
| 88 |
+
template::
|
| 89 |
+
|
| 90 |
+
(attribute_name, attribute_type)
|
| 91 |
+
|
| 92 |
+
the attribute_name is a string, and attribute_type must be an string
|
| 93 |
+
or a list of strings.
|
| 94 |
+
- **data**: (OBLIGATORY) a list of data instances. Each data instance must be
|
| 95 |
+
a list with values, depending on the attributes.
|
| 96 |
+
|
| 97 |
+
The above keys must follow the case which were described, i.e., the keys are
|
| 98 |
+
case sensitive. The attribute type ``attribute_type`` must be one of these
|
| 99 |
+
strings (they are not case sensitive): ``NUMERIC``, ``INTEGER``, ``REAL`` or
|
| 100 |
+
``STRING``. For nominal attributes, the ``atribute_type`` must be a list of
|
| 101 |
+
strings.
|
| 102 |
+
|
| 103 |
+
In this format, the XOR dataset presented above can be represented as a python
|
| 104 |
+
object as::
|
| 105 |
+
|
| 106 |
+
xor_dataset = {
|
| 107 |
+
'description': 'XOR Dataset',
|
| 108 |
+
'relation': 'XOR',
|
| 109 |
+
'attributes': [
|
| 110 |
+
('input1', 'REAL'),
|
| 111 |
+
('input2', 'REAL'),
|
| 112 |
+
('y', 'REAL'),
|
| 113 |
+
],
|
| 114 |
+
'data': [
|
| 115 |
+
[0.0, 0.0, 0.0],
|
| 116 |
+
[0.0, 1.0, 1.0],
|
| 117 |
+
[1.0, 0.0, 1.0],
|
| 118 |
+
[1.0, 1.0, 0.0]
|
| 119 |
+
]
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
Features
|
| 124 |
+
~~~~~~~~
|
| 125 |
+
|
| 126 |
+
This module provides several features, including:
|
| 127 |
+
|
| 128 |
+
- Read and write ARFF files using python built-in structures, such dictionaries
|
| 129 |
+
and lists;
|
| 130 |
+
- Supports `scipy.sparse.coo <http://docs.scipy
|
| 131 |
+
.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html#scipy.sparse.coo_matrix>`_
|
| 132 |
+
and lists of dictionaries as used by SVMLight
|
| 133 |
+
- Supports the following attribute types: NUMERIC, REAL, INTEGER, STRING, and
|
| 134 |
+
NOMINAL;
|
| 135 |
+
- Has an interface similar to other built-in modules such as ``json``, or
|
| 136 |
+
``zipfile``;
|
| 137 |
+
- Supports read and write the descriptions of files;
|
| 138 |
+
- Supports missing values and names with spaces;
|
| 139 |
+
- Supports unicode values and names;
|
| 140 |
+
- Fully compatible with Python 2.7+, Python 3.5+, pypy and pypy3;
|
| 141 |
+
- Under `MIT License <http://opensource.org/licenses/MIT>`_
|
| 142 |
+
|
| 143 |
+
'''
|
| 144 |
+
__author__ = 'Renato de Pontes Pereira, Matthias Feurer, Joel Nothman'
|
| 145 |
+
__author_email__ = ('renato.ppontes@gmail.com, '
|
| 146 |
+
'feurerm@informatik.uni-freiburg.de, '
|
| 147 |
+
'joel.nothman@gmail.com')
|
| 148 |
+
__version__ = '2.4.0'
|
| 149 |
+
|
| 150 |
+
import re
|
| 151 |
+
import csv
|
| 152 |
+
from typing import TYPE_CHECKING
|
| 153 |
+
from typing import Optional, List, Dict, Any, Iterator, Union, Tuple
|
| 154 |
+
|
| 155 |
+
# CONSTANTS ===================================================================
|
| 156 |
+
_SIMPLE_TYPES = ['NUMERIC', 'REAL', 'INTEGER', 'STRING']
|
| 157 |
+
|
| 158 |
+
_TK_DESCRIPTION = '%'
|
| 159 |
+
_TK_COMMENT = '%'
|
| 160 |
+
_TK_RELATION = '@RELATION'
|
| 161 |
+
_TK_ATTRIBUTE = '@ATTRIBUTE'
|
| 162 |
+
_TK_DATA = '@DATA'
|
| 163 |
+
|
| 164 |
+
_RE_RELATION = re.compile(r'^([^\{\}%,\s]*|\".*\"|\'.*\')$', re.UNICODE)
|
| 165 |
+
_RE_ATTRIBUTE = re.compile(r'^(\".*\"|\'.*\'|[^\{\}%,\s]*)\s+(.+)$', re.UNICODE)
|
| 166 |
+
_RE_QUOTE_CHARS = re.compile(r'["\'\\\s%,\000-\031]', re.UNICODE)
|
| 167 |
+
_RE_ESCAPE_CHARS = re.compile(r'(?=["\'\\%])|[\n\r\t\000-\031]')
|
| 168 |
+
_RE_SPARSE_LINE = re.compile(r'^\s*\{.*\}\s*$', re.UNICODE)
|
| 169 |
+
_RE_NONTRIVIAL_DATA = re.compile('["\'{}\\s]', re.UNICODE)
|
| 170 |
+
|
| 171 |
+
ArffDenseDataType = Iterator[List]
|
| 172 |
+
ArffSparseDataType = Tuple[List, ...]
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
if TYPE_CHECKING:
|
| 176 |
+
# typing_extensions is available when mypy is installed
|
| 177 |
+
from typing_extensions import TypedDict
|
| 178 |
+
|
| 179 |
+
class ArffContainerType(TypedDict):
|
| 180 |
+
description: str
|
| 181 |
+
relation: str
|
| 182 |
+
attributes: List
|
| 183 |
+
data: Union[ArffDenseDataType, ArffSparseDataType]
|
| 184 |
+
|
| 185 |
+
else:
|
| 186 |
+
ArffContainerType = Dict[str, Any]
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def _build_re_values():
|
| 190 |
+
quoted_re = r'''
|
| 191 |
+
" # open quote followed by zero or more of:
|
| 192 |
+
(?:
|
| 193 |
+
(?<!\\) # no additional backslash
|
| 194 |
+
(?:\\\\)* # maybe escaped backslashes
|
| 195 |
+
\\" # escaped quote
|
| 196 |
+
|
|
| 197 |
+
\\[^"] # escaping a non-quote
|
| 198 |
+
|
|
| 199 |
+
[^"\\] # non-quote char
|
| 200 |
+
)*
|
| 201 |
+
" # close quote
|
| 202 |
+
'''
|
| 203 |
+
# a value is surrounded by " or by ' or contains no quotables
|
| 204 |
+
value_re = r'''(?:
|
| 205 |
+
%s| # a value may be surrounded by "
|
| 206 |
+
%s| # or by '
|
| 207 |
+
[^,\s"'{}]+ # or may contain no characters requiring quoting
|
| 208 |
+
)''' % (quoted_re,
|
| 209 |
+
quoted_re.replace('"', "'"))
|
| 210 |
+
|
| 211 |
+
# This captures (value, error) groups. Because empty values are allowed,
|
| 212 |
+
# we cannot just look for empty values to handle syntax errors.
|
| 213 |
+
# We presume the line has had ',' prepended...
|
| 214 |
+
dense = re.compile(r'''(?x)
|
| 215 |
+
, # may follow ','
|
| 216 |
+
\s*
|
| 217 |
+
((?=,)|$|{value_re}) # empty or value
|
| 218 |
+
|
|
| 219 |
+
(\S.*) # error
|
| 220 |
+
'''.format(value_re=value_re))
|
| 221 |
+
|
| 222 |
+
# This captures (key, value) groups and will have an empty key/value
|
| 223 |
+
# in case of syntax errors.
|
| 224 |
+
# It does not ensure that the line starts with '{' or ends with '}'.
|
| 225 |
+
sparse = re.compile(r'''(?x)
|
| 226 |
+
(?:^\s*\{|,) # may follow ',', or '{' at line start
|
| 227 |
+
\s*
|
| 228 |
+
(\d+) # attribute key
|
| 229 |
+
\s+
|
| 230 |
+
(%(value_re)s) # value
|
| 231 |
+
|
|
| 232 |
+
(?!}\s*$) # not an error if it's }$
|
| 233 |
+
(?!^\s*{\s*}\s*$) # not an error if it's ^{}$
|
| 234 |
+
\S.* # error
|
| 235 |
+
''' % {'value_re': value_re})
|
| 236 |
+
return dense, sparse
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
_RE_DENSE_VALUES, _RE_SPARSE_KEY_VALUES = _build_re_values()
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
_ESCAPE_SUB_MAP = {
|
| 244 |
+
'\\\\': '\\',
|
| 245 |
+
'\\"': '"',
|
| 246 |
+
"\\'": "'",
|
| 247 |
+
'\\t': '\t',
|
| 248 |
+
'\\n': '\n',
|
| 249 |
+
'\\r': '\r',
|
| 250 |
+
'\\b': '\b',
|
| 251 |
+
'\\f': '\f',
|
| 252 |
+
'\\%': '%',
|
| 253 |
+
}
|
| 254 |
+
_UNESCAPE_SUB_MAP = {chr(i): '\\%03o' % i for i in range(32)}
|
| 255 |
+
_UNESCAPE_SUB_MAP.update({v: k for k, v in _ESCAPE_SUB_MAP.items()})
|
| 256 |
+
_UNESCAPE_SUB_MAP[''] = '\\'
|
| 257 |
+
_ESCAPE_SUB_MAP.update({'\\%d' % i: chr(i) for i in range(10)})
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def _escape_sub_callback(match):
|
| 261 |
+
s = match.group()
|
| 262 |
+
if len(s) == 2:
|
| 263 |
+
try:
|
| 264 |
+
return _ESCAPE_SUB_MAP[s]
|
| 265 |
+
except KeyError:
|
| 266 |
+
raise ValueError('Unsupported escape sequence: %s' % s)
|
| 267 |
+
if s[1] == 'u':
|
| 268 |
+
return chr(int(s[2:], 16))
|
| 269 |
+
else:
|
| 270 |
+
return chr(int(s[1:], 8))
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def _unquote(v):
|
| 274 |
+
if v[:1] in ('"', "'"):
|
| 275 |
+
return re.sub(r'\\([0-9]{1,3}|u[0-9a-f]{4}|.)', _escape_sub_callback,
|
| 276 |
+
v[1:-1])
|
| 277 |
+
elif v in ('?', ''):
|
| 278 |
+
return None
|
| 279 |
+
else:
|
| 280 |
+
return v
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _parse_values(s):
|
| 284 |
+
'''(INTERNAL) Split a line into a list of values'''
|
| 285 |
+
if not _RE_NONTRIVIAL_DATA.search(s):
|
| 286 |
+
# Fast path for trivial cases (unfortunately we have to handle missing
|
| 287 |
+
# values because of the empty string case :(.)
|
| 288 |
+
return [None if s in ('?', '') else s
|
| 289 |
+
for s in next(csv.reader([s]))]
|
| 290 |
+
|
| 291 |
+
# _RE_DENSE_VALUES tokenizes despite quoting, whitespace, etc.
|
| 292 |
+
values, errors = zip(*_RE_DENSE_VALUES.findall(',' + s))
|
| 293 |
+
if not any(errors):
|
| 294 |
+
return [_unquote(v) for v in values]
|
| 295 |
+
if _RE_SPARSE_LINE.match(s):
|
| 296 |
+
try:
|
| 297 |
+
return {int(k): _unquote(v)
|
| 298 |
+
for k, v in _RE_SPARSE_KEY_VALUES.findall(s)}
|
| 299 |
+
except ValueError:
|
| 300 |
+
# an ARFF syntax error in sparse data
|
| 301 |
+
for match in _RE_SPARSE_KEY_VALUES.finditer(s):
|
| 302 |
+
if not match.group(1):
|
| 303 |
+
raise BadLayout('Error parsing %r' % match.group())
|
| 304 |
+
raise BadLayout('Unknown parsing error')
|
| 305 |
+
else:
|
| 306 |
+
# an ARFF syntax error
|
| 307 |
+
for match in _RE_DENSE_VALUES.finditer(s):
|
| 308 |
+
if match.group(2):
|
| 309 |
+
raise BadLayout('Error parsing %r' % match.group())
|
| 310 |
+
raise BadLayout('Unknown parsing error')
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
DENSE = 0 # Constant value representing a dense matrix
|
| 314 |
+
COO = 1 # Constant value representing a sparse matrix in coordinate format
|
| 315 |
+
LOD = 2 # Constant value representing a sparse matrix in list of
|
| 316 |
+
# dictionaries format
|
| 317 |
+
DENSE_GEN = 3 # Generator of dictionaries
|
| 318 |
+
LOD_GEN = 4 # Generator of dictionaries
|
| 319 |
+
_SUPPORTED_DATA_STRUCTURES = [DENSE, COO, LOD, DENSE_GEN, LOD_GEN]
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
# EXCEPTIONS ==================================================================
|
| 323 |
+
class ArffException(Exception):
    """Base class for all ARFF parsing/encoding errors.

    Subclasses supply ``message``, a %%-style template containing a single
    ``%d`` placeholder that is filled with the line number where the error
    occurred.
    """

    # Template filled with the offending line number by __str__.
    message: Optional[str] = None

    def __init__(self):
        # -1 until the decoder records the actual line number.
        self.line = -1

    def __str__(self):
        return self.message % self.line
|
| 331 |
+
|
| 332 |
+
class BadRelationFormat(ArffException):
    '''Error raised when the relation declaration is in an invalid format.'''
    # %d is replaced with the line number by ArffException.__str__.
    message = 'Bad @RELATION format, at line %d.'
|
| 335 |
+
|
| 336 |
+
class BadAttributeFormat(ArffException):
    '''Error raised when some attribute declaration is in an invalid format.'''
    # %d is replaced with the line number by ArffException.__str__.
    message = 'Bad @ATTRIBUTE format, at line %d.'
|
| 339 |
+
|
| 340 |
+
class BadDataFormat(ArffException):
    '''Error raised when some data instance is in an invalid format.'''

    def __init__(self, value):
        super().__init__()
        # Keep the %d placeholder (filled later with the line number) and
        # append the offending raw row for context.
        self.message = 'Bad @DATA instance format in line %d: ' + ('%s' % value)
|
| 348 |
+
|
| 349 |
+
class BadAttributeType(ArffException):
    '''Error raised when some invalid type is provided into the attribute
    declaration.'''
    # %d is replaced with the line number by ArffException.__str__.
    message = 'Bad @ATTRIBUTE type, at line %d.'
|
| 353 |
+
|
| 354 |
+
class BadAttributeName(ArffException):
    '''Error raised when an attribute name is provided twice the attribute
    declaration.'''

    def __init__(self, value, value2):
        super().__init__()
        # value: the duplicated name; value2: the line of the first use.
        # The middle '%d' is filled with the current line by __str__.
        prefix = 'Bad @ATTRIBUTE name %s at line' % value
        suffix = ' %d.' % value2
        self.message = (prefix
                        + ' %d, this name is already in use in line'
                        + suffix)
|
| 365 |
+
|
| 366 |
+
class BadNominalValue(ArffException):
    '''Error raised when a value in used in some data instance but is not
    declared into it respective attribute declaration.'''

    def __init__(self, value):
        super().__init__()
        # The trailing '%d' is filled with the line number by __str__.
        self.message = ('Data value %s not found in nominal declaration, '
                        % value) + 'at line %d.'
|
| 376 |
+
|
| 377 |
+
class BadNominalFormatting(ArffException):
    '''Error raised when a nominal value with space is not properly quoted.'''

    def __init__(self, value):
        super().__init__()
        # The trailing '%d' is filled with the line number by __str__.
        self.message = ('Nominal data value "%s" not properly quoted in line '
                        % value) + '%d.'
|
| 385 |
+
|
| 386 |
+
class BadNumericalValue(ArffException):
    '''Error raised when and invalid numerical value is used in some data
    instance.'''
    # %d is replaced with the line number by ArffException.__str__.
    message = 'Invalid numerical value, at line %d.'
|
| 390 |
+
|
| 391 |
+
class BadStringValue(ArffException):
    '''Error raise when a string contains space but is not quoted.'''
    # %d is replaced with the line number by ArffException.__str__.
    message = 'Invalid string value at line %d.'
|
| 394 |
+
|
| 395 |
+
class BadLayout(ArffException):
    '''Error raised when the layout of the ARFF file has something wrong.'''
    message = 'Invalid layout of the ARFF file, at line %d.'

    def __init__(self, msg=''):
        super().__init__()
        if not msg:
            return
        # Escape literal '%' so the extra detail survives the later
        # %-formatting performed by ArffException.__str__.
        detail = msg.replace('%', '%%')
        self.message = BadLayout.message + ' ' + detail
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
class BadObject(ArffException):
    '''Error raised when the object representing the ARFF file has something
    wrong.'''
    # Unlike its siblings this exception carries a plain message and does not
    # interpolate a line number (note: super().__init__ is not called, so
    # self.line is never set).
    def __init__(self, msg='Invalid object.'):
        self.msg = msg

    def __str__(self):
        return '%s' % self.msg
|
| 413 |
+
|
| 414 |
+
# =============================================================================
|
| 415 |
+
|
| 416 |
+
# INTERNAL ====================================================================
|
| 417 |
+
def _unescape_sub_callback(match):
    # re.sub callback: replace the matched character with its entry in the
    # module-level _UNESCAPE_SUB_MAP table (used by encode_string below).
    return _UNESCAPE_SUB_MAP[match.group()]
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def encode_string(s):
    '''Quote and escape *s* when it contains characters that require quoting
    (per _RE_QUOTE_CHARS); otherwise return it unchanged.'''
    if not _RE_QUOTE_CHARS.search(s):
        return s
    escaped = _RE_ESCAPE_CHARS.sub(_unescape_sub_callback, s)
    return "'%s'" % escaped
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
class EncodedNominalConversor:
    """Convert a nominal value to its integer index in declaration order."""

    def __init__(self, values):
        # Build the value -> index lookup table.
        mapping = {}
        for index, value in enumerate(values):
            mapping[value] = index
        # The int key 0 covers values left implicit in sparse rows.
        mapping[0] = 0
        self.values = mapping

    def __call__(self, value):
        try:
            return self.values[value]
        except KeyError:
            # Value never declared for this attribute.
            raise BadNominalValue(value)
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
class NominalConversor:
    """Validate a nominal value against its declaration; return it as str."""

    def __init__(self, values):
        self.values = set(values)
        # First declared value: substituted for implicit sparse zeros.
        self.zero_value = values[0]

    def __call__(self, value):
        if value in self.values:
            return str(value)
        if value == 0:
            # Sparse decode.
            # See issue #52: nominals should take their first value when
            # unspecified in a sparse matrix. Naturally, this is consistent
            # with EncodedNominalConversor.
            return self.zero_value
        raise BadNominalValue(value)
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
class DenseGeneratorData:
    '''Internal helper class to allow for different matrix types without
    making the code a huge collection of if statements.

    Rows are produced lazily: :meth:`decode_rows` is a generator yielding one
    list of converted values per input line.
    '''

    def decode_rows(self, stream, conversors):
        """Yield one decoded row (a list of values) per line in *stream*.

        :param stream: iterable of raw data lines.
        :param conversors: one callable per declared attribute, applied
            positionally.
        :raises BadDataFormat: when a row's length (dense) or maximum index
            (sparse) does not match the declared attributes.
        """
        for row in stream:
            values = _parse_values(row)

            if isinstance(values, dict):
                # Sparse row in a dense reader: expand to a full list.
                if values and max(values) >= len(conversors):
                    raise BadDataFormat(row)
                # XXX: int 0 is used for implicit values, not '0'
                values = [values[i] if i in values else 0 for i in
                          range(len(conversors))]
            else:
                if len(values) != len(conversors):
                    raise BadDataFormat(row)

            yield self._decode_values(values, conversors)

    @staticmethod
    def _decode_values(values, conversors):
        """Apply *conversors* to *values*; None (missing) passes through."""
        try:
            values = [None if value is None else conversor(value)
                      for conversor, value
                      in zip(conversors, values)]
        except ValueError as exc:
            if 'float: ' in str(exc):
                raise BadNumericalValue()
            # BUGFIX: previously a non-float ValueError fell through and the
            # *unconverted* row was returned silently. Re-raise instead, which
            # is consistent with COOData/LODGeneratorData.
            raise
        return values

    def encode_data(self, data, attributes):
        '''(INTERNAL) Encodes a line of data.

        Data instances follow the csv format, i.e, attribute values are
        delimited by commas. After converted from csv.

        :param data: a list of values.
        :param attributes: a list of attributes. Used to check if data is valid.
        :return: a string with the encoded data line.
        '''
        current_row = 0

        for inst in data:
            if len(inst) != len(attributes):
                raise BadObject(
                    'Instance %d has %d attributes, expected %d' %
                    (current_row, len(inst), len(attributes))
                )

            new_data = []
            for value in inst:
                # None, '' and NaN (value != value) all encode as missing.
                if value is None or value == '' or value != value:
                    s = '?'
                else:
                    s = encode_string(str(value))
                new_data.append(s)

            current_row += 1
            yield ','.join(new_data)
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
class _DataListMixin:
    """Mixin to return a list from decode_rows instead of a generator"""
    def decode_rows(self, stream, conversors):
        # Eagerly materialize the cooperating generator (MRO sibling).
        return list(super().decode_rows(stream, conversors))
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
class Data(_DataListMixin, DenseGeneratorData):
    # Dense reader whose decode_rows returns a fully materialized list of rows.
    pass
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
class COOData:
    # Decodes/encodes sparse data as scipy-style COO (coordinate) triplets.

    def decode_rows(self, stream, conversors):
        # Accumulators for the (data, row-index, col-index) triplets.
        data, rows, cols = [], [], []
        for i, row in enumerate(stream):
            values = _parse_values(row)
            if not isinstance(values, dict):
                # Dense rows are not acceptable when decoding to COO.
                raise BadLayout()
            if not values:
                # Row with only implicit zeros: contributes no entries.
                continue
            row_cols, values = zip(*sorted(values.items()))
            try:
                values = [value if value is None else conversors[key](value)
                          for key, value in zip(row_cols, values)]
            except ValueError as exc:
                # Float conversors embed 'float: ' in their error message.
                if 'float: ' in str(exc):
                    raise BadNumericalValue()
                raise
            except IndexError:
                # conversor out of range
                raise BadDataFormat(row)

            data.extend(values)
            rows.extend([i] * len(values))
            cols.extend(row_cols)

        return data, rows, cols

    def encode_data(self, data, attributes):
        # *data* is expected to expose .row/.col/.data (scipy.sparse COO-like).
        num_attributes = len(attributes)
        new_data = []
        current_row = 0

        row = data.row
        col = data.col
        data = data.data

        # Check if the rows are sorted
        if not all(row[i] <= row[i + 1] for i in range(len(row) - 1)):
            raise ValueError("liac-arff can only output COO matrices with "
                             "sorted rows.")

        # NOTE: v/col/row are rebound per entry here, shadowing the arrays
        # read above — intentional, but easy to misread.
        for v, col, row in zip(data, col, row):
            if row > current_row:
                # Add empty rows if necessary
                while current_row < row:
                    yield " ".join(["{", ','.join(new_data), "}"])
                    new_data = []
                    current_row += 1

            if col >= num_attributes:
                raise BadObject(
                    'Instance %d has at least %d attributes, expected %d' %
                    (current_row, col + 1, num_attributes)
                )

            # None, '' and NaN (v != v) all encode as missing ('?').
            if v is None or v == '' or v != v:
                s = '?'
            else:
                s = encode_string(str(v))
            new_data.append("%d %s" % (col, s))

        # Emit the final buffered row.
        yield " ".join(["{", ','.join(new_data), "}"])
|
| 590 |
+
|
| 591 |
+
class LODGeneratorData:
    # Decodes/encodes sparse data as a lazy stream of {col: value} dicts.

    def decode_rows(self, stream, conversors):
        for row in stream:
            values = _parse_values(row)

            if not isinstance(values, dict):
                # Dense rows are not acceptable in LOD decoding.
                raise BadLayout()
            try:
                yield {key: None if value is None else conversors[key](value)
                       for key, value in values.items()}
            except ValueError as exc:
                # Float conversors embed 'float: ' in their error message.
                if 'float: ' in str(exc):
                    raise BadNumericalValue()
                raise
            except IndexError:
                # conversor out of range
                raise BadDataFormat(row)

    def encode_data(self, data, attributes):
        current_row = 0

        num_attributes = len(attributes)
        for row in data:
            new_data = []

            # Reject rows referencing columns beyond the declared attributes.
            if len(row) > 0 and max(row) >= num_attributes:
                raise BadObject(
                    'Instance %d has %d attributes, expected %d' %
                    (current_row, max(row) + 1, num_attributes)
                )

            # Emit entries in ascending column order.
            for col in sorted(row):
                v = row[col]
                # None, '' and NaN (v != v) all encode as missing ('?').
                if v is None or v == '' or v != v:
                    s = '?'
                else:
                    s = encode_string(str(v))
                new_data.append("%d %s" % (col, s))

            current_row += 1
            yield " ".join(["{", ','.join(new_data), "}"])
|
| 632 |
+
|
| 633 |
+
class LODData(_DataListMixin, LODGeneratorData):
    # List-of-dicts reader whose decode_rows returns a materialized list.
    pass
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
def _get_data_object_for_decoding(matrix_type):
    """Return the decoder helper instance matching *matrix_type*."""
    # Equality-based dispatch table (order mirrors the module constants).
    dispatch = (
        (DENSE, Data),
        (COO, COOData),
        (LOD, LODData),
        (DENSE_GEN, DenseGeneratorData),
        (LOD_GEN, LODGeneratorData),
    )
    for candidate, factory in dispatch:
        if matrix_type == candidate:
            return factory()
    raise ValueError("Matrix type %s not supported." % str(matrix_type))
|
| 650 |
+
|
| 651 |
+
def _get_data_object_for_encoding(matrix):
    """Pick an encoder helper based on the shape of *matrix*."""
    # A 'format' attribute marks a scipy.sparse matrix; only COO is writable.
    if hasattr(matrix, 'format'):
        if matrix.format != 'coo':
            raise ValueError('Cannot guess matrix format!')
        return COOData()
    # A sequence of dicts means sparse rows; anything else is dense.
    if isinstance(matrix[0], dict):
        return LODData()
    return Data()
|
| 662 |
+
|
| 663 |
+
# =============================================================================
|
| 664 |
+
|
| 665 |
+
# ADVANCED INTERFACE ==========================================================
|
| 666 |
+
class ArffDecoder:
    '''An ARFF decoder.'''

    def __init__(self):
        '''Constructor.'''
        # One conversor callable per declared attribute, in declaration order.
        self._conversors = []
        # 1-based counter of the line being processed; used in error reports.
        self._current_line = 0

    def _decode_comment(self, s):
        '''(INTERNAL) Decodes a comment line.

        Comments are single line strings starting, obligatorily, with the ``%``
        character, and can have any symbol, including whitespaces or special
        characters.

        This method must receive a normalized string, i.e., a string without
        padding, including the "\r\n" characters.

        :param s: a normalized string.
        :return: a string with the decoded comment.
        '''
        # Strip the leading '%' and at most one following space.
        res = re.sub(r'^\%( )?', '', s)
        return res

    def _decode_relation(self, s):
        '''(INTERNAL) Decodes a relation line.

        The relation declaration is a line with the format ``@RELATION
        <relation-name>``, where ``relation-name`` is a string. The string must
        start with alphabetic character and must be quoted if the name includes
        spaces, otherwise this method will raise a `BadRelationFormat` exception.

        This method must receive a normalized string, i.e., a string without
        padding, including the "\r\n" characters.

        :param s: a normalized string.
        :return: a string with the decoded relation name.
        '''
        # Split off the '@RELATION' token; the remainder is the name.
        _, v = s.split(' ', 1)
        v = v.strip()

        if not _RE_RELATION.match(v):
            raise BadRelationFormat()

        # Drop surrounding quotes, if any.
        res = str(v.strip('"\''))
        return res

    def _decode_attribute(self, s):
        '''(INTERNAL) Decodes an attribute line.

        The attribute is the most complex declaration in an arff file. All
        attributes must follow the template::

             @attribute <attribute-name> <datatype>

        where ``attribute-name`` is a string, quoted if the name contains any
        whitespace, and ``datatype`` can be:

        - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
        - Strings as ``STRING``.
        - Dates (NOT IMPLEMENTED).
        - Nominal attributes with format:

            {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}

        The nominal names follow the rules for the attribute names, i.e., they
        must be quoted if the name contains whitespaces.

        This method must receive a normalized string, i.e., a string without
        padding, including the "\r\n" characters.

        :param s: a normalized string.
        :return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
        '''
        # Split off the '@ATTRIBUTE' token.
        _, v = s.split(' ', 1)
        v = v.strip()

        # Verify the general structure of declaration
        m = _RE_ATTRIBUTE.match(v)
        if not m:
            raise BadAttributeFormat()

        # Extracts the raw name and type
        name, type_ = m.groups()

        # Extracts the final name
        name = str(name.strip('"\''))

        # Extracts the final type
        if type_[:1] == "{" and type_[-1:] == "}":
            # Nominal declaration: parse the brace-enclosed value list.
            try:
                type_ = _parse_values(type_.strip('{} '))
            except Exception:
                raise BadAttributeType()
            if isinstance(type_, dict):
                # A sparse-style token list is not a valid nominal spec.
                raise BadAttributeType()

        else:
            # If not nominal, verify the type name
            type_ = str(type_).upper()
            if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
                raise BadAttributeType()

        return (name, type_)

    def _decode(self, s, encode_nominal=False, matrix_type=DENSE):
        '''Do the job the ``encode``.'''

        # Make sure this method is idempotent
        self._current_line = 0

        # If string, convert to a list of lines
        if isinstance(s, str):
            s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')

        # Create the return object
        obj: ArffContainerType = {
            'description': '',
            'relation': '',
            'attributes': [],
            'data': []
        }
        # Maps attribute name -> line where it was first declared; used to
        # report duplicates.
        attribute_names = {}

        # Create the data helper object
        data = _get_data_object_for_decoding(matrix_type)

        # Read all lines
        # Header state machine: DESCRIPTION -> RELATION -> ATTRIBUTE -> @DATA.
        STATE = _TK_DESCRIPTION
        s = iter(s)
        for row in s:
            self._current_line += 1
            # Ignore empty lines
            row = row.strip(' \r\n')
            if not row: continue

            u_row = row.upper()

            # DESCRIPTION -----------------------------------------------------
            if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
                obj['description'] += self._decode_comment(row) + '\n'
            # -----------------------------------------------------------------

            # RELATION --------------------------------------------------------
            elif u_row.startswith(_TK_RELATION):
                if STATE != _TK_DESCRIPTION:
                    raise BadLayout()

                STATE = _TK_RELATION
                obj['relation'] = self._decode_relation(row)
            # -----------------------------------------------------------------

            # ATTRIBUTE -------------------------------------------------------
            elif u_row.startswith(_TK_ATTRIBUTE):
                if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE:
                    raise BadLayout()

                STATE = _TK_ATTRIBUTE

                attr = self._decode_attribute(row)
                if attr[0] in attribute_names:
                    raise BadAttributeName(attr[0], attribute_names[attr[0]])
                else:
                    attribute_names[attr[0]] = self._current_line
                obj['attributes'].append(attr)

                # Choose the value conversor: nominal declarations get a
                # (possibly label-encoding) conversor object, simple types map
                # to plain constructors.
                if isinstance(attr[1], (list, tuple)):
                    if encode_nominal:
                        conversor = EncodedNominalConversor(attr[1])
                    else:
                        conversor = NominalConversor(attr[1])
                else:
                    CONVERSOR_MAP = {'STRING': str,
                                     'INTEGER': lambda x: int(float(x)),
                                     'NUMERIC': float,
                                     'REAL': float}
                    conversor = CONVERSOR_MAP[attr[1]]

                self._conversors.append(conversor)
            # -----------------------------------------------------------------

            # DATA ------------------------------------------------------------
            elif u_row.startswith(_TK_DATA):
                if STATE != _TK_ATTRIBUTE:
                    raise BadLayout()

                # Header finished; the remaining lines are data rows.
                break
            # -----------------------------------------------------------------

            # COMMENT ---------------------------------------------------------
            elif u_row.startswith(_TK_COMMENT):
                pass
            # -----------------------------------------------------------------
            else:
                # Never found @DATA
                raise BadLayout()

        def stream():
            # Continue consuming the *same* iterator: everything after @DATA.
            for row in s:
                self._current_line += 1
                row = row.strip()
                # Ignore empty lines and comment lines.
                if row and not row.startswith(_TK_COMMENT):
                    yield row

        # Alter the data object
        obj['data'] = data.decode_rows(stream(), self._conversors)
        # Trim the '\n' appended after the last description line above.
        if obj['description'].endswith('\n'):
            obj['description'] = obj['description'][:-1]

        return obj

    def decode(self, s, encode_nominal=False, return_type=DENSE):
        '''Returns the Python representation of a given ARFF file.

        When a file object is passed as an argument, this method reads lines
        iteratively, avoiding to load unnecessary information to the memory.

        :param s: a string or file object with the ARFF file.
        :param encode_nominal: boolean, if True perform a label encoding
            while reading the .arff file.
        :param return_type: determines the data structure used to store the
            dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
            `arff.DENSE_GEN` or `arff.LOD_GEN`.
            Consult the sections on `working with sparse data`_ and `loading
            progressively`_.
        '''
        try:
            return self._decode(s, encode_nominal=encode_nominal,
                                matrix_type=return_type)
        except ArffException as e:
            # Attach the line number at which decoding stopped.
            e.line = self._current_line
            raise e
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
class ArffEncoder:
    '''An ARFF encoder.'''

    def _encode_comment(self, s=''):
        '''(INTERNAL) Encodes a comment line.

        Comments are single line strings starting, obligatorily, with the ``%``
        character, and can have any symbol, including whitespaces or special
        characters.

        If ``s`` is None, this method will simply return an empty comment.

        :param s: (OPTIONAL) string.
        :return: a string with the encoded comment line.
        '''
        if s:
            return '%s %s'%(_TK_COMMENT, s)
        else:
            # Empty comment: just the '%' marker.
            return '%s' % _TK_COMMENT

    def _encode_relation(self, name):
        '''(INTERNAL) Decodes a relation line.

        The relation declaration is a line with the format ``@RELATION
        <relation-name>``, where ``relation-name`` is a string.

        :param name: a string.
        :return: a string with the encoded relation declaration.
        '''
        # Quote the name if it contains any character special to ARFF.
        for char in ' %{},':
            if char in name:
                name = '"%s"'%name
                break

        return '%s %s'%(_TK_RELATION, name)

    def _encode_attribute(self, name, type_):
        '''(INTERNAL) Encodes an attribute line.

        The attribute follow the template::

             @attribute <attribute-name> <datatype>

        where ``attribute-name`` is a string, and ``datatype`` can be:

        - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
        - Strings as ``STRING``.
        - Dates (NOT IMPLEMENTED).
        - Nominal attributes with format:

            {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}

        This method must receive a the name of the attribute and its type, if
        the attribute type is nominal, ``type`` must be a list of values.

        :param name: a string.
        :param type_: a string or a list of string.
        :return: a string with the encoded attribute declaration.
        '''
        # Quote the name if it contains any character special to ARFF.
        for char in ' %{},':
            if char in name:
                name = '"%s"'%name
                break

        if isinstance(type_, (tuple, list)):
            # Nominal: render the quoted value list in braces.
            type_tmp = ['%s' % encode_string(type_k) for type_k in type_]
            type_ = '{%s}'%(', '.join(type_tmp))

        return '%s %s %s'%(_TK_ATTRIBUTE, name, type_)

    def encode(self, obj):
        '''Encodes a given object to an ARFF file.

        :param obj: the object containing the ARFF information.
        :return: the ARFF file as an string.
        '''
        data = [row for row in self.iter_encode(obj)]

        return '\n'.join(data)

    def iter_encode(self, obj):
        '''The iterative version of `arff.ArffEncoder.encode`.

        This encodes iteratively a given object and return, one-by-one, the
        lines of the ARFF file.

        :param obj: the object containing the ARFF information.
        :return: (yields) the ARFF file as strings.
        '''
        # DESCRIPTION
        if obj.get('description', None):
            for row in obj['description'].split('\n'):
                yield self._encode_comment(row)

        # RELATION
        if not obj.get('relation'):
            raise BadObject('Relation name not found or with invalid value.')

        yield self._encode_relation(obj['relation'])
        yield ''

        # ATTRIBUTES
        if not obj.get('attributes'):
            raise BadObject('Attributes not found.')

        attribute_names = set()
        for attr in obj['attributes']:
            # Verify for bad object format
            if not isinstance(attr, (tuple, list)) or \
                    len(attr) != 2 or \
                    not isinstance(attr[0], str):
                raise BadObject('Invalid attribute declaration "%s"'%str(attr))

            if isinstance(attr[1], str):
                # Verify for invalid types
                if attr[1] not in _SIMPLE_TYPES:
                    raise BadObject('Invalid attribute type "%s"'%str(attr))

            # Verify for bad object format
            elif not isinstance(attr[1], (tuple, list)):
                raise BadObject('Invalid attribute type "%s"'%str(attr))

            # Verify attribute name is not used twice
            if attr[0] in attribute_names:
                raise BadObject('Trying to use attribute name "%s" for the '
                                'second time.' % str(attr[0]))
            else:
                attribute_names.add(attr[0])

            yield self._encode_attribute(attr[0], attr[1])
        yield ''
        attributes = obj['attributes']

        # DATA
        yield _TK_DATA
        if 'data' in obj:
            # Pick the encoder helper matching the data container's shape.
            data = _get_data_object_for_encoding(obj.get('data'))
            yield from data.encode_data(obj.get('data'), attributes)

        yield ''
|
| 1041 |
+
|
| 1042 |
+
# =============================================================================
|
| 1043 |
+
|
| 1044 |
+
# BASIC INTERFACE =============================================================
|
| 1045 |
+
def load(fp, encode_nominal=False, return_type=DENSE):
    '''Load a file-like object containing the ARFF document and convert it into
    a Python object.

    :param fp: a file-like object.
    :param encode_nominal: boolean, if True perform a label encoding
        while reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
        Consult the sections on `working with sparse data`_ and `loading
        progressively`_.
    :return: a dictionary.
    '''
    # Thin convenience wrapper over the advanced decoder interface.
    return ArffDecoder().decode(fp,
                                encode_nominal=encode_nominal,
                                return_type=return_type)
|
| 1062 |
+
|
| 1063 |
+
def loads(s, encode_nominal=False, return_type=DENSE):
    '''Convert a string instance containing the ARFF document into a Python
    object.

    :param s: a string object.
    :param encode_nominal: boolean, if True perform a label encoding
        while reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
        Consult the sections on `working with sparse data`_ and `loading
        progressively`_.
    :return: a dictionary.
    '''
    # Thin convenience wrapper over the advanced decoder interface.
    return ArffDecoder().decode(s,
                                encode_nominal=encode_nominal,
                                return_type=return_type)
|
| 1080 |
+
|
| 1081 |
+
def dump(obj, fp):
    '''Serialize an object representing the ARFF document to a given file-like
    object.

    :param obj: a dictionary.
    :param fp: a file-like object.
    '''
    lines = ArffEncoder().iter_encode(obj)

    # Write a newline *between* lines only, so the output carries no
    # trailing newline.
    pending = next(lines)
    for line in lines:
        fp.write(pending + '\n')
        pending = line
    fp.write(pending)

    return fp
|
| 1098 |
+
|
| 1099 |
+
def dumps(obj):
    '''Serialize an object representing the ARFF document, returning a string.

    :param obj: a dictionary.
    :return: a string with the ARFF document.
    '''
    # Delegate to the advanced encoder interface.
    return ArffEncoder().encode(obj)
|
| 1107 |
+
# =============================================================================
|
mgm/lib/python3.10/site-packages/sklearn/externals/_lobpcg.py
ADDED
|
@@ -0,0 +1,991 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scikit-learn copy of scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py v1.10
|
| 3 |
+
to be deleted after scipy 1.4 becomes a dependency in scikit-learn
|
| 4 |
+
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
| 5 |
+
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
|
| 6 |
+
|
| 7 |
+
References
|
| 8 |
+
----------
|
| 9 |
+
.. [1] A. V. Knyazev (2001),
|
| 10 |
+
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
|
| 11 |
+
Block Preconditioned Conjugate Gradient Method.
|
| 12 |
+
SIAM Journal on Scientific Computing 23, no. 2,
|
| 13 |
+
pp. 517-541. :doi:`10.1137/S1064827500366124`
|
| 14 |
+
|
| 15 |
+
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
|
| 16 |
+
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
|
| 17 |
+
in hypre and PETSc. :arxiv:`0705.2626`
|
| 18 |
+
|
| 19 |
+
.. [3] A. V. Knyazev's C and MATLAB implementations:
|
| 20 |
+
https://github.com/lobpcg/blopex
|
| 21 |
+
"""
|
| 22 |
+
import inspect
|
| 23 |
+
import warnings
|
| 24 |
+
import numpy as np
|
| 25 |
+
from scipy.linalg import (inv, eigh, cho_factor, cho_solve,
|
| 26 |
+
cholesky, LinAlgError)
|
| 27 |
+
from scipy.sparse.linalg import LinearOperator
|
| 28 |
+
from scipy.sparse import isspmatrix
|
| 29 |
+
from numpy import block as bmat
|
| 30 |
+
|
| 31 |
+
__all__ = ["lobpcg"]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _report_nonhermitian(M, name):
|
| 35 |
+
"""
|
| 36 |
+
Report if `M` is not a Hermitian matrix given its type.
|
| 37 |
+
"""
|
| 38 |
+
from scipy.linalg import norm
|
| 39 |
+
|
| 40 |
+
md = M - M.T.conj()
|
| 41 |
+
nmd = norm(md, 1)
|
| 42 |
+
tol = 10 * np.finfo(M.dtype).eps
|
| 43 |
+
tol = max(tol, tol * norm(M, 1))
|
| 44 |
+
if nmd > tol:
|
| 45 |
+
warnings.warn(
|
| 46 |
+
f"Matrix {name} of the type {M.dtype} is not Hermitian: "
|
| 47 |
+
f"condition: {nmd} < {tol} fails.",
|
| 48 |
+
UserWarning, stacklevel=4
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
def _as2d(ar):
|
| 52 |
+
"""
|
| 53 |
+
If the input array is 2D return it, if it is 1D, append a dimension,
|
| 54 |
+
making it a column vector.
|
| 55 |
+
"""
|
| 56 |
+
if ar.ndim == 2:
|
| 57 |
+
return ar
|
| 58 |
+
else: # Assume 1!
|
| 59 |
+
aux = np.array(ar, copy=False)
|
| 60 |
+
aux.shape = (ar.shape[0], 1)
|
| 61 |
+
return aux
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _makeMatMat(m):
|
| 65 |
+
if m is None:
|
| 66 |
+
return None
|
| 67 |
+
elif callable(m):
|
| 68 |
+
return lambda v: m(v)
|
| 69 |
+
else:
|
| 70 |
+
return lambda v: m @ v
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
|
| 74 |
+
"""Changes blockVectorV in place."""
|
| 75 |
+
YBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
|
| 76 |
+
tmp = cho_solve(factYBY, YBV)
|
| 77 |
+
blockVectorV -= np.dot(blockVectorY, tmp)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None,
                      verbosityLevel=0):
    """in-place B-orthonormalize the given block vector using Cholesky.

    Parameters
    ----------
    B : callable or None
        Mat-mat multiply by the secondary (mass) matrix; ``None`` means
        the identity, in which case ``blockVectorBV`` aliases
        ``blockVectorV``.
    blockVectorV : ndarray
        Block of column vectors to be B-orthonormalized.
    blockVectorBV : ndarray, optional
        Precomputed ``B @ blockVectorV``; computed here if not given.
    verbosityLevel : int, optional
        When nonzero, failures are reported via ``warnings.warn``.

    Returns
    -------
    tuple
        ``(V, BV, VBV, normalization)`` on success; on any failure the
        first three entries are ``None`` so callers can detect breakdown.
    """
    # Column-wise scaling guards the Gram matrix against overflow; eps
    # keeps an all-zero column from producing a division by zero.
    normalization = blockVectorV.max(axis=0) + np.finfo(blockVectorV.dtype).eps
    blockVectorV = blockVectorV / normalization
    if blockVectorBV is None:
        if B is not None:
            try:
                blockVectorBV = B(blockVectorV)
            except Exception as e:
                # Best-effort: signal breakdown to the caller instead of
                # propagating the operator's failure.
                if verbosityLevel:
                    warnings.warn(
                        f"Secondary MatMul call failed with error\n"
                        f"{e}\n",
                        UserWarning, stacklevel=3
                    )
                    return None, None, None, normalization
            if blockVectorBV.shape != blockVectorV.shape:
                raise ValueError(
                    f"The shape {blockVectorV.shape} "
                    f"of the orthogonalized matrix not preserved\n"
                    f"and changed to {blockVectorBV.shape} "
                    f"after multiplying by the secondary matrix.\n"
                )
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    else:
        # Keep BV consistent with the scaling applied to V above.
        blockVectorBV = blockVectorBV / normalization
    VBV = blockVectorV.T.conj() @ blockVectorBV
    try:
        # VBV is a Cholesky factor from now on...
        VBV = cholesky(VBV, overwrite_a=True)
        VBV = inv(VBV, overwrite_a=True)
        blockVectorV = blockVectorV @ VBV
        # blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T
        if B is not None:
            blockVectorBV = blockVectorBV @ VBV
            # blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T
        return blockVectorV, blockVectorBV, VBV, normalization
    except LinAlgError:
        # Cholesky fails when the block is (numerically) linearly
        # dependent; report None so the caller can restart or stop.
        if verbosityLevel:
            warnings.warn(
                "Cholesky has failed.",
                UserWarning, stacklevel=3
            )
        return None, None, None, normalization
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def _get_indx(_lambda, num, largest):
|
| 129 |
+
"""Get `num` indices into `_lambda` depending on `largest` option."""
|
| 130 |
+
ii = np.argsort(_lambda)
|
| 131 |
+
if largest:
|
| 132 |
+
ii = ii[:-num - 1:-1]
|
| 133 |
+
else:
|
| 134 |
+
ii = ii[:num]
|
| 135 |
+
|
| 136 |
+
return ii
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel):
|
| 140 |
+
if verbosityLevel:
|
| 141 |
+
_report_nonhermitian(gramA, "gramA")
|
| 142 |
+
_report_nonhermitian(gramB, "gramB")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def lobpcg(
|
| 146 |
+
A,
|
| 147 |
+
X,
|
| 148 |
+
B=None,
|
| 149 |
+
M=None,
|
| 150 |
+
Y=None,
|
| 151 |
+
tol=None,
|
| 152 |
+
maxiter=None,
|
| 153 |
+
largest=True,
|
| 154 |
+
verbosityLevel=0,
|
| 155 |
+
retLambdaHistory=False,
|
| 156 |
+
retResidualNormsHistory=False,
|
| 157 |
+
restartControl=20,
|
| 158 |
+
):
|
| 159 |
+
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
|
| 160 |
+
|
| 161 |
+
LOBPCG is a preconditioned eigensolver for large symmetric positive
|
| 162 |
+
definite (SPD) generalized eigenproblems.
|
| 163 |
+
|
| 164 |
+
Parameters
|
| 165 |
+
----------
|
| 166 |
+
A : {sparse matrix, dense matrix, LinearOperator, callable object}
|
| 167 |
+
The symmetric linear operator of the problem, usually a
|
| 168 |
+
sparse matrix. Often called the "stiffness matrix".
|
| 169 |
+
X : ndarray, float32 or float64
|
| 170 |
+
Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
|
| 171 |
+
has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``.
|
| 172 |
+
B : {dense matrix, sparse matrix, LinearOperator, callable object}
|
| 173 |
+
Optional.
|
| 174 |
+
The right hand side operator in a generalized eigenproblem.
|
| 175 |
+
By default, ``B = Identity``. Often called the "mass matrix".
|
| 176 |
+
M : {dense matrix, sparse matrix, LinearOperator, callable object}
|
| 177 |
+
Optional.
|
| 178 |
+
Preconditioner to `A`; by default ``M = Identity``.
|
| 179 |
+
`M` should approximate the inverse of `A`.
|
| 180 |
+
Y : ndarray, float32 or float64, optional.
|
| 181 |
+
An n-by-sizeY matrix of constraints (non-sparse), sizeY < n.
|
| 182 |
+
The iterations will be performed in the B-orthogonal complement
|
| 183 |
+
of the column-space of Y. Y must be full rank.
|
| 184 |
+
tol : scalar, optional.
|
| 185 |
+
Solver tolerance (stopping criterion).
|
| 186 |
+
The default is ``tol=n*sqrt(eps)``.
|
| 187 |
+
maxiter : int, optional.
|
| 188 |
+
Maximum number of iterations. The default is ``maxiter=20``.
|
| 189 |
+
largest : bool, optional.
|
| 190 |
+
When True, solve for the largest eigenvalues, otherwise the smallest.
|
| 191 |
+
verbosityLevel : int, optional
|
| 192 |
+
Controls solver output. The default is ``verbosityLevel=0``.
|
| 193 |
+
retLambdaHistory : bool, optional.
|
| 194 |
+
Whether to return eigenvalue history. Default is False.
|
| 195 |
+
retResidualNormsHistory : bool, optional.
|
| 196 |
+
Whether to return history of residual norms. Default is False.
|
| 197 |
+
restartControl : int, optional.
|
| 198 |
+
Iterations restart if the residuals jump up 2**restartControl times
|
| 199 |
+
compared to the smallest ones recorded in retResidualNormsHistory.
|
| 200 |
+
The default is ``restartControl=20``, making the restarts rare for
|
| 201 |
+
backward compatibility.
|
| 202 |
+
|
| 203 |
+
Returns
|
| 204 |
+
-------
|
| 205 |
+
w : ndarray
|
| 206 |
+
Array of ``k`` eigenvalues.
|
| 207 |
+
v : ndarray
|
| 208 |
+
An array of ``k`` eigenvectors. `v` has the same shape as `X`.
|
| 209 |
+
lambdas : ndarray, optional
|
| 210 |
+
The eigenvalue history, if `retLambdaHistory` is True.
|
| 211 |
+
rnorms : ndarray, optional
|
| 212 |
+
The history of residual norms, if `retResidualNormsHistory` is True.
|
| 213 |
+
|
| 214 |
+
Notes
|
| 215 |
+
-----
|
| 216 |
+
The iterative loop in lobpcg runs maxit=maxiter (or 20 if maxit=None)
|
| 217 |
+
iterations at most and finishes earler if the tolerance is met.
|
| 218 |
+
Breaking backward compatibility with the previous version, lobpcg
|
| 219 |
+
now returns the block of iterative vectors with the best accuracy rather
|
| 220 |
+
than the last one iterated, as a cure for possible divergence.
|
| 221 |
+
|
| 222 |
+
The size of the iteration history output equals to the number of the best
|
| 223 |
+
(limited by maxit) iterations plus 3 (initial, final, and postprocessing).
|
| 224 |
+
|
| 225 |
+
If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
|
| 226 |
+
the return tuple has the following format
|
| 227 |
+
``(lambda, V, lambda history, residual norms history)``.
|
| 228 |
+
|
| 229 |
+
In the following ``n`` denotes the matrix size and ``k`` the number
|
| 230 |
+
of required eigenvalues (smallest or largest).
|
| 231 |
+
|
| 232 |
+
The LOBPCG code internally solves eigenproblems of the size ``3k`` on every
|
| 233 |
+
iteration by calling the dense eigensolver `eigh`, so if ``k`` is not
|
| 234 |
+
small enough compared to ``n``, it makes no sense to call the LOBPCG code.
|
| 235 |
+
Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely
|
| 236 |
+
break internally, so the code calls the standard function `eigh` instead.
|
| 237 |
+
It is not that ``n`` should be large for the LOBPCG to work, but rather the
|
| 238 |
+
ratio ``n / k`` should be large. It you call LOBPCG with ``k=1``
|
| 239 |
+
and ``n=10``, it works though ``n`` is small. The method is intended
|
| 240 |
+
for extremely large ``n / k``.
|
| 241 |
+
|
| 242 |
+
The convergence speed depends basically on two factors:
|
| 243 |
+
|
| 244 |
+
1. Relative separation of the seeking eigenvalues from the rest
|
| 245 |
+
of the eigenvalues. One can vary ``k`` to improve the absolute
|
| 246 |
+
separation and use proper preconditioning to shrink the spectral spread.
|
| 247 |
+
For example, a rod vibration test problem (under tests
|
| 248 |
+
directory) is ill-conditioned for large ``n``, so convergence will be
|
| 249 |
+
slow, unless efficient preconditioning is used. For this specific
|
| 250 |
+
problem, a good simple preconditioner function would be a linear solve
|
| 251 |
+
for `A`, which is easy to code since `A` is tridiagonal.
|
| 252 |
+
|
| 253 |
+
2. Quality of the initial approximations `X` to the seeking eigenvectors.
|
| 254 |
+
Randomly distributed around the origin vectors work well if no better
|
| 255 |
+
choice is known.
|
| 256 |
+
|
| 257 |
+
References
|
| 258 |
+
----------
|
| 259 |
+
.. [1] A. V. Knyazev (2001),
|
| 260 |
+
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
|
| 261 |
+
Block Preconditioned Conjugate Gradient Method.
|
| 262 |
+
SIAM Journal on Scientific Computing 23, no. 2,
|
| 263 |
+
pp. 517-541. :doi:`10.1137/S1064827500366124`
|
| 264 |
+
|
| 265 |
+
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
|
| 266 |
+
(2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
|
| 267 |
+
(BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
|
| 268 |
+
|
| 269 |
+
.. [3] A. V. Knyazev's C and MATLAB implementations:
|
| 270 |
+
https://github.com/lobpcg/blopex
|
| 271 |
+
|
| 272 |
+
Examples
|
| 273 |
+
--------
|
| 274 |
+
Solve ``A x = lambda x`` with constraints and preconditioning.
|
| 275 |
+
|
| 276 |
+
>>> import numpy as np
|
| 277 |
+
>>> from scipy.sparse import spdiags, issparse
|
| 278 |
+
>>> from scipy.sparse.linalg import lobpcg, LinearOperator
|
| 279 |
+
|
| 280 |
+
The square matrix size:
|
| 281 |
+
|
| 282 |
+
>>> n = 100
|
| 283 |
+
>>> vals = np.arange(1, n + 1)
|
| 284 |
+
|
| 285 |
+
The first mandatory input parameter, in this test
|
| 286 |
+
a sparse 2D array representing the square matrix
|
| 287 |
+
of the eigenvalue problem to solve:
|
| 288 |
+
|
| 289 |
+
>>> A = spdiags(vals, 0, n, n)
|
| 290 |
+
>>> A.toarray()
|
| 291 |
+
array([[ 1, 0, 0, ..., 0, 0, 0],
|
| 292 |
+
[ 0, 2, 0, ..., 0, 0, 0],
|
| 293 |
+
[ 0, 0, 3, ..., 0, 0, 0],
|
| 294 |
+
...,
|
| 295 |
+
[ 0, 0, 0, ..., 98, 0, 0],
|
| 296 |
+
[ 0, 0, 0, ..., 0, 99, 0],
|
| 297 |
+
[ 0, 0, 0, ..., 0, 0, 100]])
|
| 298 |
+
|
| 299 |
+
Initial guess for eigenvectors, should have linearly independent
|
| 300 |
+
columns. The second mandatory input parameter, a 2D array with the
|
| 301 |
+
row dimension determining the number of requested eigenvalues.
|
| 302 |
+
If no initial approximations available, randomly oriented vectors
|
| 303 |
+
commonly work best, e.g., with components normally disrtibuted
|
| 304 |
+
around zero or uniformly distributed on the interval [-1 1].
|
| 305 |
+
|
| 306 |
+
>>> rng = np.random.default_rng()
|
| 307 |
+
>>> X = rng.normal(size=(n, 3))
|
| 308 |
+
|
| 309 |
+
Constraints - an optional input parameter is a 2D array comprising
|
| 310 |
+
of column vectors that the eigenvectors must be orthogonal to:
|
| 311 |
+
|
| 312 |
+
>>> Y = np.eye(n, 3)
|
| 313 |
+
|
| 314 |
+
Preconditioner in the inverse of A in this example:
|
| 315 |
+
|
| 316 |
+
>>> invA = spdiags([1./vals], 0, n, n)
|
| 317 |
+
|
| 318 |
+
The preconditiner must be defined by a function:
|
| 319 |
+
|
| 320 |
+
>>> def precond( x ):
|
| 321 |
+
... return invA @ x
|
| 322 |
+
|
| 323 |
+
The argument x of the preconditioner function is a matrix inside `lobpcg`,
|
| 324 |
+
thus the use of matrix-matrix product ``@``.
|
| 325 |
+
|
| 326 |
+
The preconditioner function is passed to lobpcg as a `LinearOperator`:
|
| 327 |
+
|
| 328 |
+
>>> M = LinearOperator(matvec=precond, matmat=precond,
|
| 329 |
+
... shape=(n, n), dtype=np.float64)
|
| 330 |
+
|
| 331 |
+
Let us now solve the eigenvalue problem for the matrix A:
|
| 332 |
+
|
| 333 |
+
>>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
|
| 334 |
+
>>> eigenvalues
|
| 335 |
+
array([4., 5., 6.])
|
| 336 |
+
|
| 337 |
+
Note that the vectors passed in Y are the eigenvectors of the 3 smallest
|
| 338 |
+
eigenvalues. The results returned are orthogonal to those.
|
| 339 |
+
"""
|
| 340 |
+
blockVectorX = X
|
| 341 |
+
bestblockVectorX = blockVectorX
|
| 342 |
+
blockVectorY = Y
|
| 343 |
+
residualTolerance = tol
|
| 344 |
+
if maxiter is None:
|
| 345 |
+
maxiter = 20
|
| 346 |
+
|
| 347 |
+
bestIterationNumber = maxiter
|
| 348 |
+
|
| 349 |
+
sizeY = 0
|
| 350 |
+
if blockVectorY is not None:
|
| 351 |
+
if len(blockVectorY.shape) != 2:
|
| 352 |
+
warnings.warn(
|
| 353 |
+
f"Expected rank-2 array for argument Y, instead got "
|
| 354 |
+
f"{len(blockVectorY.shape)}, "
|
| 355 |
+
f"so ignore it and use no constraints.",
|
| 356 |
+
UserWarning, stacklevel=2
|
| 357 |
+
)
|
| 358 |
+
blockVectorY = None
|
| 359 |
+
else:
|
| 360 |
+
sizeY = blockVectorY.shape[1]
|
| 361 |
+
|
| 362 |
+
# Block size.
|
| 363 |
+
if blockVectorX is None:
|
| 364 |
+
raise ValueError("The mandatory initial matrix X cannot be None")
|
| 365 |
+
if len(blockVectorX.shape) != 2:
|
| 366 |
+
raise ValueError("expected rank-2 array for argument X")
|
| 367 |
+
|
| 368 |
+
n, sizeX = blockVectorX.shape
|
| 369 |
+
|
| 370 |
+
# Data type of iterates, determined by X, must be inexact
|
| 371 |
+
if not np.issubdtype(blockVectorX.dtype, np.inexact):
|
| 372 |
+
warnings.warn(
|
| 373 |
+
f"Data type for argument X is {blockVectorX.dtype}, "
|
| 374 |
+
f"which is not inexact, so casted to np.float32.",
|
| 375 |
+
UserWarning, stacklevel=2
|
| 376 |
+
)
|
| 377 |
+
blockVectorX = np.asarray(blockVectorX, dtype=np.float32)
|
| 378 |
+
|
| 379 |
+
if retLambdaHistory:
|
| 380 |
+
lambdaHistory = np.zeros((maxiter + 3, sizeX),
|
| 381 |
+
dtype=blockVectorX.dtype)
|
| 382 |
+
if retResidualNormsHistory:
|
| 383 |
+
residualNormsHistory = np.zeros((maxiter + 3, sizeX),
|
| 384 |
+
dtype=blockVectorX.dtype)
|
| 385 |
+
|
| 386 |
+
if verbosityLevel:
|
| 387 |
+
aux = "Solving "
|
| 388 |
+
if B is None:
|
| 389 |
+
aux += "standard"
|
| 390 |
+
else:
|
| 391 |
+
aux += "generalized"
|
| 392 |
+
aux += " eigenvalue problem with"
|
| 393 |
+
if M is None:
|
| 394 |
+
aux += "out"
|
| 395 |
+
aux += " preconditioning\n\n"
|
| 396 |
+
aux += "matrix size %d\n" % n
|
| 397 |
+
aux += "block size %d\n\n" % sizeX
|
| 398 |
+
if blockVectorY is None:
|
| 399 |
+
aux += "No constraints\n\n"
|
| 400 |
+
else:
|
| 401 |
+
if sizeY > 1:
|
| 402 |
+
aux += "%d constraints\n\n" % sizeY
|
| 403 |
+
else:
|
| 404 |
+
aux += "%d constraint\n\n" % sizeY
|
| 405 |
+
print(aux)
|
| 406 |
+
|
| 407 |
+
if (n - sizeY) < (5 * sizeX):
|
| 408 |
+
warnings.warn(
|
| 409 |
+
f"The problem size {n} minus the constraints size {sizeY} "
|
| 410 |
+
f"is too small relative to the block size {sizeX}. "
|
| 411 |
+
f"Using a dense eigensolver instead of LOBPCG iterations."
|
| 412 |
+
f"No output of the history of the iterations.",
|
| 413 |
+
UserWarning, stacklevel=2
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
sizeX = min(sizeX, n)
|
| 417 |
+
|
| 418 |
+
if blockVectorY is not None:
|
| 419 |
+
raise NotImplementedError(
|
| 420 |
+
"The dense eigensolver does not support constraints."
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
# Define the closed range of indices of eigenvalues to return.
|
| 424 |
+
if largest:
|
| 425 |
+
eigvals = (n - sizeX, n - 1)
|
| 426 |
+
else:
|
| 427 |
+
eigvals = (0, sizeX - 1)
|
| 428 |
+
|
| 429 |
+
try:
|
| 430 |
+
if isinstance(A, LinearOperator):
|
| 431 |
+
A = A(np.eye(n, dtype=int))
|
| 432 |
+
elif callable(A):
|
| 433 |
+
A = A(np.eye(n, dtype=int))
|
| 434 |
+
if A.shape != (n, n):
|
| 435 |
+
raise ValueError(
|
| 436 |
+
f"The shape {A.shape} of the primary matrix\n"
|
| 437 |
+
f"defined by a callable object is wrong.\n"
|
| 438 |
+
)
|
| 439 |
+
elif isspmatrix(A):
|
| 440 |
+
A = A.toarray()
|
| 441 |
+
else:
|
| 442 |
+
A = np.asarray(A)
|
| 443 |
+
except Exception as e:
|
| 444 |
+
raise Exception(
|
| 445 |
+
f"Primary MatMul call failed with error\n"
|
| 446 |
+
f"{e}\n")
|
| 447 |
+
|
| 448 |
+
if B is not None:
|
| 449 |
+
try:
|
| 450 |
+
if isinstance(B, LinearOperator):
|
| 451 |
+
B = B(np.eye(n, dtype=int))
|
| 452 |
+
elif callable(B):
|
| 453 |
+
B = B(np.eye(n, dtype=int))
|
| 454 |
+
if B.shape != (n, n):
|
| 455 |
+
raise ValueError(
|
| 456 |
+
f"The shape {B.shape} of the secondary matrix\n"
|
| 457 |
+
f"defined by a callable object is wrong.\n"
|
| 458 |
+
)
|
| 459 |
+
elif isspmatrix(B):
|
| 460 |
+
B = B.toarray()
|
| 461 |
+
else:
|
| 462 |
+
B = np.asarray(B)
|
| 463 |
+
except Exception as e:
|
| 464 |
+
raise Exception(
|
| 465 |
+
f"Secondary MatMul call failed with error\n"
|
| 466 |
+
f"{e}\n")
|
| 467 |
+
|
| 468 |
+
try:
|
| 469 |
+
if "subset_by_index" in inspect.signature(eigh).parameters:
|
| 470 |
+
# scipy >= 1.5
|
| 471 |
+
additional_params = {"subset_by_index": eigvals}
|
| 472 |
+
else:
|
| 473 |
+
# deprecated in scipy == 1.10
|
| 474 |
+
additional_params = {"eigvals": eigvals}
|
| 475 |
+
vals, vecs = eigh(A,
|
| 476 |
+
B,
|
| 477 |
+
check_finite=False,
|
| 478 |
+
**additional_params)
|
| 479 |
+
if largest:
|
| 480 |
+
# Reverse order to be compatible with eigs() in 'LM' mode.
|
| 481 |
+
vals = vals[::-1]
|
| 482 |
+
vecs = vecs[:, ::-1]
|
| 483 |
+
|
| 484 |
+
return vals, vecs
|
| 485 |
+
except Exception as e:
|
| 486 |
+
raise Exception(
|
| 487 |
+
f"Dense eigensolver failed with error\n"
|
| 488 |
+
f"{e}\n"
|
| 489 |
+
)
|
| 490 |
+
|
| 491 |
+
if (residualTolerance is None) or (residualTolerance <= 0.0):
|
| 492 |
+
residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n
|
| 493 |
+
|
| 494 |
+
A = _makeMatMat(A)
|
| 495 |
+
B = _makeMatMat(B)
|
| 496 |
+
M = _makeMatMat(M)
|
| 497 |
+
|
| 498 |
+
# Apply constraints to X.
|
| 499 |
+
if blockVectorY is not None:
|
| 500 |
+
|
| 501 |
+
if B is not None:
|
| 502 |
+
blockVectorBY = B(blockVectorY)
|
| 503 |
+
if blockVectorBY.shape != blockVectorY.shape:
|
| 504 |
+
raise ValueError(
|
| 505 |
+
f"The shape {blockVectorY.shape} "
|
| 506 |
+
f"of the constraint not preserved\n"
|
| 507 |
+
f"and changed to {blockVectorBY.shape} "
|
| 508 |
+
f"after multiplying by the secondary matrix.\n"
|
| 509 |
+
)
|
| 510 |
+
else:
|
| 511 |
+
blockVectorBY = blockVectorY
|
| 512 |
+
|
| 513 |
+
# gramYBY is a dense array.
|
| 514 |
+
gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
|
| 515 |
+
try:
|
| 516 |
+
# gramYBY is a Cholesky factor from now on...
|
| 517 |
+
gramYBY = cho_factor(gramYBY)
|
| 518 |
+
except LinAlgError as e:
|
| 519 |
+
raise ValueError("Linearly dependent constraints") from e
|
| 520 |
+
|
| 521 |
+
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
|
| 522 |
+
|
| 523 |
+
##
|
| 524 |
+
# B-orthonormalize X.
|
| 525 |
+
blockVectorX, blockVectorBX, _, _ = _b_orthonormalize(
|
| 526 |
+
B, blockVectorX, verbosityLevel=verbosityLevel)
|
| 527 |
+
if blockVectorX is None:
|
| 528 |
+
raise ValueError("Linearly dependent initial approximations")
|
| 529 |
+
|
| 530 |
+
##
|
| 531 |
+
# Compute the initial Ritz vectors: solve the eigenproblem.
|
| 532 |
+
blockVectorAX = A(blockVectorX)
|
| 533 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
| 534 |
+
raise ValueError(
|
| 535 |
+
f"The shape {blockVectorX.shape} "
|
| 536 |
+
f"of the initial approximations not preserved\n"
|
| 537 |
+
f"and changed to {blockVectorAX.shape} "
|
| 538 |
+
f"after multiplying by the primary matrix.\n"
|
| 539 |
+
)
|
| 540 |
+
|
| 541 |
+
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
|
| 542 |
+
|
| 543 |
+
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
|
| 544 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
| 545 |
+
_lambda = _lambda[ii]
|
| 546 |
+
if retLambdaHistory:
|
| 547 |
+
lambdaHistory[0, :] = _lambda
|
| 548 |
+
|
| 549 |
+
eigBlockVector = np.asarray(eigBlockVector[:, ii])
|
| 550 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVector)
|
| 551 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
|
| 552 |
+
if B is not None:
|
| 553 |
+
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
|
| 554 |
+
|
| 555 |
+
##
|
| 556 |
+
# Active index set.
|
| 557 |
+
activeMask = np.ones((sizeX,), dtype=bool)
|
| 558 |
+
|
| 559 |
+
##
|
| 560 |
+
# Main iteration loop.
|
| 561 |
+
|
| 562 |
+
blockVectorP = None # set during iteration
|
| 563 |
+
blockVectorAP = None
|
| 564 |
+
blockVectorBP = None
|
| 565 |
+
|
| 566 |
+
smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max)
|
| 567 |
+
|
| 568 |
+
iterationNumber = -1
|
| 569 |
+
restart = True
|
| 570 |
+
forcedRestart = False
|
| 571 |
+
explicitGramFlag = False
|
| 572 |
+
while iterationNumber < maxiter:
|
| 573 |
+
iterationNumber += 1
|
| 574 |
+
|
| 575 |
+
if B is not None:
|
| 576 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
| 577 |
+
else:
|
| 578 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
| 579 |
+
|
| 580 |
+
blockVectorR = blockVectorAX - aux
|
| 581 |
+
|
| 582 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
| 583 |
+
residualNorms = np.sqrt(np.abs(aux))
|
| 584 |
+
if retResidualNormsHistory:
|
| 585 |
+
residualNormsHistory[iterationNumber, :] = residualNorms
|
| 586 |
+
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
|
| 587 |
+
|
| 588 |
+
if residualNorm < smallestResidualNorm:
|
| 589 |
+
smallestResidualNorm = residualNorm
|
| 590 |
+
bestIterationNumber = iterationNumber
|
| 591 |
+
bestblockVectorX = blockVectorX
|
| 592 |
+
elif residualNorm > 2**restartControl * smallestResidualNorm:
|
| 593 |
+
forcedRestart = True
|
| 594 |
+
blockVectorAX = A(blockVectorX)
|
| 595 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
| 596 |
+
raise ValueError(
|
| 597 |
+
f"The shape {blockVectorX.shape} "
|
| 598 |
+
f"of the restarted iterate not preserved\n"
|
| 599 |
+
f"and changed to {blockVectorAX.shape} "
|
| 600 |
+
f"after multiplying by the primary matrix.\n"
|
| 601 |
+
)
|
| 602 |
+
if B is not None:
|
| 603 |
+
blockVectorBX = B(blockVectorX)
|
| 604 |
+
if blockVectorBX.shape != blockVectorX.shape:
|
| 605 |
+
raise ValueError(
|
| 606 |
+
f"The shape {blockVectorX.shape} "
|
| 607 |
+
f"of the restarted iterate not preserved\n"
|
| 608 |
+
f"and changed to {blockVectorBX.shape} "
|
| 609 |
+
f"after multiplying by the secondary matrix.\n"
|
| 610 |
+
)
|
| 611 |
+
|
| 612 |
+
ii = np.where(residualNorms > residualTolerance, True, False)
|
| 613 |
+
activeMask = activeMask & ii
|
| 614 |
+
currentBlockSize = activeMask.sum()
|
| 615 |
+
|
| 616 |
+
if verbosityLevel:
|
| 617 |
+
print(f"iteration {iterationNumber}")
|
| 618 |
+
print(f"current block size: {currentBlockSize}")
|
| 619 |
+
print(f"eigenvalue(s):\n{_lambda}")
|
| 620 |
+
print(f"residual norm(s):\n{residualNorms}")
|
| 621 |
+
|
| 622 |
+
if currentBlockSize == 0:
|
| 623 |
+
break
|
| 624 |
+
|
| 625 |
+
activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
|
| 626 |
+
|
| 627 |
+
if iterationNumber > 0:
|
| 628 |
+
activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
|
| 629 |
+
activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
|
| 630 |
+
if B is not None:
|
| 631 |
+
activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
|
| 632 |
+
|
| 633 |
+
if M is not None:
|
| 634 |
+
# Apply preconditioner T to the active residuals.
|
| 635 |
+
activeBlockVectorR = M(activeBlockVectorR)
|
| 636 |
+
|
| 637 |
+
##
|
| 638 |
+
# Apply constraints to the preconditioned residuals.
|
| 639 |
+
if blockVectorY is not None:
|
| 640 |
+
_applyConstraints(activeBlockVectorR,
|
| 641 |
+
gramYBY,
|
| 642 |
+
blockVectorBY,
|
| 643 |
+
blockVectorY)
|
| 644 |
+
|
| 645 |
+
##
|
| 646 |
+
# B-orthogonalize the preconditioned residuals to X.
|
| 647 |
+
if B is not None:
|
| 648 |
+
activeBlockVectorR = activeBlockVectorR - (
|
| 649 |
+
blockVectorX @
|
| 650 |
+
(blockVectorBX.T.conj() @ activeBlockVectorR)
|
| 651 |
+
)
|
| 652 |
+
else:
|
| 653 |
+
activeBlockVectorR = activeBlockVectorR - (
|
| 654 |
+
blockVectorX @
|
| 655 |
+
(blockVectorX.T.conj() @ activeBlockVectorR)
|
| 656 |
+
)
|
| 657 |
+
|
| 658 |
+
##
|
| 659 |
+
# B-orthonormalize the preconditioned residuals.
|
| 660 |
+
aux = _b_orthonormalize(
|
| 661 |
+
B, activeBlockVectorR, verbosityLevel=verbosityLevel)
|
| 662 |
+
activeBlockVectorR, activeBlockVectorBR, _, _ = aux
|
| 663 |
+
|
| 664 |
+
if activeBlockVectorR is None:
|
| 665 |
+
warnings.warn(
|
| 666 |
+
f"Failed at iteration {iterationNumber} with accuracies "
|
| 667 |
+
f"{residualNorms}\n not reaching the requested "
|
| 668 |
+
f"tolerance {residualTolerance}.",
|
| 669 |
+
UserWarning, stacklevel=2
|
| 670 |
+
)
|
| 671 |
+
break
|
| 672 |
+
activeBlockVectorAR = A(activeBlockVectorR)
|
| 673 |
+
|
| 674 |
+
if iterationNumber > 0:
|
| 675 |
+
if B is not None:
|
| 676 |
+
aux = _b_orthonormalize(
|
| 677 |
+
B, activeBlockVectorP, activeBlockVectorBP,
|
| 678 |
+
verbosityLevel=verbosityLevel
|
| 679 |
+
)
|
| 680 |
+
activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
|
| 681 |
+
else:
|
| 682 |
+
aux = _b_orthonormalize(B, activeBlockVectorP,
|
| 683 |
+
verbosityLevel=verbosityLevel)
|
| 684 |
+
activeBlockVectorP, _, invR, normal = aux
|
| 685 |
+
# Function _b_orthonormalize returns None if Cholesky fails
|
| 686 |
+
if activeBlockVectorP is not None:
|
| 687 |
+
activeBlockVectorAP = activeBlockVectorAP / normal
|
| 688 |
+
activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
|
| 689 |
+
restart = forcedRestart
|
| 690 |
+
else:
|
| 691 |
+
restart = True
|
| 692 |
+
|
| 693 |
+
##
|
| 694 |
+
# Perform the Rayleigh Ritz Procedure:
|
| 695 |
+
# Compute symmetric Gram matrices:
|
| 696 |
+
|
| 697 |
+
if activeBlockVectorAR.dtype == "float32":
|
| 698 |
+
myeps = 1
|
| 699 |
+
else:
|
| 700 |
+
myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps)
|
| 701 |
+
|
| 702 |
+
if residualNorms.max() > myeps and not explicitGramFlag:
|
| 703 |
+
explicitGramFlag = False
|
| 704 |
+
else:
|
| 705 |
+
# Once explicitGramFlag, forever explicitGramFlag.
|
| 706 |
+
explicitGramFlag = True
|
| 707 |
+
|
| 708 |
+
# Shared memory assingments to simplify the code
|
| 709 |
+
if B is None:
|
| 710 |
+
blockVectorBX = blockVectorX
|
| 711 |
+
activeBlockVectorBR = activeBlockVectorR
|
| 712 |
+
if not restart:
|
| 713 |
+
activeBlockVectorBP = activeBlockVectorP
|
| 714 |
+
|
| 715 |
+
# Common submatrices:
|
| 716 |
+
gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
|
| 717 |
+
gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
|
| 718 |
+
|
| 719 |
+
gramDtype = activeBlockVectorAR.dtype
|
| 720 |
+
if explicitGramFlag:
|
| 721 |
+
gramRAR = (gramRAR + gramRAR.T.conj()) / 2
|
| 722 |
+
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
|
| 723 |
+
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
|
| 724 |
+
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
|
| 725 |
+
gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
|
| 726 |
+
gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
|
| 727 |
+
else:
|
| 728 |
+
gramXAX = np.diag(_lambda).astype(gramDtype)
|
| 729 |
+
gramXBX = np.eye(sizeX, dtype=gramDtype)
|
| 730 |
+
gramRBR = np.eye(currentBlockSize, dtype=gramDtype)
|
| 731 |
+
gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype)
|
| 732 |
+
|
| 733 |
+
if not restart:
|
| 734 |
+
gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
|
| 735 |
+
gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
|
| 736 |
+
gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
|
| 737 |
+
gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
|
| 738 |
+
gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
|
| 739 |
+
if explicitGramFlag:
|
| 740 |
+
gramPAP = (gramPAP + gramPAP.T.conj()) / 2
|
| 741 |
+
gramPBP = np.dot(activeBlockVectorP.T.conj(),
|
| 742 |
+
activeBlockVectorBP)
|
| 743 |
+
else:
|
| 744 |
+
gramPBP = np.eye(currentBlockSize, dtype=gramDtype)
|
| 745 |
+
|
| 746 |
+
gramA = bmat(
|
| 747 |
+
[
|
| 748 |
+
[gramXAX, gramXAR, gramXAP],
|
| 749 |
+
[gramXAR.T.conj(), gramRAR, gramRAP],
|
| 750 |
+
[gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
|
| 751 |
+
]
|
| 752 |
+
)
|
| 753 |
+
gramB = bmat(
|
| 754 |
+
[
|
| 755 |
+
[gramXBX, gramXBR, gramXBP],
|
| 756 |
+
[gramXBR.T.conj(), gramRBR, gramRBP],
|
| 757 |
+
[gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
|
| 758 |
+
]
|
| 759 |
+
)
|
| 760 |
+
|
| 761 |
+
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
|
| 762 |
+
|
| 763 |
+
try:
|
| 764 |
+
_lambda, eigBlockVector = eigh(gramA,
|
| 765 |
+
gramB,
|
| 766 |
+
check_finite=False)
|
| 767 |
+
except LinAlgError as e:
|
| 768 |
+
# raise ValueError("eigh failed in lobpcg iterations") from e
|
| 769 |
+
if verbosityLevel:
|
| 770 |
+
warnings.warn(
|
| 771 |
+
f"eigh failed at iteration {iterationNumber} \n"
|
| 772 |
+
f"with error {e} causing a restart.\n",
|
| 773 |
+
UserWarning, stacklevel=2
|
| 774 |
+
)
|
| 775 |
+
# try again after dropping the direction vectors P from RR
|
| 776 |
+
restart = True
|
| 777 |
+
|
| 778 |
+
if restart:
|
| 779 |
+
gramA = bmat([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
|
| 780 |
+
gramB = bmat([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])
|
| 781 |
+
|
| 782 |
+
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
|
| 783 |
+
|
| 784 |
+
try:
|
| 785 |
+
_lambda, eigBlockVector = eigh(gramA,
|
| 786 |
+
gramB,
|
| 787 |
+
check_finite=False)
|
| 788 |
+
except LinAlgError as e:
|
| 789 |
+
# raise ValueError("eigh failed in lobpcg iterations") from e
|
| 790 |
+
warnings.warn(
|
| 791 |
+
f"eigh failed at iteration {iterationNumber} with error\n"
|
| 792 |
+
f"{e}\n",
|
| 793 |
+
UserWarning, stacklevel=2
|
| 794 |
+
)
|
| 795 |
+
break
|
| 796 |
+
|
| 797 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
| 798 |
+
_lambda = _lambda[ii]
|
| 799 |
+
eigBlockVector = eigBlockVector[:, ii]
|
| 800 |
+
if retLambdaHistory:
|
| 801 |
+
lambdaHistory[iterationNumber + 1, :] = _lambda
|
| 802 |
+
|
| 803 |
+
# Compute Ritz vectors.
|
| 804 |
+
if B is not None:
|
| 805 |
+
if not restart:
|
| 806 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 807 |
+
eigBlockVectorR = eigBlockVector[sizeX:
|
| 808 |
+
sizeX + currentBlockSize]
|
| 809 |
+
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
|
| 810 |
+
|
| 811 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 812 |
+
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
|
| 813 |
+
|
| 814 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 815 |
+
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
|
| 816 |
+
|
| 817 |
+
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
|
| 818 |
+
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
|
| 819 |
+
else:
|
| 820 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 821 |
+
eigBlockVectorR = eigBlockVector[sizeX:]
|
| 822 |
+
|
| 823 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 824 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 825 |
+
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
|
| 826 |
+
|
| 827 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
|
| 828 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
|
| 829 |
+
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
|
| 830 |
+
|
| 831 |
+
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
|
| 832 |
+
|
| 833 |
+
else:
|
| 834 |
+
if not restart:
|
| 835 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 836 |
+
eigBlockVectorR = eigBlockVector[sizeX:
|
| 837 |
+
sizeX + currentBlockSize]
|
| 838 |
+
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
|
| 839 |
+
|
| 840 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 841 |
+
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
|
| 842 |
+
|
| 843 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 844 |
+
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
|
| 845 |
+
else:
|
| 846 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 847 |
+
eigBlockVectorR = eigBlockVector[sizeX:]
|
| 848 |
+
|
| 849 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 850 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 851 |
+
|
| 852 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
|
| 853 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
|
| 854 |
+
|
| 855 |
+
blockVectorP, blockVectorAP = pp, app
|
| 856 |
+
|
| 857 |
+
if B is not None:
|
| 858 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
| 859 |
+
else:
|
| 860 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
| 861 |
+
|
| 862 |
+
blockVectorR = blockVectorAX - aux
|
| 863 |
+
|
| 864 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
| 865 |
+
residualNorms = np.sqrt(np.abs(aux))
|
| 866 |
+
# Use old lambda in case of early loop exit.
|
| 867 |
+
if retLambdaHistory:
|
| 868 |
+
lambdaHistory[iterationNumber + 1, :] = _lambda
|
| 869 |
+
if retResidualNormsHistory:
|
| 870 |
+
residualNormsHistory[iterationNumber + 1, :] = residualNorms
|
| 871 |
+
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
|
| 872 |
+
if residualNorm < smallestResidualNorm:
|
| 873 |
+
smallestResidualNorm = residualNorm
|
| 874 |
+
bestIterationNumber = iterationNumber + 1
|
| 875 |
+
bestblockVectorX = blockVectorX
|
| 876 |
+
|
| 877 |
+
if np.max(np.abs(residualNorms)) > residualTolerance:
|
| 878 |
+
warnings.warn(
|
| 879 |
+
f"Exited at iteration {iterationNumber} with accuracies \n"
|
| 880 |
+
f"{residualNorms}\n"
|
| 881 |
+
f"not reaching the requested tolerance {residualTolerance}.\n"
|
| 882 |
+
f"Use iteration {bestIterationNumber} instead with accuracy \n"
|
| 883 |
+
f"{smallestResidualNorm}.\n",
|
| 884 |
+
UserWarning, stacklevel=2
|
| 885 |
+
)
|
| 886 |
+
|
| 887 |
+
if verbosityLevel:
|
| 888 |
+
print(f"Final iterative eigenvalue(s):\n{_lambda}")
|
| 889 |
+
print(f"Final iterative residual norm(s):\n{residualNorms}")
|
| 890 |
+
|
| 891 |
+
blockVectorX = bestblockVectorX
|
| 892 |
+
# Making eigenvectors "exactly" satisfy the blockVectorY constrains
|
| 893 |
+
if blockVectorY is not None:
|
| 894 |
+
_applyConstraints(blockVectorX,
|
| 895 |
+
gramYBY,
|
| 896 |
+
blockVectorBY,
|
| 897 |
+
blockVectorY)
|
| 898 |
+
|
| 899 |
+
# Making eigenvectors "exactly" othonormalized by final "exact" RR
|
| 900 |
+
blockVectorAX = A(blockVectorX)
|
| 901 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
| 902 |
+
raise ValueError(
|
| 903 |
+
f"The shape {blockVectorX.shape} "
|
| 904 |
+
f"of the postprocessing iterate not preserved\n"
|
| 905 |
+
f"and changed to {blockVectorAX.shape} "
|
| 906 |
+
f"after multiplying by the primary matrix.\n"
|
| 907 |
+
)
|
| 908 |
+
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
|
| 909 |
+
|
| 910 |
+
blockVectorBX = blockVectorX
|
| 911 |
+
if B is not None:
|
| 912 |
+
blockVectorBX = B(blockVectorX)
|
| 913 |
+
if blockVectorBX.shape != blockVectorX.shape:
|
| 914 |
+
raise ValueError(
|
| 915 |
+
f"The shape {blockVectorX.shape} "
|
| 916 |
+
f"of the postprocessing iterate not preserved\n"
|
| 917 |
+
f"and changed to {blockVectorBX.shape} "
|
| 918 |
+
f"after multiplying by the secondary matrix.\n"
|
| 919 |
+
)
|
| 920 |
+
|
| 921 |
+
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
|
| 922 |
+
_handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel)
|
| 923 |
+
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
|
| 924 |
+
gramXBX = (gramXBX + gramXBX.T.conj()) / 2
|
| 925 |
+
try:
|
| 926 |
+
_lambda, eigBlockVector = eigh(gramXAX,
|
| 927 |
+
gramXBX,
|
| 928 |
+
check_finite=False)
|
| 929 |
+
except LinAlgError as e:
|
| 930 |
+
raise ValueError("eigh has failed in lobpcg postprocessing") from e
|
| 931 |
+
|
| 932 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
| 933 |
+
_lambda = _lambda[ii]
|
| 934 |
+
eigBlockVector = np.asarray(eigBlockVector[:, ii])
|
| 935 |
+
|
| 936 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVector)
|
| 937 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
|
| 938 |
+
|
| 939 |
+
if B is not None:
|
| 940 |
+
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
|
| 941 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
| 942 |
+
else:
|
| 943 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
| 944 |
+
|
| 945 |
+
blockVectorR = blockVectorAX - aux
|
| 946 |
+
|
| 947 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
| 948 |
+
residualNorms = np.sqrt(np.abs(aux))
|
| 949 |
+
|
| 950 |
+
if retLambdaHistory:
|
| 951 |
+
lambdaHistory[bestIterationNumber + 1, :] = _lambda
|
| 952 |
+
if retResidualNormsHistory:
|
| 953 |
+
residualNormsHistory[bestIterationNumber + 1, :] = residualNorms
|
| 954 |
+
|
| 955 |
+
if retLambdaHistory:
|
| 956 |
+
lambdaHistory = lambdaHistory[
|
| 957 |
+
: bestIterationNumber + 2, :]
|
| 958 |
+
if retResidualNormsHistory:
|
| 959 |
+
residualNormsHistory = residualNormsHistory[
|
| 960 |
+
: bestIterationNumber + 2, :]
|
| 961 |
+
|
| 962 |
+
if np.max(np.abs(residualNorms)) > residualTolerance:
|
| 963 |
+
warnings.warn(
|
| 964 |
+
f"Exited postprocessing with accuracies \n"
|
| 965 |
+
f"{residualNorms}\n"
|
| 966 |
+
f"not reaching the requested tolerance {residualTolerance}.",
|
| 967 |
+
UserWarning, stacklevel=2
|
| 968 |
+
)
|
| 969 |
+
|
| 970 |
+
if verbosityLevel:
|
| 971 |
+
print(f"Final postprocessing eigenvalue(s):\n{_lambda}")
|
| 972 |
+
print(f"Final residual norm(s):\n{residualNorms}")
|
| 973 |
+
|
| 974 |
+
if retLambdaHistory:
|
| 975 |
+
lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0])
|
| 976 |
+
lambdaHistory = [np.squeeze(i) for i in lambdaHistory]
|
| 977 |
+
if retResidualNormsHistory:
|
| 978 |
+
residualNormsHistory = np.vsplit(residualNormsHistory,
|
| 979 |
+
np.shape(residualNormsHistory)[0])
|
| 980 |
+
residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory]
|
| 981 |
+
|
| 982 |
+
if retLambdaHistory:
|
| 983 |
+
if retResidualNormsHistory:
|
| 984 |
+
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
|
| 985 |
+
else:
|
| 986 |
+
return _lambda, blockVectorX, lambdaHistory
|
| 987 |
+
else:
|
| 988 |
+
if retResidualNormsHistory:
|
| 989 |
+
return _lambda, blockVectorX, residualNormsHistory
|
| 990 |
+
else:
|
| 991 |
+
return _lambda, blockVectorX
|
mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__init__.py
ADDED
|
File without changes
|
mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc
ADDED
|
Binary file (3.06 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/_structures.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Vendoered from
|
| 2 |
+
https://github.com/pypa/packaging/blob/main/packaging/_structures.py
|
| 3 |
+
"""
|
| 4 |
+
# Copyright (c) Donald Stufft and individual contributors.
|
| 5 |
+
# All rights reserved.
|
| 6 |
+
|
| 7 |
+
# Redistribution and use in source and binary forms, with or without
|
| 8 |
+
# modification, are permitted provided that the following conditions are met:
|
| 9 |
+
|
| 10 |
+
# 1. Redistributions of source code must retain the above copyright notice,
|
| 11 |
+
# this list of conditions and the following disclaimer.
|
| 12 |
+
|
| 13 |
+
# 2. Redistributions in binary form must reproduce the above copyright
|
| 14 |
+
# notice, this list of conditions and the following disclaimer in the
|
| 15 |
+
# documentation and/or other materials provided with the distribution.
|
| 16 |
+
|
| 17 |
+
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
| 18 |
+
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 19 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 20 |
+
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 21 |
+
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 22 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 23 |
+
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 24 |
+
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 25 |
+
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 26 |
+
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class InfinityType:
|
| 30 |
+
def __repr__(self) -> str:
|
| 31 |
+
return "Infinity"
|
| 32 |
+
|
| 33 |
+
def __hash__(self) -> int:
|
| 34 |
+
return hash(repr(self))
|
| 35 |
+
|
| 36 |
+
def __lt__(self, other: object) -> bool:
|
| 37 |
+
return False
|
| 38 |
+
|
| 39 |
+
def __le__(self, other: object) -> bool:
|
| 40 |
+
return False
|
| 41 |
+
|
| 42 |
+
def __eq__(self, other: object) -> bool:
|
| 43 |
+
return isinstance(other, self.__class__)
|
| 44 |
+
|
| 45 |
+
def __ne__(self, other: object) -> bool:
|
| 46 |
+
return not isinstance(other, self.__class__)
|
| 47 |
+
|
| 48 |
+
def __gt__(self, other: object) -> bool:
|
| 49 |
+
return True
|
| 50 |
+
|
| 51 |
+
def __ge__(self, other: object) -> bool:
|
| 52 |
+
return True
|
| 53 |
+
|
| 54 |
+
def __neg__(self: object) -> "NegativeInfinityType":
|
| 55 |
+
return NegativeInfinity
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
Infinity = InfinityType()
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class NegativeInfinityType:
|
| 62 |
+
def __repr__(self) -> str:
|
| 63 |
+
return "-Infinity"
|
| 64 |
+
|
| 65 |
+
def __hash__(self) -> int:
|
| 66 |
+
return hash(repr(self))
|
| 67 |
+
|
| 68 |
+
def __lt__(self, other: object) -> bool:
|
| 69 |
+
return True
|
| 70 |
+
|
| 71 |
+
def __le__(self, other: object) -> bool:
|
| 72 |
+
return True
|
| 73 |
+
|
| 74 |
+
def __eq__(self, other: object) -> bool:
|
| 75 |
+
return isinstance(other, self.__class__)
|
| 76 |
+
|
| 77 |
+
def __ne__(self, other: object) -> bool:
|
| 78 |
+
return not isinstance(other, self.__class__)
|
| 79 |
+
|
| 80 |
+
def __gt__(self, other: object) -> bool:
|
| 81 |
+
return False
|
| 82 |
+
|
| 83 |
+
def __ge__(self, other: object) -> bool:
|
| 84 |
+
return False
|
| 85 |
+
|
| 86 |
+
def __neg__(self: object) -> InfinityType:
|
| 87 |
+
return Infinity
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
NegativeInfinity = NegativeInfinityType()
|
mgm/lib/python3.10/site-packages/sklearn/externals/_packaging/version.py
ADDED
|
@@ -0,0 +1,535 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Vendoered from
|
| 2 |
+
https://github.com/pypa/packaging/blob/main/packaging/version.py
|
| 3 |
+
"""
|
| 4 |
+
# Copyright (c) Donald Stufft and individual contributors.
|
| 5 |
+
# All rights reserved.
|
| 6 |
+
|
| 7 |
+
# Redistribution and use in source and binary forms, with or without
|
| 8 |
+
# modification, are permitted provided that the following conditions are met:
|
| 9 |
+
|
| 10 |
+
# 1. Redistributions of source code must retain the above copyright notice,
|
| 11 |
+
# this list of conditions and the following disclaimer.
|
| 12 |
+
|
| 13 |
+
# 2. Redistributions in binary form must reproduce the above copyright
|
| 14 |
+
# notice, this list of conditions and the following disclaimer in the
|
| 15 |
+
# documentation and/or other materials provided with the distribution.
|
| 16 |
+
|
| 17 |
+
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
| 18 |
+
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 19 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 20 |
+
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 21 |
+
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 22 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 23 |
+
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 24 |
+
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 25 |
+
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 26 |
+
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 27 |
+
|
| 28 |
+
import collections
|
| 29 |
+
import itertools
|
| 30 |
+
import re
|
| 31 |
+
import warnings
|
| 32 |
+
from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
|
| 33 |
+
|
| 34 |
+
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
|
| 35 |
+
|
| 36 |
+
__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
|
| 37 |
+
|
| 38 |
+
InfiniteTypes = Union[InfinityType, NegativeInfinityType]
|
| 39 |
+
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
|
| 40 |
+
SubLocalType = Union[InfiniteTypes, int, str]
|
| 41 |
+
LocalType = Union[
|
| 42 |
+
NegativeInfinityType,
|
| 43 |
+
Tuple[
|
| 44 |
+
Union[
|
| 45 |
+
SubLocalType,
|
| 46 |
+
Tuple[SubLocalType, str],
|
| 47 |
+
Tuple[NegativeInfinityType, SubLocalType],
|
| 48 |
+
],
|
| 49 |
+
...,
|
| 50 |
+
],
|
| 51 |
+
]
|
| 52 |
+
CmpKey = Tuple[
|
| 53 |
+
int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
|
| 54 |
+
]
|
| 55 |
+
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
|
| 56 |
+
VersionComparisonMethod = Callable[
|
| 57 |
+
[Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
|
| 58 |
+
]
|
| 59 |
+
|
| 60 |
+
_Version = collections.namedtuple(
|
| 61 |
+
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def parse(version: str) -> Union["LegacyVersion", "Version"]:
|
| 66 |
+
"""Parse the given version from a string to an appropriate class.
|
| 67 |
+
|
| 68 |
+
Parameters
|
| 69 |
+
----------
|
| 70 |
+
version : str
|
| 71 |
+
Version in a string format, eg. "0.9.1" or "1.2.dev0".
|
| 72 |
+
|
| 73 |
+
Returns
|
| 74 |
+
-------
|
| 75 |
+
version : :class:`Version` object or a :class:`LegacyVersion` object
|
| 76 |
+
Returned class depends on the given version: if is a valid
|
| 77 |
+
PEP 440 version or a legacy version.
|
| 78 |
+
"""
|
| 79 |
+
try:
|
| 80 |
+
return Version(version)
|
| 81 |
+
except InvalidVersion:
|
| 82 |
+
return LegacyVersion(version)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class InvalidVersion(ValueError):
|
| 86 |
+
"""
|
| 87 |
+
An invalid version was found, users should refer to PEP 440.
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class _BaseVersion:
|
| 92 |
+
_key: Union[CmpKey, LegacyCmpKey]
|
| 93 |
+
|
| 94 |
+
def __hash__(self) -> int:
|
| 95 |
+
return hash(self._key)
|
| 96 |
+
|
| 97 |
+
# Please keep the duplicated `isinstance` check
|
| 98 |
+
# in the six comparisons hereunder
|
| 99 |
+
# unless you find a way to avoid adding overhead function calls.
|
| 100 |
+
def __lt__(self, other: "_BaseVersion") -> bool:
|
| 101 |
+
if not isinstance(other, _BaseVersion):
|
| 102 |
+
return NotImplemented
|
| 103 |
+
|
| 104 |
+
return self._key < other._key
|
| 105 |
+
|
| 106 |
+
def __le__(self, other: "_BaseVersion") -> bool:
|
| 107 |
+
if not isinstance(other, _BaseVersion):
|
| 108 |
+
return NotImplemented
|
| 109 |
+
|
| 110 |
+
return self._key <= other._key
|
| 111 |
+
|
| 112 |
+
def __eq__(self, other: object) -> bool:
|
| 113 |
+
if not isinstance(other, _BaseVersion):
|
| 114 |
+
return NotImplemented
|
| 115 |
+
|
| 116 |
+
return self._key == other._key
|
| 117 |
+
|
| 118 |
+
def __ge__(self, other: "_BaseVersion") -> bool:
|
| 119 |
+
if not isinstance(other, _BaseVersion):
|
| 120 |
+
return NotImplemented
|
| 121 |
+
|
| 122 |
+
return self._key >= other._key
|
| 123 |
+
|
| 124 |
+
def __gt__(self, other: "_BaseVersion") -> bool:
|
| 125 |
+
if not isinstance(other, _BaseVersion):
|
| 126 |
+
return NotImplemented
|
| 127 |
+
|
| 128 |
+
return self._key > other._key
|
| 129 |
+
|
| 130 |
+
def __ne__(self, other: object) -> bool:
|
| 131 |
+
if not isinstance(other, _BaseVersion):
|
| 132 |
+
return NotImplemented
|
| 133 |
+
|
| 134 |
+
return self._key != other._key
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class LegacyVersion(_BaseVersion):
|
| 138 |
+
def __init__(self, version: str) -> None:
|
| 139 |
+
self._version = str(version)
|
| 140 |
+
self._key = _legacy_cmpkey(self._version)
|
| 141 |
+
|
| 142 |
+
warnings.warn(
|
| 143 |
+
"Creating a LegacyVersion has been deprecated and will be "
|
| 144 |
+
"removed in the next major release",
|
| 145 |
+
DeprecationWarning,
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
def __str__(self) -> str:
|
| 149 |
+
return self._version
|
| 150 |
+
|
| 151 |
+
def __repr__(self) -> str:
|
| 152 |
+
return f"<LegacyVersion('{self}')>"
|
| 153 |
+
|
| 154 |
+
@property
|
| 155 |
+
def public(self) -> str:
|
| 156 |
+
return self._version
|
| 157 |
+
|
| 158 |
+
@property
|
| 159 |
+
def base_version(self) -> str:
|
| 160 |
+
return self._version
|
| 161 |
+
|
| 162 |
+
@property
|
| 163 |
+
def epoch(self) -> int:
|
| 164 |
+
return -1
|
| 165 |
+
|
| 166 |
+
@property
|
| 167 |
+
def release(self) -> None:
|
| 168 |
+
return None
|
| 169 |
+
|
| 170 |
+
@property
|
| 171 |
+
def pre(self) -> None:
|
| 172 |
+
return None
|
| 173 |
+
|
| 174 |
+
@property
|
| 175 |
+
def post(self) -> None:
|
| 176 |
+
return None
|
| 177 |
+
|
| 178 |
+
@property
|
| 179 |
+
def dev(self) -> None:
|
| 180 |
+
return None
|
| 181 |
+
|
| 182 |
+
@property
|
| 183 |
+
def local(self) -> None:
|
| 184 |
+
return None
|
| 185 |
+
|
| 186 |
+
@property
|
| 187 |
+
def is_prerelease(self) -> bool:
|
| 188 |
+
return False
|
| 189 |
+
|
| 190 |
+
@property
|
| 191 |
+
def is_postrelease(self) -> bool:
|
| 192 |
+
return False
|
| 193 |
+
|
| 194 |
+
@property
|
| 195 |
+
def is_devrelease(self) -> bool:
|
| 196 |
+
return False
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
|
| 200 |
+
|
| 201 |
+
_legacy_version_replacement_map = {
|
| 202 |
+
"pre": "c",
|
| 203 |
+
"preview": "c",
|
| 204 |
+
"-": "final-",
|
| 205 |
+
"rc": "c",
|
| 206 |
+
"dev": "@",
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def _parse_version_parts(s: str) -> Iterator[str]:
|
| 211 |
+
for part in _legacy_version_component_re.split(s):
|
| 212 |
+
part = _legacy_version_replacement_map.get(part, part)
|
| 213 |
+
|
| 214 |
+
if not part or part == ".":
|
| 215 |
+
continue
|
| 216 |
+
|
| 217 |
+
if part[:1] in "0123456789":
|
| 218 |
+
# pad for numeric comparison
|
| 219 |
+
yield part.zfill(8)
|
| 220 |
+
else:
|
| 221 |
+
yield "*" + part
|
| 222 |
+
|
| 223 |
+
# ensure that alpha/beta/candidate are before final
|
| 224 |
+
yield "*final"
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def _legacy_cmpkey(version: str) -> LegacyCmpKey:
|
| 228 |
+
|
| 229 |
+
# We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
|
| 230 |
+
# greater than or equal to 0. This will effectively put the LegacyVersion,
|
| 231 |
+
# which uses the defacto standard originally implemented by setuptools,
|
| 232 |
+
# as before all PEP 440 versions.
|
| 233 |
+
epoch = -1
|
| 234 |
+
|
| 235 |
+
# This scheme is taken from pkg_resources.parse_version setuptools prior to
|
| 236 |
+
# it's adoption of the packaging library.
|
| 237 |
+
parts: List[str] = []
|
| 238 |
+
for part in _parse_version_parts(version.lower()):
|
| 239 |
+
if part.startswith("*"):
|
| 240 |
+
# remove "-" before a prerelease tag
|
| 241 |
+
if part < "*final":
|
| 242 |
+
while parts and parts[-1] == "*final-":
|
| 243 |
+
parts.pop()
|
| 244 |
+
|
| 245 |
+
# remove trailing zeros from each series of numeric parts
|
| 246 |
+
while parts and parts[-1] == "00000000":
|
| 247 |
+
parts.pop()
|
| 248 |
+
|
| 249 |
+
parts.append(part)
|
| 250 |
+
|
| 251 |
+
return epoch, tuple(parts)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
# Deliberately not anchored to the start and end of the string, to make it
|
| 255 |
+
# easier for 3rd party code to reuse
|
| 256 |
+
VERSION_PATTERN = r"""
|
| 257 |
+
v?
|
| 258 |
+
(?:
|
| 259 |
+
(?:(?P<epoch>[0-9]+)!)? # epoch
|
| 260 |
+
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
|
| 261 |
+
(?P<pre> # pre-release
|
| 262 |
+
[-_\.]?
|
| 263 |
+
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
|
| 264 |
+
[-_\.]?
|
| 265 |
+
(?P<pre_n>[0-9]+)?
|
| 266 |
+
)?
|
| 267 |
+
(?P<post> # post release
|
| 268 |
+
(?:-(?P<post_n1>[0-9]+))
|
| 269 |
+
|
|
| 270 |
+
(?:
|
| 271 |
+
[-_\.]?
|
| 272 |
+
(?P<post_l>post|rev|r)
|
| 273 |
+
[-_\.]?
|
| 274 |
+
(?P<post_n2>[0-9]+)?
|
| 275 |
+
)
|
| 276 |
+
)?
|
| 277 |
+
(?P<dev> # dev release
|
| 278 |
+
[-_\.]?
|
| 279 |
+
(?P<dev_l>dev)
|
| 280 |
+
[-_\.]?
|
| 281 |
+
(?P<dev_n>[0-9]+)?
|
| 282 |
+
)?
|
| 283 |
+
)
|
| 284 |
+
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
|
| 285 |
+
"""
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
class Version(_BaseVersion):
|
| 289 |
+
|
| 290 |
+
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
|
| 291 |
+
|
| 292 |
+
def __init__(self, version: str) -> None:
|
| 293 |
+
|
| 294 |
+
# Validate the version and parse it into pieces
|
| 295 |
+
match = self._regex.search(version)
|
| 296 |
+
if not match:
|
| 297 |
+
raise InvalidVersion(f"Invalid version: '{version}'")
|
| 298 |
+
|
| 299 |
+
# Store the parsed out pieces of the version
|
| 300 |
+
self._version = _Version(
|
| 301 |
+
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
|
| 302 |
+
release=tuple(int(i) for i in match.group("release").split(".")),
|
| 303 |
+
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
|
| 304 |
+
post=_parse_letter_version(
|
| 305 |
+
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
|
| 306 |
+
),
|
| 307 |
+
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
|
| 308 |
+
local=_parse_local_version(match.group("local")),
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
# Generate a key which will be used for sorting
|
| 312 |
+
self._key = _cmpkey(
|
| 313 |
+
self._version.epoch,
|
| 314 |
+
self._version.release,
|
| 315 |
+
self._version.pre,
|
| 316 |
+
self._version.post,
|
| 317 |
+
self._version.dev,
|
| 318 |
+
self._version.local,
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
def __repr__(self) -> str:
|
| 322 |
+
return f"<Version('{self}')>"
|
| 323 |
+
|
| 324 |
+
def __str__(self) -> str:
|
| 325 |
+
parts = []
|
| 326 |
+
|
| 327 |
+
# Epoch
|
| 328 |
+
if self.epoch != 0:
|
| 329 |
+
parts.append(f"{self.epoch}!")
|
| 330 |
+
|
| 331 |
+
# Release segment
|
| 332 |
+
parts.append(".".join(str(x) for x in self.release))
|
| 333 |
+
|
| 334 |
+
# Pre-release
|
| 335 |
+
if self.pre is not None:
|
| 336 |
+
parts.append("".join(str(x) for x in self.pre))
|
| 337 |
+
|
| 338 |
+
# Post-release
|
| 339 |
+
if self.post is not None:
|
| 340 |
+
parts.append(f".post{self.post}")
|
| 341 |
+
|
| 342 |
+
# Development release
|
| 343 |
+
if self.dev is not None:
|
| 344 |
+
parts.append(f".dev{self.dev}")
|
| 345 |
+
|
| 346 |
+
# Local version segment
|
| 347 |
+
if self.local is not None:
|
| 348 |
+
parts.append(f"+{self.local}")
|
| 349 |
+
|
| 350 |
+
return "".join(parts)
|
| 351 |
+
|
| 352 |
+
@property
|
| 353 |
+
def epoch(self) -> int:
|
| 354 |
+
_epoch: int = self._version.epoch
|
| 355 |
+
return _epoch
|
| 356 |
+
|
| 357 |
+
@property
|
| 358 |
+
def release(self) -> Tuple[int, ...]:
|
| 359 |
+
_release: Tuple[int, ...] = self._version.release
|
| 360 |
+
return _release
|
| 361 |
+
|
| 362 |
+
@property
|
| 363 |
+
def pre(self) -> Optional[Tuple[str, int]]:
|
| 364 |
+
_pre: Optional[Tuple[str, int]] = self._version.pre
|
| 365 |
+
return _pre
|
| 366 |
+
|
| 367 |
+
@property
|
| 368 |
+
def post(self) -> Optional[int]:
|
| 369 |
+
return self._version.post[1] if self._version.post else None
|
| 370 |
+
|
| 371 |
+
@property
|
| 372 |
+
def dev(self) -> Optional[int]:
|
| 373 |
+
return self._version.dev[1] if self._version.dev else None
|
| 374 |
+
|
| 375 |
+
@property
|
| 376 |
+
def local(self) -> Optional[str]:
|
| 377 |
+
if self._version.local:
|
| 378 |
+
return ".".join(str(x) for x in self._version.local)
|
| 379 |
+
else:
|
| 380 |
+
return None
|
| 381 |
+
|
| 382 |
+
@property
|
| 383 |
+
def public(self) -> str:
|
| 384 |
+
return str(self).split("+", 1)[0]
|
| 385 |
+
|
| 386 |
+
@property
|
| 387 |
+
def base_version(self) -> str:
|
| 388 |
+
parts = []
|
| 389 |
+
|
| 390 |
+
# Epoch
|
| 391 |
+
if self.epoch != 0:
|
| 392 |
+
parts.append(f"{self.epoch}!")
|
| 393 |
+
|
| 394 |
+
# Release segment
|
| 395 |
+
parts.append(".".join(str(x) for x in self.release))
|
| 396 |
+
|
| 397 |
+
return "".join(parts)
|
| 398 |
+
|
| 399 |
+
@property
|
| 400 |
+
def is_prerelease(self) -> bool:
|
| 401 |
+
return self.dev is not None or self.pre is not None
|
| 402 |
+
|
| 403 |
+
@property
|
| 404 |
+
def is_postrelease(self) -> bool:
|
| 405 |
+
return self.post is not None
|
| 406 |
+
|
| 407 |
+
@property
|
| 408 |
+
def is_devrelease(self) -> bool:
|
| 409 |
+
return self.dev is not None
|
| 410 |
+
|
| 411 |
+
@property
|
| 412 |
+
def major(self) -> int:
|
| 413 |
+
return self.release[0] if len(self.release) >= 1 else 0
|
| 414 |
+
|
| 415 |
+
@property
|
| 416 |
+
def minor(self) -> int:
|
| 417 |
+
return self.release[1] if len(self.release) >= 2 else 0
|
| 418 |
+
|
| 419 |
+
@property
|
| 420 |
+
def micro(self) -> int:
|
| 421 |
+
return self.release[2] if len(self.release) >= 3 else 0
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def _parse_letter_version(
|
| 425 |
+
letter: str, number: Union[str, bytes, SupportsInt]
|
| 426 |
+
) -> Optional[Tuple[str, int]]:
|
| 427 |
+
|
| 428 |
+
if letter:
|
| 429 |
+
# We consider there to be an implicit 0 in a pre-release if there is
|
| 430 |
+
# not a numeral associated with it.
|
| 431 |
+
if number is None:
|
| 432 |
+
number = 0
|
| 433 |
+
|
| 434 |
+
# We normalize any letters to their lower case form
|
| 435 |
+
letter = letter.lower()
|
| 436 |
+
|
| 437 |
+
# We consider some words to be alternate spellings of other words and
|
| 438 |
+
# in those cases we want to normalize the spellings to our preferred
|
| 439 |
+
# spelling.
|
| 440 |
+
if letter == "alpha":
|
| 441 |
+
letter = "a"
|
| 442 |
+
elif letter == "beta":
|
| 443 |
+
letter = "b"
|
| 444 |
+
elif letter in ["c", "pre", "preview"]:
|
| 445 |
+
letter = "rc"
|
| 446 |
+
elif letter in ["rev", "r"]:
|
| 447 |
+
letter = "post"
|
| 448 |
+
|
| 449 |
+
return letter, int(number)
|
| 450 |
+
if not letter and number:
|
| 451 |
+
# We assume if we are given a number, but we are not given a letter
|
| 452 |
+
# then this is using the implicit post release syntax (e.g. 1.0-1)
|
| 453 |
+
letter = "post"
|
| 454 |
+
|
| 455 |
+
return letter, int(number)
|
| 456 |
+
|
| 457 |
+
return None
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
_local_version_separators = re.compile(r"[\._-]")
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def _parse_local_version(local: str) -> Optional[LocalType]:
|
| 464 |
+
"""
|
| 465 |
+
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
|
| 466 |
+
"""
|
| 467 |
+
if local is not None:
|
| 468 |
+
return tuple(
|
| 469 |
+
part.lower() if not part.isdigit() else int(part)
|
| 470 |
+
for part in _local_version_separators.split(local)
|
| 471 |
+
)
|
| 472 |
+
return None
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def _cmpkey(
|
| 476 |
+
epoch: int,
|
| 477 |
+
release: Tuple[int, ...],
|
| 478 |
+
pre: Optional[Tuple[str, int]],
|
| 479 |
+
post: Optional[Tuple[str, int]],
|
| 480 |
+
dev: Optional[Tuple[str, int]],
|
| 481 |
+
local: Optional[Tuple[SubLocalType]],
|
| 482 |
+
) -> CmpKey:
|
| 483 |
+
|
| 484 |
+
# When we compare a release version, we want to compare it with all of the
|
| 485 |
+
# trailing zeros removed. So we'll use a reverse the list, drop all the now
|
| 486 |
+
# leading zeros until we come to something non zero, then take the rest
|
| 487 |
+
# re-reverse it back into the correct order and make it a tuple and use
|
| 488 |
+
# that for our sorting key.
|
| 489 |
+
_release = tuple(
|
| 490 |
+
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
|
| 491 |
+
)
|
| 492 |
+
|
| 493 |
+
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
|
| 494 |
+
# We'll do this by abusing the pre segment, but we _only_ want to do this
|
| 495 |
+
# if there is not a pre or a post segment. If we have one of those then
|
| 496 |
+
# the normal sorting rules will handle this case correctly.
|
| 497 |
+
if pre is None and post is None and dev is not None:
|
| 498 |
+
_pre: PrePostDevType = NegativeInfinity
|
| 499 |
+
# Versions without a pre-release (except as noted above) should sort after
|
| 500 |
+
# those with one.
|
| 501 |
+
elif pre is None:
|
| 502 |
+
_pre = Infinity
|
| 503 |
+
else:
|
| 504 |
+
_pre = pre
|
| 505 |
+
|
| 506 |
+
# Versions without a post segment should sort before those with one.
|
| 507 |
+
if post is None:
|
| 508 |
+
_post: PrePostDevType = NegativeInfinity
|
| 509 |
+
|
| 510 |
+
else:
|
| 511 |
+
_post = post
|
| 512 |
+
|
| 513 |
+
# Versions without a development segment should sort after those with one.
|
| 514 |
+
if dev is None:
|
| 515 |
+
_dev: PrePostDevType = Infinity
|
| 516 |
+
|
| 517 |
+
else:
|
| 518 |
+
_dev = dev
|
| 519 |
+
|
| 520 |
+
if local is None:
|
| 521 |
+
# Versions without a local segment should sort before those with one.
|
| 522 |
+
_local: LocalType = NegativeInfinity
|
| 523 |
+
else:
|
| 524 |
+
# Versions with a local segment need that segment parsed to implement
|
| 525 |
+
# the sorting rules in PEP440.
|
| 526 |
+
# - Alpha numeric segments sort before numeric segments
|
| 527 |
+
# - Alpha numeric segments sort lexicographically
|
| 528 |
+
# - Numeric segments sort numerically
|
| 529 |
+
# - Shorter versions sort before longer versions when the prefixes
|
| 530 |
+
# match exactly
|
| 531 |
+
_local = tuple(
|
| 532 |
+
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
|
| 533 |
+
)
|
| 534 |
+
|
| 535 |
+
return epoch, _release, _pre, _post, _dev, _local
|
mgm/lib/python3.10/site-packages/sklearn/impute/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Transformers for missing value imputation"""
|
| 2 |
+
import typing
|
| 3 |
+
|
| 4 |
+
from ._base import MissingIndicator, SimpleImputer
|
| 5 |
+
from ._knn import KNNImputer
|
| 6 |
+
|
| 7 |
+
if typing.TYPE_CHECKING:
|
| 8 |
+
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
|
| 9 |
+
# TODO: remove this check once the estimator is no longer experimental.
|
| 10 |
+
from ._iterative import IterativeImputer # noqa
|
| 11 |
+
|
| 12 |
+
__all__ = ["MissingIndicator", "SimpleImputer", "KNNImputer"]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# TODO: remove this check once the estimator is no longer experimental.
|
| 16 |
+
def __getattr__(name):
|
| 17 |
+
if name == "IterativeImputer":
|
| 18 |
+
raise ImportError(
|
| 19 |
+
f"{name} is experimental and the API might change without any "
|
| 20 |
+
"deprecation cycle. To use it, you need to explicitly import "
|
| 21 |
+
"enable_iterative_imputer:\n"
|
| 22 |
+
"from sklearn.experimental import enable_iterative_imputer"
|
| 23 |
+
)
|
| 24 |
+
raise AttributeError(f"module {__name__} has no attribute {name}")
|
mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (887 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_base.cpython-310.pyc
ADDED
|
Binary file (29.4 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_iterative.cpython-310.pyc
ADDED
|
Binary file (27.6 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/impute/__pycache__/_knn.cpython-310.pyc
ADDED
|
Binary file (11.3 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/sklearn/impute/_base.py
ADDED
|
@@ -0,0 +1,1071 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
|
| 2 |
+
# Sergey Feldman <sergeyfeldman@gmail.com>
|
| 3 |
+
# License: BSD 3 clause
|
| 4 |
+
|
| 5 |
+
import numbers
|
| 6 |
+
import warnings
|
| 7 |
+
from collections import Counter
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import numpy.ma as ma
|
| 11 |
+
from scipy import sparse as sp
|
| 12 |
+
|
| 13 |
+
from ..base import BaseEstimator, TransformerMixin
|
| 14 |
+
from ..utils._param_validation import StrOptions, Hidden
|
| 15 |
+
from ..utils.fixes import _mode
|
| 16 |
+
from ..utils.sparsefuncs import _get_median
|
| 17 |
+
from ..utils.validation import check_is_fitted
|
| 18 |
+
from ..utils.validation import FLOAT_DTYPES
|
| 19 |
+
from ..utils.validation import _check_feature_names_in
|
| 20 |
+
from ..utils._mask import _get_mask
|
| 21 |
+
from ..utils import _is_pandas_na
|
| 22 |
+
from ..utils import is_scalar_nan
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _check_inputs_dtype(X, missing_values):
|
| 26 |
+
if _is_pandas_na(missing_values):
|
| 27 |
+
# Allow using `pd.NA` as missing values to impute numerical arrays.
|
| 28 |
+
return
|
| 29 |
+
if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
|
| 30 |
+
raise ValueError(
|
| 31 |
+
"'X' and 'missing_values' types are expected to be"
|
| 32 |
+
" both numerical. Got X.dtype={} and "
|
| 33 |
+
" type(missing_values)={}.".format(X.dtype, type(missing_values))
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _most_frequent(array, extra_value, n_repeat):
|
| 38 |
+
"""Compute the most frequent value in a 1d array extended with
|
| 39 |
+
[extra_value] * n_repeat, where extra_value is assumed to be not part
|
| 40 |
+
of the array."""
|
| 41 |
+
# Compute the most frequent value in array only
|
| 42 |
+
if array.size > 0:
|
| 43 |
+
if array.dtype == object:
|
| 44 |
+
# scipy.stats.mode is slow with object dtype array.
|
| 45 |
+
# Python Counter is more efficient
|
| 46 |
+
counter = Counter(array)
|
| 47 |
+
most_frequent_count = counter.most_common(1)[0][1]
|
| 48 |
+
# tie breaking similarly to scipy.stats.mode
|
| 49 |
+
most_frequent_value = min(
|
| 50 |
+
value
|
| 51 |
+
for value, count in counter.items()
|
| 52 |
+
if count == most_frequent_count
|
| 53 |
+
)
|
| 54 |
+
else:
|
| 55 |
+
mode = _mode(array)
|
| 56 |
+
most_frequent_value = mode[0][0]
|
| 57 |
+
most_frequent_count = mode[1][0]
|
| 58 |
+
else:
|
| 59 |
+
most_frequent_value = 0
|
| 60 |
+
most_frequent_count = 0
|
| 61 |
+
|
| 62 |
+
# Compare to array + [extra_value] * n_repeat
|
| 63 |
+
if most_frequent_count == 0 and n_repeat == 0:
|
| 64 |
+
return np.nan
|
| 65 |
+
elif most_frequent_count < n_repeat:
|
| 66 |
+
return extra_value
|
| 67 |
+
elif most_frequent_count > n_repeat:
|
| 68 |
+
return most_frequent_value
|
| 69 |
+
elif most_frequent_count == n_repeat:
|
| 70 |
+
# tie breaking similarly to scipy.stats.mode
|
| 71 |
+
return min(most_frequent_value, extra_value)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class _BaseImputer(TransformerMixin, BaseEstimator):
|
| 75 |
+
"""Base class for all imputers.
|
| 76 |
+
|
| 77 |
+
It adds automatically support for `add_indicator`.
|
| 78 |
+
"""
|
| 79 |
+
|
| 80 |
+
_parameter_constraints: dict = {
|
| 81 |
+
"missing_values": ["missing_values"],
|
| 82 |
+
"add_indicator": ["boolean"],
|
| 83 |
+
"keep_empty_features": ["boolean"],
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
def __init__(
|
| 87 |
+
self, *, missing_values=np.nan, add_indicator=False, keep_empty_features=False
|
| 88 |
+
):
|
| 89 |
+
self.missing_values = missing_values
|
| 90 |
+
self.add_indicator = add_indicator
|
| 91 |
+
self.keep_empty_features = keep_empty_features
|
| 92 |
+
|
| 93 |
+
def _fit_indicator(self, X):
|
| 94 |
+
"""Fit a MissingIndicator."""
|
| 95 |
+
if self.add_indicator:
|
| 96 |
+
self.indicator_ = MissingIndicator(
|
| 97 |
+
missing_values=self.missing_values, error_on_new=False
|
| 98 |
+
)
|
| 99 |
+
self.indicator_._fit(X, precomputed=True)
|
| 100 |
+
else:
|
| 101 |
+
self.indicator_ = None
|
| 102 |
+
|
| 103 |
+
def _transform_indicator(self, X):
|
| 104 |
+
"""Compute the indicator mask.'
|
| 105 |
+
|
| 106 |
+
Note that X must be the original data as passed to the imputer before
|
| 107 |
+
any imputation, since imputation may be done inplace in some cases.
|
| 108 |
+
"""
|
| 109 |
+
if self.add_indicator:
|
| 110 |
+
if not hasattr(self, "indicator_"):
|
| 111 |
+
raise ValueError(
|
| 112 |
+
"Make sure to call _fit_indicator before _transform_indicator"
|
| 113 |
+
)
|
| 114 |
+
return self.indicator_.transform(X)
|
| 115 |
+
|
| 116 |
+
def _concatenate_indicator(self, X_imputed, X_indicator):
|
| 117 |
+
"""Concatenate indicator mask with the imputed data."""
|
| 118 |
+
if not self.add_indicator:
|
| 119 |
+
return X_imputed
|
| 120 |
+
|
| 121 |
+
hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack
|
| 122 |
+
if X_indicator is None:
|
| 123 |
+
raise ValueError(
|
| 124 |
+
"Data from the missing indicator are not provided. Call "
|
| 125 |
+
"_fit_indicator and _transform_indicator in the imputer "
|
| 126 |
+
"implementation."
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
return hstack((X_imputed, X_indicator))
|
| 130 |
+
|
| 131 |
+
def _concatenate_indicator_feature_names_out(self, names, input_features):
|
| 132 |
+
if not self.add_indicator:
|
| 133 |
+
return names
|
| 134 |
+
|
| 135 |
+
indicator_names = self.indicator_.get_feature_names_out(input_features)
|
| 136 |
+
return np.concatenate([names, indicator_names])
|
| 137 |
+
|
| 138 |
+
def _more_tags(self):
|
| 139 |
+
return {"allow_nan": is_scalar_nan(self.missing_values)}
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class SimpleImputer(_BaseImputer):
|
| 143 |
+
"""Univariate imputer for completing missing values with simple strategies.
|
| 144 |
+
|
| 145 |
+
Replace missing values using a descriptive statistic (e.g. mean, median, or
|
| 146 |
+
most frequent) along each column, or using a constant value.
|
| 147 |
+
|
| 148 |
+
Read more in the :ref:`User Guide <impute>`.
|
| 149 |
+
|
| 150 |
+
.. versionadded:: 0.20
|
| 151 |
+
`SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
|
| 152 |
+
estimator which is now removed.
|
| 153 |
+
|
| 154 |
+
Parameters
|
| 155 |
+
----------
|
| 156 |
+
missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan
|
| 157 |
+
The placeholder for the missing values. All occurrences of
|
| 158 |
+
`missing_values` will be imputed. For pandas' dataframes with
|
| 159 |
+
nullable integer dtypes with missing values, `missing_values`
|
| 160 |
+
can be set to either `np.nan` or `pd.NA`.
|
| 161 |
+
|
| 162 |
+
strategy : str, default='mean'
|
| 163 |
+
The imputation strategy.
|
| 164 |
+
|
| 165 |
+
- If "mean", then replace missing values using the mean along
|
| 166 |
+
each column. Can only be used with numeric data.
|
| 167 |
+
- If "median", then replace missing values using the median along
|
| 168 |
+
each column. Can only be used with numeric data.
|
| 169 |
+
- If "most_frequent", then replace missing using the most frequent
|
| 170 |
+
value along each column. Can be used with strings or numeric data.
|
| 171 |
+
If there is more than one such value, only the smallest is returned.
|
| 172 |
+
- If "constant", then replace missing values with fill_value. Can be
|
| 173 |
+
used with strings or numeric data.
|
| 174 |
+
|
| 175 |
+
.. versionadded:: 0.20
|
| 176 |
+
strategy="constant" for fixed value imputation.
|
| 177 |
+
|
| 178 |
+
fill_value : str or numerical value, default=None
|
| 179 |
+
When strategy == "constant", `fill_value` is used to replace all
|
| 180 |
+
occurrences of missing_values. For string or object data types,
|
| 181 |
+
`fill_value` must be a string.
|
| 182 |
+
If `None`, `fill_value` will be 0 when imputing numerical
|
| 183 |
+
data and "missing_value" for strings or object data types.
|
| 184 |
+
|
| 185 |
+
verbose : int, default=0
|
| 186 |
+
Controls the verbosity of the imputer.
|
| 187 |
+
|
| 188 |
+
.. deprecated:: 1.1
|
| 189 |
+
The 'verbose' parameter was deprecated in version 1.1 and will be
|
| 190 |
+
removed in 1.3. A warning will always be raised upon the removal of
|
| 191 |
+
empty columns in the future version.
|
| 192 |
+
|
| 193 |
+
copy : bool, default=True
|
| 194 |
+
If True, a copy of X will be created. If False, imputation will
|
| 195 |
+
be done in-place whenever possible. Note that, in the following cases,
|
| 196 |
+
a new copy will always be made, even if `copy=False`:
|
| 197 |
+
|
| 198 |
+
- If `X` is not an array of floating values;
|
| 199 |
+
- If `X` is encoded as a CSR matrix;
|
| 200 |
+
- If `add_indicator=True`.
|
| 201 |
+
|
| 202 |
+
add_indicator : bool, default=False
|
| 203 |
+
If True, a :class:`MissingIndicator` transform will stack onto output
|
| 204 |
+
of the imputer's transform. This allows a predictive estimator
|
| 205 |
+
to account for missingness despite imputation. If a feature has no
|
| 206 |
+
missing values at fit/train time, the feature won't appear on
|
| 207 |
+
the missing indicator even if there are missing values at
|
| 208 |
+
transform/test time.
|
| 209 |
+
|
| 210 |
+
keep_empty_features : bool, default=False
|
| 211 |
+
If True, features that consist exclusively of missing values when
|
| 212 |
+
`fit` is called are returned in results when `transform` is called.
|
| 213 |
+
The imputed value is always `0` except when `strategy="constant"`
|
| 214 |
+
in which case `fill_value` will be used instead.
|
| 215 |
+
|
| 216 |
+
.. versionadded:: 1.2
|
| 217 |
+
|
| 218 |
+
Attributes
|
| 219 |
+
----------
|
| 220 |
+
statistics_ : array of shape (n_features,)
|
| 221 |
+
The imputation fill value for each feature.
|
| 222 |
+
Computing statistics can result in `np.nan` values.
|
| 223 |
+
During :meth:`transform`, features corresponding to `np.nan`
|
| 224 |
+
statistics will be discarded.
|
| 225 |
+
|
| 226 |
+
indicator_ : :class:`~sklearn.impute.MissingIndicator`
|
| 227 |
+
Indicator used to add binary indicators for missing values.
|
| 228 |
+
`None` if `add_indicator=False`.
|
| 229 |
+
|
| 230 |
+
n_features_in_ : int
|
| 231 |
+
Number of features seen during :term:`fit`.
|
| 232 |
+
|
| 233 |
+
.. versionadded:: 0.24
|
| 234 |
+
|
| 235 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 236 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 237 |
+
has feature names that are all strings.
|
| 238 |
+
|
| 239 |
+
.. versionadded:: 1.0
|
| 240 |
+
|
| 241 |
+
See Also
|
| 242 |
+
--------
|
| 243 |
+
IterativeImputer : Multivariate imputer that estimates values to impute for
|
| 244 |
+
each feature with missing values from all the others.
|
| 245 |
+
KNNImputer : Multivariate imputer that estimates missing features using
|
| 246 |
+
nearest samples.
|
| 247 |
+
|
| 248 |
+
Notes
|
| 249 |
+
-----
|
| 250 |
+
Columns which only contained missing values at :meth:`fit` are discarded
|
| 251 |
+
upon :meth:`transform` if strategy is not `"constant"`.
|
| 252 |
+
|
| 253 |
+
In a prediction context, simple imputation usually performs poorly when
|
| 254 |
+
associated with a weak learner. However, with a powerful learner, it can
|
| 255 |
+
lead to as good or better performance than complex imputation such as
|
| 256 |
+
:class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`.
|
| 257 |
+
|
| 258 |
+
Examples
|
| 259 |
+
--------
|
| 260 |
+
>>> import numpy as np
|
| 261 |
+
>>> from sklearn.impute import SimpleImputer
|
| 262 |
+
>>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
|
| 263 |
+
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
|
| 264 |
+
SimpleImputer()
|
| 265 |
+
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
|
| 266 |
+
>>> print(imp_mean.transform(X))
|
| 267 |
+
[[ 7. 2. 3. ]
|
| 268 |
+
[ 4. 3.5 6. ]
|
| 269 |
+
[10. 3.5 9. ]]
|
| 270 |
+
"""
|
| 271 |
+
|
| 272 |
+
_parameter_constraints: dict = {
|
| 273 |
+
**_BaseImputer._parameter_constraints,
|
| 274 |
+
"strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})],
|
| 275 |
+
"fill_value": "no_validation", # any object is valid
|
| 276 |
+
"verbose": ["verbose", Hidden(StrOptions({"deprecated"}))],
|
| 277 |
+
"copy": ["boolean"],
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
def __init__(
    self,
    *,
    missing_values=np.nan,
    strategy="mean",
    fill_value=None,
    verbose="deprecated",
    copy=True,
    add_indicator=False,
    keep_empty_features=False,
):
    """Store constructor parameters unvalidated (sklearn convention:
    validation is deferred to `fit` via `_validate_params`)."""
    # Options shared by all imputers are handled by the base class.
    super().__init__(
        missing_values=missing_values,
        add_indicator=add_indicator,
        keep_empty_features=keep_empty_features,
    )
    self.strategy = strategy
    self.fill_value = fill_value
    self.verbose = verbose  # deprecated; sentinel means "not user-set"
    self.copy = copy
|
| 300 |
+
|
| 301 |
+
def _validate_input(self, X, in_fit):
    """Validate `X` and convert it to the dtype required by the strategy.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Input data to validate.
    in_fit : bool
        Whether the call happens during `fit` (True) or `transform`
        (False). Controls `reset` of `n_features_in_` and dtype reuse.

    Returns
    -------
    X : {ndarray, sparse matrix}
        The validated data (CSC if sparse).

    Raises
    ------
    ValueError
        If a numeric conversion fails for a numeric strategy, or if the
        resulting dtype is not numeric/object.
    """
    if self.strategy in ("most_frequent", "constant"):
        # If input is a list of strings, dtype = object.
        # Otherwise ValueError is raised in SimpleImputer
        # with strategy='most_frequent' or 'constant'
        # because the list is converted to Unicode numpy array
        if isinstance(X, list) and any(
            isinstance(elem, str) for row in X for elem in row
        ):
            dtype = object
        else:
            dtype = None
    else:
        # mean/median require float data.
        dtype = FLOAT_DTYPES

    if not in_fit and self._fit_dtype.kind == "O":
        # Use object dtype if fitted on object dtypes
        dtype = self._fit_dtype

    # NaN-like placeholders must be allowed through finiteness checks.
    if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
        force_all_finite = "allow-nan"
    else:
        force_all_finite = True

    try:
        X = self._validate_data(
            X,
            reset=in_fit,
            accept_sparse="csc",
            dtype=dtype,
            force_all_finite=force_all_finite,
            copy=self.copy,
        )
    except ValueError as ve:
        if "could not convert" in str(ve):
            # Re-raise with a strategy-specific message; `from None`
            # suppresses the noisy original traceback context.
            new_ve = ValueError(
                "Cannot use {} strategy with non-numeric data:\n{}".format(
                    self.strategy, ve
                )
            )
            raise new_ve from None
        else:
            raise ve

    if in_fit:
        # Use the dtype seen in `fit` for non-`fit` conversion
        self._fit_dtype = X.dtype

    _check_inputs_dtype(X, self.missing_values)
    if X.dtype.kind not in ("i", "u", "f", "O"):
        raise ValueError(
            "SimpleImputer does not support data with dtype "
            "{0}. Please provide either a numeric array (with"
            " a floating point or integer dtype) or "
            "categorical data represented either as an array "
            "with integer dtype or an array of string values "
            "with an object dtype.".format(X.dtype)
        )

    return X
|
| 362 |
+
|
| 363 |
+
def fit(self, X, y=None):
    """Fit the imputer on `X`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : Ignored
        Not used, present here for API consistency by convention.

    Returns
    -------
    self : object
        Fitted estimator.
    """
    self._validate_params()
    # The sentinel "deprecated" means the user did not set `verbose`;
    # any other value triggers the deprecation warning.
    if self.verbose != "deprecated":
        warnings.warn(
            "The 'verbose' parameter was deprecated in version "
            "1.1 and will be removed in 1.3. A warning will "
            "always be raised upon the removal of empty columns "
            "in the future version.",
            FutureWarning,
        )

    X = self._validate_input(X, in_fit=True)

    # default fill_value is 0 for numerical input and "missing_value"
    # otherwise
    if self.fill_value is None:
        if X.dtype.kind in ("i", "u", "f"):
            fill_value = 0
        else:
            fill_value = "missing_value"
    else:
        fill_value = self.fill_value

    # fill_value should be numerical in case of numerical input
    if (
        self.strategy == "constant"
        and X.dtype.kind in ("i", "u", "f")
        and not isinstance(fill_value, numbers.Real)
    ):
        raise ValueError(
            "'fill_value'={0} is invalid. Expected a "
            "numerical value when imputing numerical "
            "data".format(fill_value)
        )

    if sp.issparse(X):
        # missing_values = 0 not allowed with sparse data as it would
        # force densification
        if self.missing_values == 0:
            raise ValueError(
                "Imputation not possible when missing_values "
                "== 0 and input is sparse. Provide a dense "
                "array instead."
            )
        else:
            self.statistics_ = self._sparse_fit(
                X, self.strategy, self.missing_values, fill_value
            )

    else:
        self.statistics_ = self._dense_fit(
            X, self.strategy, self.missing_values, fill_value
        )

    return self
|
| 434 |
+
|
| 435 |
+
def _sparse_fit(self, X, strategy, missing_values, fill_value):
    """Fit the transformer on sparse data.

    `X` is expected in CSC format (enforced by `_validate_input`), so each
    column's stored values are the contiguous slice
    `X.data[X.indptr[i]:X.indptr[i + 1]]`.

    Returns
    -------
    statistics : ndarray of shape (n_features,)
        Per-column statistic used for imputation (NaN for columns with no
        observed value unless `keep_empty_features`).
    """
    missing_mask = _get_mask(X, missing_values)
    mask_data = missing_mask.data
    # Zeros not explicitly stored in the sparse structure, per column.
    n_implicit_zeros = X.shape[0] - np.diff(X.indptr)

    statistics = np.empty(X.shape[1])

    if strategy == "constant":
        # for constant strategy, self.statistics_ is used to store
        # fill_value in each column
        statistics.fill(fill_value)
    else:
        for i in range(X.shape[1]):
            column = X.data[X.indptr[i] : X.indptr[i + 1]]
            mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
            # Drop the entries flagged as missing.
            column = column[~mask_column]

            # combine explicit and implicit zeros
            mask_zeros = _get_mask(column, 0)
            column = column[~mask_zeros]
            n_explicit_zeros = mask_zeros.sum()
            n_zeros = n_implicit_zeros[i] + n_explicit_zeros

            if len(column) == 0 and self.keep_empty_features:
                # in case we want to keep columns with only missing values.
                statistics[i] = 0
            else:
                if strategy == "mean":
                    s = column.size + n_zeros
                    # All-missing column -> NaN (dropped later in transform).
                    statistics[i] = np.nan if s == 0 else column.sum() / s

                elif strategy == "median":
                    statistics[i] = _get_median(column, n_zeros)

                elif strategy == "most_frequent":
                    statistics[i] = _most_frequent(column, 0, n_zeros)

    super()._fit_indicator(missing_mask)

    return statistics
|
| 476 |
+
|
| 477 |
+
def _dense_fit(self, X, strategy, missing_values, fill_value):
    """Fit the transformer on dense data.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Validated dense input data.
    strategy : str
        One of "mean", "median", "most_frequent", "constant".
    missing_values : scalar
        The placeholder marking missing entries.
    fill_value : object
        Value used for the "constant" strategy.

    Returns
    -------
    statistics : ndarray of shape (n_features,)
        Per-column statistic used for imputation (NaN for columns with no
        observed value unless `keep_empty_features`).
    """
    missing_mask = _get_mask(X, missing_values)
    masked_X = ma.masked_array(X, mask=missing_mask)

    super()._fit_indicator(missing_mask)

    # Mean
    if strategy == "mean":
        mean_masked = np.ma.mean(masked_X, axis=0)
        # Avoid the warning "Warning: converting a masked element to nan."
        mean = np.ma.getdata(mean_masked)
        # Use `getmaskarray` (not `getmask`) so we always get a boolean
        # array: `getmask` returns the scalar `np.ma.nomask` when no
        # column is fully missing, which only works here by relying on
        # scalar-False fancy indexing being a no-op. This also matches
        # the median branch below.
        mean[np.ma.getmaskarray(mean_masked)] = (
            0 if self.keep_empty_features else np.nan
        )

        return mean

    # Median
    elif strategy == "median":
        median_masked = np.ma.median(masked_X, axis=0)
        # Avoid the warning "Warning: converting a masked element to nan."
        median = np.ma.getdata(median_masked)
        median[np.ma.getmaskarray(median_masked)] = (
            0 if self.keep_empty_features else np.nan
        )

        return median

    # Most frequent
    elif strategy == "most_frequent":
        # Avoid use of scipy.stats.mstats.mode due to the required
        # additional overhead and slow benchmarking performance.
        # See Issue 14325 and PR 14399 for full discussion.

        # To be able access the elements by columns
        X = X.transpose()
        mask = missing_mask.transpose()

        if X.dtype.kind == "O":
            most_frequent = np.empty(X.shape[0], dtype=object)
        else:
            most_frequent = np.empty(X.shape[0])

        for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
            # Keep only the observed (non-missing) entries of the column.
            row_mask = np.logical_not(row_mask).astype(bool)
            row = row[row_mask]
            if len(row) == 0 and self.keep_empty_features:
                most_frequent[i] = 0
            else:
                most_frequent[i] = _most_frequent(row, np.nan, 0)

        return most_frequent

    # Constant
    elif strategy == "constant":
        # for constant strategy, self.statistics_ is used to store
        # fill_value in each column
        return np.full(X.shape[1], fill_value, dtype=X.dtype)
|
| 534 |
+
|
| 535 |
+
def transform(self, X):
    """Impute all missing values in `X`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data to complete.

    Returns
    -------
    X_imputed : {ndarray, sparse matrix} of shape \
            (n_samples, n_features_out)
        `X` with imputed values.
    """
    check_is_fitted(self)

    X = self._validate_input(X, in_fit=False)
    statistics = self.statistics_

    if X.shape[1] != statistics.shape[0]:
        raise ValueError(
            "X has %d features per sample, expected %d"
            % (X.shape[1], self.statistics_.shape[0])
        )

    # compute mask before eliminating invalid features
    missing_mask = _get_mask(X, self.missing_values)

    # Decide whether to keep missing features
    if self.strategy == "constant" or self.keep_empty_features:
        # Every column has a usable statistic; nothing is dropped.
        valid_statistics = statistics
        valid_statistics_indexes = None
    else:
        # same as np.isnan but also works for object dtypes
        invalid_mask = _get_mask(statistics, np.nan)
        valid_mask = np.logical_not(invalid_mask)
        valid_statistics = statistics[valid_mask]
        valid_statistics_indexes = np.flatnonzero(valid_mask)

        if invalid_mask.any():
            invalid_features = np.arange(X.shape[1])[invalid_mask]
            if self.verbose != "deprecated" and self.verbose:
                # use feature names warning if features are provided
                if hasattr(self, "feature_names_in_"):
                    invalid_features = self.feature_names_in_[invalid_features]
                warnings.warn(
                    "Skipping features without any observed values:"
                    f" {invalid_features}. At least one non-missing value is needed"
                    f" for imputation with strategy='{self.strategy}'."
                )
            # Drop columns that were entirely missing at fit time.
            X = X[:, valid_statistics_indexes]

    # Do actual imputation
    if sp.issparse(X):
        if self.missing_values == 0:
            raise ValueError(
                "Imputation not possible when missing_values "
                "== 0 and input is sparse. Provide a dense "
                "array instead."
            )
        else:
            # if no invalid statistics are found, use the mask computed
            # before, else recompute mask
            if valid_statistics_indexes is None:
                mask = missing_mask.data
            else:
                mask = _get_mask(X.data, self.missing_values)
            # Map each stored entry to its column index (CSC layout),
            # then keep only the missing ones.
            indexes = np.repeat(
                np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
            )[mask]

            X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
    else:
        # use mask computed before eliminating invalid mask
        if valid_statistics_indexes is None:
            mask_valid_features = missing_mask
        else:
            mask_valid_features = missing_mask[:, valid_statistics_indexes]
        n_missing = np.sum(mask_valid_features, axis=0)
        # Repeat each column statistic once per missing entry in that
        # column; the transpose makes `np.where` emit column-major order
        # so values line up with `coordinates`.
        values = np.repeat(valid_statistics, n_missing)
        coordinates = np.where(mask_valid_features.transpose())[::-1]

        X[coordinates] = values

    X_indicator = super()._transform_indicator(missing_mask)

    return super()._concatenate_indicator(X, X_indicator)
|
| 622 |
+
|
| 623 |
+
def inverse_transform(self, X):
    """Convert the data back to the original representation.

    Inverts the `transform` operation performed on an array.
    This operation can only be performed after :class:`SimpleImputer` is
    instantiated with `add_indicator=True`.

    Note that `inverse_transform` can only invert the transform in
    features that have binary indicators for missing values. If a feature
    has no missing values at `fit` time, the feature won't have a binary
    indicator, and the imputation done at `transform` time won't be
    inverted.

    .. versionadded:: 0.24

    Parameters
    ----------
    X : array-like of shape \
            (n_samples, n_features + n_features_missing_indicator)
        The imputed data to be reverted to original data. It has to be
        an augmented array of imputed data and the missing indicator mask.

    Returns
    -------
    X_original : ndarray of shape (n_samples, n_features)
        The original `X` with missing values as it was prior
        to imputation.
    """
    check_is_fitted(self)

    if not self.add_indicator:
        raise ValueError(
            "'inverse_transform' works only when "
            "'SimpleImputer' is instantiated with "
            "'add_indicator=True'. "
            f"Got 'add_indicator={self.add_indicator}' "
            "instead."
        )

    # `X` is [imputed data columns | indicator mask columns]; split it.
    n_features_missing = len(self.indicator_.features_)
    non_empty_feature_count = X.shape[1] - n_features_missing
    array_imputed = X[:, :non_empty_feature_count].copy()
    missing_mask = X[:, non_empty_feature_count:].astype(bool)

    n_features_original = len(self.statistics_)
    shape_original = (X.shape[0], n_features_original)
    X_original = np.zeros(shape_original)
    # Seed the original-shaped array with the indicator mask so that
    # fully-missing (dropped) columns can be told apart below.
    X_original[:, self.indicator_.features_] = missing_mask
    full_mask = X_original.astype(bool)

    # Walk both index spaces in lockstep: columns that were all-True
    # (entirely missing at fit) have no imputed counterpart and are
    # skipped on the imputed side.
    imputed_idx, original_idx = 0, 0
    while imputed_idx < len(array_imputed.T):
        if not np.all(X_original[:, original_idx]):
            X_original[:, original_idx] = array_imputed.T[imputed_idx]
            imputed_idx += 1
            original_idx += 1
        else:
            original_idx += 1

    # Restore the missing-value placeholder wherever the mask was set.
    X_original[full_mask] = self.missing_values
    return X_original
|
| 684 |
+
|
| 685 |
+
def _more_tags(self):
    """Declare NaN tolerance when the missing-value placeholder is
    itself a NaN-like sentinel (np.nan or pd.NA)."""
    placeholder = self.missing_values
    nan_like = _is_pandas_na(placeholder) or is_scalar_nan(placeholder)
    return {"allow_nan": nan_like}
|
| 691 |
+
|
| 692 |
+
def get_feature_names_out(self, input_features=None):
    """Get output feature names for transformation.

    Parameters
    ----------
    input_features : array-like of str or None, default=None
        Input features.

        - If `input_features` is `None`, then `feature_names_in_` is
          used as feature names in. If `feature_names_in_` is not defined,
          then the following input feature names are generated:
          `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
        - If `input_features` is an array-like, then `input_features` must
          match `feature_names_in_` if `feature_names_in_` is defined.

    Returns
    -------
    feature_names_out : ndarray of str objects
        Transformed feature names.
    """
    input_features = _check_feature_names_in(self, input_features)
    # Columns whose statistic is NaN were entirely missing at fit time and
    # are dropped by `transform`, so they are excluded from the output names.
    non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))
    names = input_features[non_missing_mask]
    return self._concatenate_indicator_feature_names_out(names, input_features)
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
class MissingIndicator(TransformerMixin, BaseEstimator):
    """Binary indicators for missing values.

    Note that this component typically should not be used in a vanilla
    :class:`Pipeline` consisting of transformers and a classifier, but rather
    could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.

    Read more in the :ref:`User Guide <impute>`.

    .. versionadded:: 0.20

    Parameters
    ----------
    missing_values : int, float, str, np.nan or None, default=np.nan
        The placeholder for the missing values. All occurrences of
        `missing_values` will be imputed. For pandas' dataframes with
        nullable integer dtypes with missing values, `missing_values`
        should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.

    features : {'missing-only', 'all'}, default='missing-only'
        Whether the imputer mask should represent all or a subset of
        features.

        - If `'missing-only'` (default), the imputer mask will only represent
          features containing missing values during fit time.
        - If `'all'`, the imputer mask will represent all features.

    sparse : bool or 'auto', default='auto'
        Whether the imputer mask format should be sparse or dense.

        - If `'auto'` (default), the imputer mask will be of same type as
          input.
        - If `True`, the imputer mask will be a sparse matrix.
        - If `False`, the imputer mask will be a numpy array.

    error_on_new : bool, default=True
        If `True`, :meth:`transform` will raise an error when there are
        features with missing values that have no missing values in
        :meth:`fit`. This is applicable only when `features='missing-only'`.

    Attributes
    ----------
    features_ : ndarray of shape (n_missing_features,) or (n_features,)
        The features indices which will be returned when calling
        :meth:`transform`. They are computed during :meth:`fit`. If
        `features='all'`, `features_` is equal to `range(n_features)`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    SimpleImputer : Univariate imputation of missing values.
    IterativeImputer : Multivariate imputation of missing values.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.impute import MissingIndicator
    >>> X1 = np.array([[np.nan, 1, 3],
    ...                [4, 0, np.nan],
    ...                [8, 1, 0]])
    >>> X2 = np.array([[5, 1, np.nan],
    ...                [np.nan, 2, 3],
    ...                [2, 4, 0]])
    >>> indicator = MissingIndicator()
    >>> indicator.fit(X1)
    MissingIndicator()
    >>> X2_tr = indicator.transform(X2)
    >>> X2_tr
    array([[False,  True],
           [ True, False],
           [False, False]])
    """

    # Declarative parameter validation consumed by `_validate_params`.
    _parameter_constraints: dict = {
        "missing_values": [numbers.Real, numbers.Integral, str, None],
        "features": [StrOptions({"missing-only", "all"})],
        "sparse": ["boolean", StrOptions({"auto"})],
        "error_on_new": ["boolean"],
    }

    def __init__(
        self,
        *,
        missing_values=np.nan,
        features="missing-only",
        sparse="auto",
        error_on_new=True,
    ):
        # Parameters are stored unvalidated; validation happens in `fit`.
        self.missing_values = missing_values
        self.features = features
        self.sparse = sparse
        self.error_on_new = error_on_new

    def _get_missing_features_info(self, X):
        """Compute the imputer mask and the indices of the features
        containing missing values.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
            The input data with missing values. Note that `X` has been
            checked in :meth:`fit` and :meth:`transform` before to call this
            function.

        Returns
        -------
        imputer_mask : {ndarray, sparse matrix} of shape \
        (n_samples, n_features)
            The imputer mask of the original data.

        features_with_missing : ndarray of shape (n_features_with_missing)
            The features containing missing values.
        """
        # When `self._precomputed` is set (see `_fit`), `X` already IS the
        # boolean mask; otherwise compute it from the placeholder.
        if not self._precomputed:
            imputer_mask = _get_mask(X, self.missing_values)
        else:
            imputer_mask = X

        if sp.issparse(X):
            # Stored False entries carry no information; drop them so that
            # nnz counts below reflect actual missing values.
            imputer_mask.eliminate_zeros()

            if self.features == "missing-only":
                n_missing = imputer_mask.getnnz(axis=0)

            if self.sparse is False:
                imputer_mask = imputer_mask.toarray()
            elif imputer_mask.format == "csr":
                imputer_mask = imputer_mask.tocsc()
        else:
            if not self._precomputed:
                imputer_mask = _get_mask(X, self.missing_values)
            else:
                imputer_mask = X

            if self.features == "missing-only":
                n_missing = imputer_mask.sum(axis=0)

            if self.sparse is True:
                imputer_mask = sp.csc_matrix(imputer_mask)

        if self.features == "all":
            features_indices = np.arange(X.shape[1])
        else:
            # Only columns that contain at least one missing value.
            features_indices = np.flatnonzero(n_missing)

        return imputer_mask, features_indices

    def _validate_input(self, X, in_fit):
        # Validate `X`; NaN placeholders must survive finiteness checks.
        if not is_scalar_nan(self.missing_values):
            force_all_finite = True
        else:
            force_all_finite = "allow-nan"
        X = self._validate_data(
            X,
            reset=in_fit,
            accept_sparse=("csc", "csr"),
            dtype=None,
            force_all_finite=force_all_finite,
        )
        _check_inputs_dtype(X, self.missing_values)
        if X.dtype.kind not in ("i", "u", "f", "O"):
            raise ValueError(
                "MissingIndicator does not support data with "
                "dtype {0}. Please provide either a numeric array"
                " (with a floating point or integer dtype) or "
                "categorical data represented either as an array "
                "with integer dtype or an array of string values "
                "with an object dtype.".format(X.dtype)
            )

        if sp.issparse(X) and self.missing_values == 0:
            # missing_values = 0 not allowed with sparse data as it would
            # force densification
            raise ValueError(
                "Sparse input with missing_values=0 is "
                "not supported. Provide a dense "
                "array instead."
            )

        return X

    def _fit(self, X, y=None, precomputed=False):
        """Fit the transformer on `X`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data, where `n_samples` is the number of samples and
            `n_features` is the number of features.
            If `precomputed=True`, then `X` is a mask of the input data.

        precomputed : bool
            Whether the input data is a mask.

        Returns
        -------
        imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
        n_features)
            The imputer mask of the original data.
        """
        if precomputed:
            if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
                raise ValueError("precomputed is True but the input data is not a mask")
            self._precomputed = True
        else:
            self._precomputed = False

        # Need not validate X again as it would have already been validated
        # in the Imputer calling MissingIndicator
        if not self._precomputed:
            X = self._validate_input(X, in_fit=True)

        self._n_features = X.shape[1]

        missing_features_info = self._get_missing_features_info(X)
        self.features_ = missing_features_info[1]

        return missing_features_info[0]

    def fit(self, X, y=None):
        """Fit the transformer on `X`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._validate_params()
        self._fit(X, y)

        return self

    def transform(self, X):
        """Generate missing values indicator for `X`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data to complete.

        Returns
        -------
        Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
        or (n_samples, n_features_with_missing)
            The missing indicator for input data. The data type of `Xt`
            will be boolean.
        """
        check_is_fitted(self)

        # Need not validate X again as it would have already been validated
        # in the Imputer calling MissingIndicator
        if not self._precomputed:
            X = self._validate_input(X, in_fit=False)
        else:
            if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
                raise ValueError("precomputed is True but the input data is not a mask")

        imputer_mask, features = self._get_missing_features_info(X)

        if self.features == "missing-only":
            # Columns missing now but fully observed at fit time.
            features_diff_fit_trans = np.setdiff1d(features, self.features_)
            if self.error_on_new and features_diff_fit_trans.size > 0:
                raise ValueError(
                    "The features {} have missing values "
                    "in transform but have no missing values "
                    "in fit.".format(features_diff_fit_trans)
                )

            if self.features_.size < self._n_features:
                imputer_mask = imputer_mask[:, self.features_]

        return imputer_mask

    def fit_transform(self, X, y=None):
        """Generate missing values indicator for `X`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data to complete.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
        or (n_samples, n_features_with_missing)
            The missing indicator for input data. The data type of `Xt`
            will be boolean.
        """
        self._validate_params()
        # `_fit` returns the full mask; restrict it to the fitted columns
        # when only a subset of features has missing values.
        imputer_mask = self._fit(X, y)

        if self.features_.size < self._n_features:
            imputer_mask = imputer_mask[:, self.features_]

        return imputer_mask

    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Input features.

            - If `input_features` is `None`, then `feature_names_in_` is
              used as feature names in. If `feature_names_in_` is not defined,
              then the following input feature names are generated:
              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
            - If `input_features` is an array-like, then `input_features` must
              match `feature_names_in_` if `feature_names_in_` is defined.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        input_features = _check_feature_names_in(self, input_features)
        # Output names are prefixed with the transformer name, e.g.
        # "missingindicator_<feature>".
        prefix = self.__class__.__name__.lower()
        return np.asarray(
            [
                f"{prefix}_{feature_name}"
                for feature_name in input_features[self.features_]
            ],
            dtype=object,
        )

    def _more_tags(self):
        # The indicator itself tolerates NaNs and string data; it does not
        # preserve the input dtype (output is boolean).
        return {
            "allow_nan": True,
            "X_types": ["2darray", "string"],
            "preserves_dtype": [],
        }
|
mgm/lib/python3.10/site-packages/sklearn/impute/_iterative.py
ADDED
|
@@ -0,0 +1,889 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from time import time
|
| 2 |
+
from collections import namedtuple
|
| 3 |
+
from numbers import Integral, Real
|
| 4 |
+
import warnings
|
| 5 |
+
|
| 6 |
+
from scipy import stats
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from ..base import clone
|
| 10 |
+
from ..exceptions import ConvergenceWarning
|
| 11 |
+
from ..preprocessing import normalize
|
| 12 |
+
from ..utils import (
|
| 13 |
+
check_array,
|
| 14 |
+
check_random_state,
|
| 15 |
+
is_scalar_nan,
|
| 16 |
+
_safe_assign,
|
| 17 |
+
_safe_indexing,
|
| 18 |
+
)
|
| 19 |
+
from ..utils.validation import FLOAT_DTYPES, check_is_fitted
|
| 20 |
+
from ..utils.validation import _check_feature_names_in
|
| 21 |
+
from ..utils._mask import _get_mask
|
| 22 |
+
from ..utils._param_validation import HasMethods, Interval, StrOptions
|
| 23 |
+
|
| 24 |
+
from ._base import _BaseImputer
|
| 25 |
+
from ._base import SimpleImputer
|
| 26 |
+
from ._base import _check_inputs_dtype
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Bookkeeping record stored in `IterativeImputer.imputation_sequence_`:
# the feature being imputed, the neighbor features used to predict it,
# and the estimator fitted for that step.
_ImputerTriplet = namedtuple(
    "_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"]
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _assign_where(X1, X2, cond):
|
| 35 |
+
"""Assign X2 to X1 where cond is True.
|
| 36 |
+
|
| 37 |
+
Parameters
|
| 38 |
+
----------
|
| 39 |
+
X1 : ndarray or dataframe of shape (n_samples, n_features)
|
| 40 |
+
Data.
|
| 41 |
+
|
| 42 |
+
X2 : ndarray of shape (n_samples, n_features)
|
| 43 |
+
Data to be assigned.
|
| 44 |
+
|
| 45 |
+
cond : ndarray of shape (n_samples, n_features)
|
| 46 |
+
Boolean mask to assign data.
|
| 47 |
+
"""
|
| 48 |
+
if hasattr(X1, "mask"): # pandas dataframes
|
| 49 |
+
X1.mask(cond=cond, other=X2, inplace=True)
|
| 50 |
+
else: # ndarrays
|
| 51 |
+
X1[cond] = X2[cond]
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class IterativeImputer(_BaseImputer):
|
| 55 |
+
"""Multivariate imputer that estimates each feature from all the others.
|
| 56 |
+
|
| 57 |
+
A strategy for imputing missing values by modeling each feature with
|
| 58 |
+
missing values as a function of other features in a round-robin fashion.
|
| 59 |
+
|
| 60 |
+
Read more in the :ref:`User Guide <iterative_imputer>`.
|
| 61 |
+
|
| 62 |
+
.. versionadded:: 0.21
|
| 63 |
+
|
| 64 |
+
.. note::
|
| 65 |
+
|
| 66 |
+
This estimator is still **experimental** for now: the predictions
|
| 67 |
+
and the API might change without any deprecation cycle. To use it,
|
| 68 |
+
you need to explicitly import `enable_iterative_imputer`::
|
| 69 |
+
|
| 70 |
+
>>> # explicitly require this experimental feature
|
| 71 |
+
>>> from sklearn.experimental import enable_iterative_imputer # noqa
|
| 72 |
+
>>> # now you can import normally from sklearn.impute
|
| 73 |
+
>>> from sklearn.impute import IterativeImputer
|
| 74 |
+
|
| 75 |
+
Parameters
|
| 76 |
+
----------
|
| 77 |
+
estimator : estimator object, default=BayesianRidge()
|
| 78 |
+
The estimator to use at each step of the round-robin imputation.
|
| 79 |
+
If `sample_posterior=True`, the estimator must support
|
| 80 |
+
`return_std` in its `predict` method.
|
| 81 |
+
|
| 82 |
+
missing_values : int or np.nan, default=np.nan
|
| 83 |
+
The placeholder for the missing values. All occurrences of
|
| 84 |
+
`missing_values` will be imputed. For pandas' dataframes with
|
| 85 |
+
nullable integer dtypes with missing values, `missing_values`
|
| 86 |
+
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
|
| 87 |
+
|
| 88 |
+
sample_posterior : bool, default=False
|
| 89 |
+
Whether to sample from the (Gaussian) predictive posterior of the
|
| 90 |
+
fitted estimator for each imputation. Estimator must support
|
| 91 |
+
`return_std` in its `predict` method if set to `True`. Set to
|
| 92 |
+
`True` if using `IterativeImputer` for multiple imputations.
|
| 93 |
+
|
| 94 |
+
max_iter : int, default=10
|
| 95 |
+
Maximum number of imputation rounds to perform before returning the
|
| 96 |
+
imputations computed during the final round. A round is a single
|
| 97 |
+
imputation of each feature with missing values. The stopping criterion
|
| 98 |
+
is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`,
|
| 99 |
+
where `X_t` is `X` at iteration `t`. Note that early stopping is only
|
| 100 |
+
applied if `sample_posterior=False`.
|
| 101 |
+
|
| 102 |
+
tol : float, default=1e-3
|
| 103 |
+
Tolerance of the stopping condition.
|
| 104 |
+
|
| 105 |
+
n_nearest_features : int, default=None
|
| 106 |
+
Number of other features to use to estimate the missing values of
|
| 107 |
+
each feature column. Nearness between features is measured using
|
| 108 |
+
the absolute correlation coefficient between each feature pair (after
|
| 109 |
+
initial imputation). To ensure coverage of features throughout the
|
| 110 |
+
imputation process, the neighbor features are not necessarily nearest,
|
| 111 |
+
but are drawn with probability proportional to correlation for each
|
| 112 |
+
imputed target feature. Can provide significant speed-up when the
|
| 113 |
+
number of features is huge. If `None`, all features will be used.
|
| 114 |
+
|
| 115 |
+
initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \
|
| 116 |
+
default='mean'
|
| 117 |
+
Which strategy to use to initialize the missing values. Same as the
|
| 118 |
+
`strategy` parameter in :class:`~sklearn.impute.SimpleImputer`.
|
| 119 |
+
|
| 120 |
+
imputation_order : {'ascending', 'descending', 'roman', 'arabic', \
|
| 121 |
+
'random'}, default='ascending'
|
| 122 |
+
The order in which the features will be imputed. Possible values:
|
| 123 |
+
|
| 124 |
+
- `'ascending'`: From features with fewest missing values to most.
|
| 125 |
+
- `'descending'`: From features with most missing values to fewest.
|
| 126 |
+
- `'roman'`: Left to right.
|
| 127 |
+
- `'arabic'`: Right to left.
|
| 128 |
+
- `'random'`: A random order for each round.
|
| 129 |
+
|
| 130 |
+
skip_complete : bool, default=False
|
| 131 |
+
If `True` then features with missing values during :meth:`transform`
|
| 132 |
+
which did not have any missing values during :meth:`fit` will be
|
| 133 |
+
imputed with the initial imputation method only. Set to `True` if you
|
| 134 |
+
have many features with no missing values at both :meth:`fit` and
|
| 135 |
+
:meth:`transform` time to save compute.
|
| 136 |
+
|
| 137 |
+
min_value : float or array-like of shape (n_features,), default=-np.inf
|
| 138 |
+
Minimum possible imputed value. Broadcast to shape `(n_features,)` if
|
| 139 |
+
scalar. If array-like, expects shape `(n_features,)`, one min value for
|
| 140 |
+
each feature. The default is `-np.inf`.
|
| 141 |
+
|
| 142 |
+
.. versionchanged:: 0.23
|
| 143 |
+
Added support for array-like.
|
| 144 |
+
|
| 145 |
+
max_value : float or array-like of shape (n_features,), default=np.inf
|
| 146 |
+
Maximum possible imputed value. Broadcast to shape `(n_features,)` if
|
| 147 |
+
scalar. If array-like, expects shape `(n_features,)`, one max value for
|
| 148 |
+
each feature. The default is `np.inf`.
|
| 149 |
+
|
| 150 |
+
.. versionchanged:: 0.23
|
| 151 |
+
Added support for array-like.
|
| 152 |
+
|
| 153 |
+
verbose : int, default=0
|
| 154 |
+
Verbosity flag, controls the debug messages that are issued
|
| 155 |
+
as functions are evaluated. The higher, the more verbose. Can be 0, 1,
|
| 156 |
+
or 2.
|
| 157 |
+
|
| 158 |
+
random_state : int, RandomState instance or None, default=None
|
| 159 |
+
The seed of the pseudo random number generator to use. Randomizes
|
| 160 |
+
selection of estimator features if `n_nearest_features` is not `None`,
|
| 161 |
+
the `imputation_order` if `random`, and the sampling from posterior if
|
| 162 |
+
`sample_posterior=True`. Use an integer for determinism.
|
| 163 |
+
See :term:`the Glossary <random_state>`.
|
| 164 |
+
|
| 165 |
+
add_indicator : bool, default=False
|
| 166 |
+
If `True`, a :class:`MissingIndicator` transform will stack onto output
|
| 167 |
+
of the imputer's transform. This allows a predictive estimator
|
| 168 |
+
to account for missingness despite imputation. If a feature has no
|
| 169 |
+
missing values at fit/train time, the feature won't appear on
|
| 170 |
+
the missing indicator even if there are missing values at
|
| 171 |
+
transform/test time.
|
| 172 |
+
|
| 173 |
+
keep_empty_features : bool, default=False
|
| 174 |
+
If True, features that consist exclusively of missing values when
|
| 175 |
+
`fit` is called are returned in results when `transform` is called.
|
| 176 |
+
The imputed value is always `0` except when
|
| 177 |
+
`initial_strategy="constant"` in which case `fill_value` will be
|
| 178 |
+
used instead.
|
| 179 |
+
|
| 180 |
+
.. versionadded:: 1.2
|
| 181 |
+
|
| 182 |
+
Attributes
|
| 183 |
+
----------
|
| 184 |
+
initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer`
|
| 185 |
+
Imputer used to initialize the missing values.
|
| 186 |
+
|
| 187 |
+
imputation_sequence_ : list of tuples
|
| 188 |
+
Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where
|
| 189 |
+
`feat_idx` is the current feature to be imputed,
|
| 190 |
+
`neighbor_feat_idx` is the array of other features used to impute the
|
| 191 |
+
current feature, and `estimator` is the trained estimator used for
|
| 192 |
+
the imputation. Length is `self.n_features_with_missing_ *
|
| 193 |
+
self.n_iter_`.
|
| 194 |
+
|
| 195 |
+
n_iter_ : int
|
| 196 |
+
Number of iteration rounds that occurred. Will be less than
|
| 197 |
+
`self.max_iter` if early stopping criterion was reached.
|
| 198 |
+
|
| 199 |
+
n_features_in_ : int
|
| 200 |
+
Number of features seen during :term:`fit`.
|
| 201 |
+
|
| 202 |
+
.. versionadded:: 0.24
|
| 203 |
+
|
| 204 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 205 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 206 |
+
has feature names that are all strings.
|
| 207 |
+
|
| 208 |
+
.. versionadded:: 1.0
|
| 209 |
+
|
| 210 |
+
n_features_with_missing_ : int
|
| 211 |
+
Number of features with missing values.
|
| 212 |
+
|
| 213 |
+
indicator_ : :class:`~sklearn.impute.MissingIndicator`
|
| 214 |
+
Indicator used to add binary indicators for missing values.
|
| 215 |
+
`None` if `add_indicator=False`.
|
| 216 |
+
|
| 217 |
+
random_state_ : RandomState instance
|
| 218 |
+
RandomState instance that is generated either from a seed, the random
|
| 219 |
+
number generator or by `np.random`.
|
| 220 |
+
|
| 221 |
+
See Also
|
| 222 |
+
--------
|
| 223 |
+
SimpleImputer : Univariate imputer for completing missing values
|
| 224 |
+
with simple strategies.
|
| 225 |
+
KNNImputer : Multivariate imputer that estimates missing features using
|
| 226 |
+
nearest samples.
|
| 227 |
+
|
| 228 |
+
Notes
|
| 229 |
+
-----
|
| 230 |
+
To support imputation in inductive mode we store each feature's estimator
|
| 231 |
+
during the :meth:`fit` phase, and predict without refitting (in order)
|
| 232 |
+
during the :meth:`transform` phase.
|
| 233 |
+
|
| 234 |
+
Features which contain all missing values at :meth:`fit` are discarded upon
|
| 235 |
+
:meth:`transform`.
|
| 236 |
+
|
| 237 |
+
Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))`
|
| 238 |
+
where :math:`k` = `max_iter`, :math:`n` the number of samples and
|
| 239 |
+
:math:`p` the number of features. It thus becomes prohibitively costly when
|
| 240 |
+
the number of features increases. Setting
|
| 241 |
+
`n_nearest_features << n_features`, `skip_complete=True` or increasing `tol`
|
| 242 |
+
can help to reduce its computational cost.
|
| 243 |
+
|
| 244 |
+
Depending on the nature of missing values, simple imputers can be
|
| 245 |
+
preferable in a prediction context.
|
| 246 |
+
|
| 247 |
+
References
|
| 248 |
+
----------
|
| 249 |
+
.. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
|
| 250 |
+
Multivariate Imputation by Chained Equations in R". Journal of
|
| 251 |
+
Statistical Software 45: 1-67.
|
| 252 |
+
<https://www.jstatsoft.org/article/view/v045i03>`_
|
| 253 |
+
|
| 254 |
+
.. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in
|
| 255 |
+
Multivariate Data Suitable for use with an Electronic Computer".
|
| 256 |
+
Journal of the Royal Statistical Society 22(2): 302-306.
|
| 257 |
+
<https://www.jstor.org/stable/2984099>`_
|
| 258 |
+
|
| 259 |
+
Examples
|
| 260 |
+
--------
|
| 261 |
+
>>> import numpy as np
|
| 262 |
+
>>> from sklearn.experimental import enable_iterative_imputer
|
| 263 |
+
>>> from sklearn.impute import IterativeImputer
|
| 264 |
+
>>> imp_mean = IterativeImputer(random_state=0)
|
| 265 |
+
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
|
| 266 |
+
IterativeImputer(random_state=0)
|
| 267 |
+
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
|
| 268 |
+
>>> imp_mean.transform(X)
|
| 269 |
+
array([[ 6.9584..., 2. , 3. ],
|
| 270 |
+
[ 4. , 2.6000..., 6. ],
|
| 271 |
+
[10. , 4.9999..., 9. ]])
|
| 272 |
+
"""
|
| 273 |
+
|
| 274 |
+
# Declarative parameter-validation spec consumed by
# `self._validate_params()` (see sklearn.utils._param_validation).
# Inherits the shared imputer constraints and adds the iterative-specific ones.
_parameter_constraints: dict = {
    **_BaseImputer._parameter_constraints,
    "estimator": [None, HasMethods(["fit", "predict"])],
    "sample_posterior": ["boolean"],
    "max_iter": [Interval(Integral, 0, None, closed="left")],
    "tol": [Interval(Real, 0, None, closed="left")],
    "n_nearest_features": [None, Interval(Integral, 1, None, closed="left")],
    "initial_strategy": [
        StrOptions({"mean", "median", "most_frequent", "constant"})
    ],
    "imputation_order": [
        StrOptions({"ascending", "descending", "roman", "arabic", "random"})
    ],
    "skip_complete": ["boolean"],
    "min_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
    "max_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
    "verbose": ["verbose"],
    "random_state": ["random_state"],
}
|
| 293 |
+
|
| 294 |
+
def __init__(
    self,
    estimator=None,
    *,
    missing_values=np.nan,
    sample_posterior=False,
    max_iter=10,
    tol=1e-3,
    n_nearest_features=None,
    initial_strategy="mean",
    imputation_order="ascending",
    skip_complete=False,
    min_value=-np.inf,
    max_value=np.inf,
    verbose=0,
    random_state=None,
    add_indicator=False,
    keep_empty_features=False,
):
    # Options shared by all imputers are handled by _BaseImputer.
    super().__init__(
        missing_values=missing_values,
        add_indicator=add_indicator,
        keep_empty_features=keep_empty_features,
    )

    # Per sklearn convention, __init__ only stores hyper-parameters
    # verbatim; validation happens later in fit via _validate_params.
    self.estimator = estimator
    self.sample_posterior = sample_posterior
    self.max_iter = max_iter
    self.tol = tol
    self.n_nearest_features = n_nearest_features
    self.initial_strategy = initial_strategy
    self.imputation_order = imputation_order
    self.skip_complete = skip_complete
    self.min_value = min_value
    self.max_value = max_value
    self.verbose = verbose
    self.random_state = random_state
|
| 331 |
+
|
| 332 |
+
def _impute_one_feature(
    self,
    X_filled,
    mask_missing_values,
    feat_idx,
    neighbor_feat_idx,
    estimator=None,
    fit_mode=True,
):
    """Impute a single feature from the others provided.

    This function predicts the missing values of one of the features using
    the current estimates of all the other features. The `estimator` must
    support `return_std=True` in its `predict` method for this function
    to work.

    Parameters
    ----------
    X_filled : ndarray
        Input data with the most recent imputations.

    mask_missing_values : ndarray
        Input data's missing indicator matrix.

    feat_idx : int
        Index of the feature currently being imputed.

    neighbor_feat_idx : ndarray
        Indices of the features to be used in imputing `feat_idx`.

    estimator : object
        The estimator to use at this step of the round-robin imputation.
        If `sample_posterior=True`, the estimator must support
        `return_std` in its `predict` method.
        If None, it will be cloned from self._estimator.

    fit_mode : boolean, default=True
        Whether to fit and predict with the estimator or just predict.

    Returns
    -------
    X_filled : ndarray
        Input data with `X_filled[missing_row_mask, feat_idx]` updated.

    estimator : estimator with sklearn API
        The fitted estimator used to impute
        `X_filled[missing_row_mask, feat_idx]`.
    """
    if estimator is None and fit_mode is False:
        raise ValueError(
            "If fit_mode is False, then an already-fitted "
            "estimator should be passed in."
        )

    if estimator is None:
        estimator = clone(self._estimator)

    # Rows where the target feature is missing at this round.
    missing_row_mask = mask_missing_values[:, feat_idx]
    if fit_mode:
        # Train on the rows where the target feature is observed, using the
        # neighbor features (with their current imputations) as predictors.
        X_train = _safe_indexing(
            _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
            ~missing_row_mask,
            axis=0,
        )
        y_train = _safe_indexing(
            _safe_indexing(X_filled, feat_idx, axis=1),
            ~missing_row_mask,
            axis=0,
        )
        estimator.fit(X_train, y_train)

    # if no missing values, don't predict
    if np.sum(missing_row_mask) == 0:
        return X_filled, estimator

    # get posterior samples if there is at least one missing value
    X_test = _safe_indexing(
        _safe_indexing(X_filled, neighbor_feat_idx, axis=1),
        missing_row_mask,
        axis=0,
    )
    if self.sample_posterior:
        mus, sigmas = estimator.predict(X_test, return_std=True)
        imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
        # two types of problems: (1) non-positive sigmas
        # (2) mus outside legal range of min_value and max_value
        # (results in inf sample)
        positive_sigmas = sigmas > 0
        # Degenerate posterior (sigma <= 0): fall back to the mean.
        imputed_values[~positive_sigmas] = mus[~positive_sigmas]
        # Means outside the allowed range are clamped to the bound.
        mus_too_low = mus < self._min_value[feat_idx]
        imputed_values[mus_too_low] = self._min_value[feat_idx]
        mus_too_high = mus > self._max_value[feat_idx]
        imputed_values[mus_too_high] = self._max_value[feat_idx]
        # the rest can be sampled without statistical issues
        inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
        mus = mus[inrange_mask]
        sigmas = sigmas[inrange_mask]
        # truncnorm parameterizes bounds in standard-deviation units.
        a = (self._min_value[feat_idx] - mus) / sigmas
        b = (self._max_value[feat_idx] - mus) / sigmas

        truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
        imputed_values[inrange_mask] = truncated_normal.rvs(
            random_state=self.random_state_
        )
    else:
        # Deterministic prediction, clipped into the legal value range.
        imputed_values = estimator.predict(X_test)
        imputed_values = np.clip(
            imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]
        )

    # update the feature
    _safe_assign(
        X_filled,
        imputed_values,
        row_indexer=missing_row_mask,
        column_indexer=feat_idx,
    )
    return X_filled, estimator
|
| 450 |
+
|
| 451 |
+
def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):
    """Get a list of other features to predict `feat_idx`.

    If `self.n_nearest_features` is less than or equal to the total
    number of features, then use a probability proportional to the absolute
    correlation between `feat_idx` and each other feature to randomly
    choose a subsample of the other features (without replacement).

    Parameters
    ----------
    n_features : int
        Number of features in `X`.

    feat_idx : int
        Index of the feature currently being imputed.

    abs_corr_mat : ndarray, shape (n_features, n_features)
        Absolute correlation matrix of `X`. The diagonal has been zeroed
        out and each feature has been normalized to sum to 1. Can be None.

    Returns
    -------
    neighbor_feat_idx : array-like
        The features to use to impute `feat_idx`.
    """
    subsample = (
        self.n_nearest_features is not None
        and self.n_nearest_features < n_features
    )
    if subsample:
        # Weighted sampling without replacement; weights are the
        # (normalized) absolute correlations with the target feature.
        weights = abs_corr_mat[:, feat_idx]
        return self.random_state_.choice(
            np.arange(n_features),
            self.n_nearest_features,
            replace=False,
            p=weights,
        )
    # Otherwise every feature except the target itself is a neighbor.
    all_idx = np.arange(n_features)
    return np.concatenate((all_idx[:feat_idx], all_idx[feat_idx + 1:]))
|
| 486 |
+
|
| 487 |
+
def _get_ordered_idx(self, mask_missing_values):
    """Decide in what order we will update the features.

    As a homage to the MICE R package, we will have 4 main options of
    how to order the updates, and use a random order if anything else
    is specified.

    Also, this function skips features which have no missing values.

    Parameters
    ----------
    mask_missing_values : array-like, shape (n_samples, n_features)
        Input data's missing indicator matrix, where `n_samples` is the
        number of samples and `n_features` is the number of features.

    Returns
    -------
    ordered_idx : ndarray, shape (n_features,)
        The order in which to impute the features.
    """
    # Per-feature fraction of missing entries.
    missing_frac = mask_missing_values.mean(axis=0)
    if self.skip_complete:
        # Only features that actually have missing values.
        candidates = np.flatnonzero(missing_frac)
    else:
        candidates = np.arange(np.shape(missing_frac)[0])

    order = self.imputation_order
    if order == "roman":
        # Left to right.
        ordered = candidates
    elif order == "arabic":
        # Right to left.
        ordered = candidates[::-1]
    elif order == "ascending":
        # Fewest missing first; stable sort, dropping the
        # `len - len(candidates)` fully-observed features when skipping.
        skipped = len(missing_frac) - len(candidates)
        ordered = np.argsort(missing_frac, kind="mergesort")[skipped:]
    elif order == "descending":
        skipped = len(missing_frac) - len(candidates)
        ordered = np.argsort(missing_frac, kind="mergesort")[skipped:][::-1]
    elif order == "random":
        # Fresh random permutation each round (shuffle is in place).
        ordered = candidates
        self.random_state_.shuffle(ordered)
    return ordered
|
| 526 |
+
|
| 527 |
+
def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
    """Get absolute correlation matrix between features.

    Parameters
    ----------
    X_filled : ndarray, shape (n_samples, n_features)
        Input data with the most recent imputations.

    tolerance : float, default=1e-6
        `abs_corr_mat` can have nans, which will be replaced
        with `tolerance`.

    Returns
    -------
    abs_corr_mat : ndarray, shape (n_features, n_features)
        Absolute correlation matrix of `X` at the beginning of the
        current round. The diagonal has been zeroed out and each feature's
        absolute correlations with all others have been normalized to sum
        to 1.
    """
    n_features = X_filled.shape[1]
    # The matrix is only needed when a strict subset of neighbors is drawn.
    if self.n_nearest_features is None or self.n_nearest_features >= n_features:
        return None
    # A constant feature has zero std, so np.corrcoef divides by zero and
    # would warn; silence it and patch the resulting NaNs below.
    with np.errstate(invalid="ignore"):
        corr = np.abs(np.corrcoef(X_filled.T))
    # np.corrcoef is undefined for zero-std features.
    corr[np.isnan(corr)] = tolerance
    # Keep every weight strictly positive so all features stay reachable.
    np.clip(corr, tolerance, None, out=corr)
    # A feature is never its own neighbor.
    np.fill_diagonal(corr, 0)
    # Each column must sum to 1 to serve as `p` for np.random.choice.
    return normalize(corr, norm="l1", axis=0, copy=False)
|
| 564 |
+
|
| 565 |
+
def _initial_imputation(self, X, in_fit=False):
|
| 566 |
+
"""Perform initial imputation for input `X`.
|
| 567 |
+
|
| 568 |
+
Parameters
|
| 569 |
+
----------
|
| 570 |
+
X : ndarray of shape (n_samples, n_features)
|
| 571 |
+
Input data, where `n_samples` is the number of samples and
|
| 572 |
+
`n_features` is the number of features.
|
| 573 |
+
|
| 574 |
+
in_fit : bool, default=False
|
| 575 |
+
Whether function is called in :meth:`fit`.
|
| 576 |
+
|
| 577 |
+
Returns
|
| 578 |
+
-------
|
| 579 |
+
Xt : ndarray of shape (n_samples, n_features)
|
| 580 |
+
Input data, where `n_samples` is the number of samples and
|
| 581 |
+
`n_features` is the number of features.
|
| 582 |
+
|
| 583 |
+
X_filled : ndarray of shape (n_samples, n_features)
|
| 584 |
+
Input data with the most recent imputations.
|
| 585 |
+
|
| 586 |
+
mask_missing_values : ndarray of shape (n_samples, n_features)
|
| 587 |
+
Input data's missing indicator matrix, where `n_samples` is the
|
| 588 |
+
number of samples and `n_features` is the number of features,
|
| 589 |
+
masked by non-missing features.
|
| 590 |
+
|
| 591 |
+
X_missing_mask : ndarray, shape (n_samples, n_features)
|
| 592 |
+
Input data's mask matrix indicating missing datapoints, where
|
| 593 |
+
`n_samples` is the number of samples and `n_features` is the
|
| 594 |
+
number of features.
|
| 595 |
+
"""
|
| 596 |
+
if is_scalar_nan(self.missing_values):
|
| 597 |
+
force_all_finite = "allow-nan"
|
| 598 |
+
else:
|
| 599 |
+
force_all_finite = True
|
| 600 |
+
|
| 601 |
+
X = self._validate_data(
|
| 602 |
+
X,
|
| 603 |
+
dtype=FLOAT_DTYPES,
|
| 604 |
+
order="F",
|
| 605 |
+
reset=in_fit,
|
| 606 |
+
force_all_finite=force_all_finite,
|
| 607 |
+
)
|
| 608 |
+
_check_inputs_dtype(X, self.missing_values)
|
| 609 |
+
|
| 610 |
+
X_missing_mask = _get_mask(X, self.missing_values)
|
| 611 |
+
mask_missing_values = X_missing_mask.copy()
|
| 612 |
+
if self.initial_imputer_ is None:
|
| 613 |
+
self.initial_imputer_ = SimpleImputer(
|
| 614 |
+
missing_values=self.missing_values,
|
| 615 |
+
strategy=self.initial_strategy,
|
| 616 |
+
keep_empty_features=self.keep_empty_features,
|
| 617 |
+
)
|
| 618 |
+
X_filled = self.initial_imputer_.fit_transform(X)
|
| 619 |
+
else:
|
| 620 |
+
X_filled = self.initial_imputer_.transform(X)
|
| 621 |
+
|
| 622 |
+
valid_mask = np.flatnonzero(
|
| 623 |
+
np.logical_not(np.isnan(self.initial_imputer_.statistics_))
|
| 624 |
+
)
|
| 625 |
+
|
| 626 |
+
if not self.keep_empty_features:
|
| 627 |
+
# drop empty features
|
| 628 |
+
Xt = X[:, valid_mask]
|
| 629 |
+
mask_missing_values = mask_missing_values[:, valid_mask]
|
| 630 |
+
else:
|
| 631 |
+
# mark empty features as not missing and keep the original
|
| 632 |
+
# imputation
|
| 633 |
+
mask_missing_values[:, valid_mask] = True
|
| 634 |
+
Xt = X
|
| 635 |
+
|
| 636 |
+
return Xt, X_filled, mask_missing_values, X_missing_mask
|
| 637 |
+
|
| 638 |
+
@staticmethod
|
| 639 |
+
def _validate_limit(limit, limit_type, n_features):
|
| 640 |
+
"""Validate the limits (min/max) of the feature values.
|
| 641 |
+
|
| 642 |
+
Converts scalar min/max limits to vectors of shape `(n_features,)`.
|
| 643 |
+
|
| 644 |
+
Parameters
|
| 645 |
+
----------
|
| 646 |
+
limit: scalar or array-like
|
| 647 |
+
The user-specified limit (i.e, min_value or max_value).
|
| 648 |
+
limit_type: {'max', 'min'}
|
| 649 |
+
Type of limit to validate.
|
| 650 |
+
n_features: int
|
| 651 |
+
Number of features in the dataset.
|
| 652 |
+
|
| 653 |
+
Returns
|
| 654 |
+
-------
|
| 655 |
+
limit: ndarray, shape(n_features,)
|
| 656 |
+
Array of limits, one for each feature.
|
| 657 |
+
"""
|
| 658 |
+
limit_bound = np.inf if limit_type == "max" else -np.inf
|
| 659 |
+
limit = limit_bound if limit is None else limit
|
| 660 |
+
if np.isscalar(limit):
|
| 661 |
+
limit = np.full(n_features, limit)
|
| 662 |
+
limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False)
|
| 663 |
+
if not limit.shape[0] == n_features:
|
| 664 |
+
raise ValueError(
|
| 665 |
+
f"'{limit_type}_value' should be of "
|
| 666 |
+
f"shape ({n_features},) when an array-like "
|
| 667 |
+
f"is provided. Got {limit.shape}, instead."
|
| 668 |
+
)
|
| 669 |
+
return limit
|
| 670 |
+
|
| 671 |
+
def fit_transform(self, X, y=None):
|
| 672 |
+
"""Fit the imputer on `X` and return the transformed `X`.
|
| 673 |
+
|
| 674 |
+
Parameters
|
| 675 |
+
----------
|
| 676 |
+
X : array-like, shape (n_samples, n_features)
|
| 677 |
+
Input data, where `n_samples` is the number of samples and
|
| 678 |
+
`n_features` is the number of features.
|
| 679 |
+
|
| 680 |
+
y : Ignored
|
| 681 |
+
Not used, present for API consistency by convention.
|
| 682 |
+
|
| 683 |
+
Returns
|
| 684 |
+
-------
|
| 685 |
+
Xt : array-like, shape (n_samples, n_features)
|
| 686 |
+
The imputed input data.
|
| 687 |
+
"""
|
| 688 |
+
self._validate_params()
|
| 689 |
+
self.random_state_ = getattr(
|
| 690 |
+
self, "random_state_", check_random_state(self.random_state)
|
| 691 |
+
)
|
| 692 |
+
|
| 693 |
+
if self.estimator is None:
|
| 694 |
+
from ..linear_model import BayesianRidge
|
| 695 |
+
|
| 696 |
+
self._estimator = BayesianRidge()
|
| 697 |
+
else:
|
| 698 |
+
self._estimator = clone(self.estimator)
|
| 699 |
+
|
| 700 |
+
self.imputation_sequence_ = []
|
| 701 |
+
|
| 702 |
+
self.initial_imputer_ = None
|
| 703 |
+
|
| 704 |
+
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
|
| 705 |
+
X, in_fit=True
|
| 706 |
+
)
|
| 707 |
+
|
| 708 |
+
super()._fit_indicator(complete_mask)
|
| 709 |
+
X_indicator = super()._transform_indicator(complete_mask)
|
| 710 |
+
|
| 711 |
+
if self.max_iter == 0 or np.all(mask_missing_values):
|
| 712 |
+
self.n_iter_ = 0
|
| 713 |
+
return super()._concatenate_indicator(Xt, X_indicator)
|
| 714 |
+
|
| 715 |
+
# Edge case: a single feature. We return the initial ...
|
| 716 |
+
if Xt.shape[1] == 1:
|
| 717 |
+
self.n_iter_ = 0
|
| 718 |
+
return super()._concatenate_indicator(Xt, X_indicator)
|
| 719 |
+
|
| 720 |
+
self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
|
| 721 |
+
self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
|
| 722 |
+
|
| 723 |
+
if not np.all(np.greater(self._max_value, self._min_value)):
|
| 724 |
+
raise ValueError("One (or more) features have min_value >= max_value.")
|
| 725 |
+
|
| 726 |
+
# order in which to impute
|
| 727 |
+
# note this is probably too slow for large feature data (d > 100000)
|
| 728 |
+
# and a better way would be good.
|
| 729 |
+
# see: https://goo.gl/KyCNwj and subsequent comments
|
| 730 |
+
ordered_idx = self._get_ordered_idx(mask_missing_values)
|
| 731 |
+
self.n_features_with_missing_ = len(ordered_idx)
|
| 732 |
+
|
| 733 |
+
abs_corr_mat = self._get_abs_corr_mat(Xt)
|
| 734 |
+
|
| 735 |
+
n_samples, n_features = Xt.shape
|
| 736 |
+
if self.verbose > 0:
|
| 737 |
+
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
|
| 738 |
+
start_t = time()
|
| 739 |
+
if not self.sample_posterior:
|
| 740 |
+
Xt_previous = Xt.copy()
|
| 741 |
+
normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
|
| 742 |
+
for self.n_iter_ in range(1, self.max_iter + 1):
|
| 743 |
+
if self.imputation_order == "random":
|
| 744 |
+
ordered_idx = self._get_ordered_idx(mask_missing_values)
|
| 745 |
+
|
| 746 |
+
for feat_idx in ordered_idx:
|
| 747 |
+
neighbor_feat_idx = self._get_neighbor_feat_idx(
|
| 748 |
+
n_features, feat_idx, abs_corr_mat
|
| 749 |
+
)
|
| 750 |
+
Xt, estimator = self._impute_one_feature(
|
| 751 |
+
Xt,
|
| 752 |
+
mask_missing_values,
|
| 753 |
+
feat_idx,
|
| 754 |
+
neighbor_feat_idx,
|
| 755 |
+
estimator=None,
|
| 756 |
+
fit_mode=True,
|
| 757 |
+
)
|
| 758 |
+
estimator_triplet = _ImputerTriplet(
|
| 759 |
+
feat_idx, neighbor_feat_idx, estimator
|
| 760 |
+
)
|
| 761 |
+
self.imputation_sequence_.append(estimator_triplet)
|
| 762 |
+
|
| 763 |
+
if self.verbose > 1:
|
| 764 |
+
print(
|
| 765 |
+
"[IterativeImputer] Ending imputation round "
|
| 766 |
+
"%d/%d, elapsed time %0.2f"
|
| 767 |
+
% (self.n_iter_, self.max_iter, time() - start_t)
|
| 768 |
+
)
|
| 769 |
+
|
| 770 |
+
if not self.sample_posterior:
|
| 771 |
+
inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
|
| 772 |
+
if self.verbose > 0:
|
| 773 |
+
print(
|
| 774 |
+
"[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
|
| 775 |
+
inf_norm, normalized_tol
|
| 776 |
+
)
|
| 777 |
+
)
|
| 778 |
+
if inf_norm < normalized_tol:
|
| 779 |
+
if self.verbose > 0:
|
| 780 |
+
print("[IterativeImputer] Early stopping criterion reached.")
|
| 781 |
+
break
|
| 782 |
+
Xt_previous = Xt.copy()
|
| 783 |
+
else:
|
| 784 |
+
if not self.sample_posterior:
|
| 785 |
+
warnings.warn(
|
| 786 |
+
"[IterativeImputer] Early stopping criterion not reached.",
|
| 787 |
+
ConvergenceWarning,
|
| 788 |
+
)
|
| 789 |
+
_assign_where(Xt, X, cond=~mask_missing_values)
|
| 790 |
+
|
| 791 |
+
return super()._concatenate_indicator(Xt, X_indicator)
|
| 792 |
+
|
| 793 |
+
def transform(self, X):
|
| 794 |
+
"""Impute all missing values in `X`.
|
| 795 |
+
|
| 796 |
+
Note that this is stochastic, and that if `random_state` is not fixed,
|
| 797 |
+
repeated calls, or permuted input, results will differ.
|
| 798 |
+
|
| 799 |
+
Parameters
|
| 800 |
+
----------
|
| 801 |
+
X : array-like of shape (n_samples, n_features)
|
| 802 |
+
The input data to complete.
|
| 803 |
+
|
| 804 |
+
Returns
|
| 805 |
+
-------
|
| 806 |
+
Xt : array-like, shape (n_samples, n_features)
|
| 807 |
+
The imputed input data.
|
| 808 |
+
"""
|
| 809 |
+
check_is_fitted(self)
|
| 810 |
+
|
| 811 |
+
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
|
| 812 |
+
X, in_fit=False
|
| 813 |
+
)
|
| 814 |
+
|
| 815 |
+
X_indicator = super()._transform_indicator(complete_mask)
|
| 816 |
+
|
| 817 |
+
if self.n_iter_ == 0 or np.all(mask_missing_values):
|
| 818 |
+
return super()._concatenate_indicator(Xt, X_indicator)
|
| 819 |
+
|
| 820 |
+
imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
|
| 821 |
+
i_rnd = 0
|
| 822 |
+
if self.verbose > 0:
|
| 823 |
+
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
|
| 824 |
+
start_t = time()
|
| 825 |
+
for it, estimator_triplet in enumerate(self.imputation_sequence_):
|
| 826 |
+
Xt, _ = self._impute_one_feature(
|
| 827 |
+
Xt,
|
| 828 |
+
mask_missing_values,
|
| 829 |
+
estimator_triplet.feat_idx,
|
| 830 |
+
estimator_triplet.neighbor_feat_idx,
|
| 831 |
+
estimator=estimator_triplet.estimator,
|
| 832 |
+
fit_mode=False,
|
| 833 |
+
)
|
| 834 |
+
if not (it + 1) % imputations_per_round:
|
| 835 |
+
if self.verbose > 1:
|
| 836 |
+
print(
|
| 837 |
+
"[IterativeImputer] Ending imputation round "
|
| 838 |
+
"%d/%d, elapsed time %0.2f"
|
| 839 |
+
% (i_rnd + 1, self.n_iter_, time() - start_t)
|
| 840 |
+
)
|
| 841 |
+
i_rnd += 1
|
| 842 |
+
|
| 843 |
+
_assign_where(Xt, X, cond=~mask_missing_values)
|
| 844 |
+
|
| 845 |
+
return super()._concatenate_indicator(Xt, X_indicator)
|
| 846 |
+
|
| 847 |
+
def fit(self, X, y=None):
|
| 848 |
+
"""Fit the imputer on `X` and return self.
|
| 849 |
+
|
| 850 |
+
Parameters
|
| 851 |
+
----------
|
| 852 |
+
X : array-like, shape (n_samples, n_features)
|
| 853 |
+
Input data, where `n_samples` is the number of samples and
|
| 854 |
+
`n_features` is the number of features.
|
| 855 |
+
|
| 856 |
+
y : Ignored
|
| 857 |
+
Not used, present for API consistency by convention.
|
| 858 |
+
|
| 859 |
+
Returns
|
| 860 |
+
-------
|
| 861 |
+
self : object
|
| 862 |
+
Fitted estimator.
|
| 863 |
+
"""
|
| 864 |
+
self.fit_transform(X)
|
| 865 |
+
return self
|
| 866 |
+
|
| 867 |
+
def get_feature_names_out(self, input_features=None):
|
| 868 |
+
"""Get output feature names for transformation.
|
| 869 |
+
|
| 870 |
+
Parameters
|
| 871 |
+
----------
|
| 872 |
+
input_features : array-like of str or None, default=None
|
| 873 |
+
Input features.
|
| 874 |
+
|
| 875 |
+
- If `input_features` is `None`, then `feature_names_in_` is
|
| 876 |
+
used as feature names in. If `feature_names_in_` is not defined,
|
| 877 |
+
then the following input feature names are generated:
|
| 878 |
+
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
|
| 879 |
+
- If `input_features` is an array-like, then `input_features` must
|
| 880 |
+
match `feature_names_in_` if `feature_names_in_` is defined.
|
| 881 |
+
|
| 882 |
+
Returns
|
| 883 |
+
-------
|
| 884 |
+
feature_names_out : ndarray of str objects
|
| 885 |
+
Transformed feature names.
|
| 886 |
+
"""
|
| 887 |
+
input_features = _check_feature_names_in(self, input_features)
|
| 888 |
+
names = self.initial_imputer_.get_feature_names_out(input_features)
|
| 889 |
+
return self._concatenate_indicator_feature_names_out(names, input_features)
|
mgm/lib/python3.10/site-packages/sklearn/impute/_knn.py
ADDED
|
@@ -0,0 +1,391 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: Ashim Bhattarai <ashimb9@gmail.com>
|
| 2 |
+
# Thomas J Fan <thomasjpfan@gmail.com>
|
| 3 |
+
# License: BSD 3 clause
|
| 4 |
+
|
| 5 |
+
from numbers import Integral
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from ._base import _BaseImputer
|
| 9 |
+
from ..utils.validation import FLOAT_DTYPES
|
| 10 |
+
from ..metrics import pairwise_distances_chunked
|
| 11 |
+
from ..metrics.pairwise import _NAN_METRICS
|
| 12 |
+
from ..neighbors._base import _get_weights
|
| 13 |
+
from ..utils import is_scalar_nan
|
| 14 |
+
from ..utils._mask import _get_mask
|
| 15 |
+
from ..utils.validation import check_is_fitted
|
| 16 |
+
from ..utils.validation import _check_feature_names_in
|
| 17 |
+
from ..utils._param_validation import Hidden, Interval, StrOptions
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class KNNImputer(_BaseImputer):
|
| 21 |
+
"""Imputation for completing missing values using k-Nearest Neighbors.
|
| 22 |
+
|
| 23 |
+
Each sample's missing values are imputed using the mean value from
|
| 24 |
+
`n_neighbors` nearest neighbors found in the training set. Two samples are
|
| 25 |
+
close if the features that neither is missing are close.
|
| 26 |
+
|
| 27 |
+
Read more in the :ref:`User Guide <knnimpute>`.
|
| 28 |
+
|
| 29 |
+
.. versionadded:: 0.22
|
| 30 |
+
|
| 31 |
+
Parameters
|
| 32 |
+
----------
|
| 33 |
+
missing_values : int, float, str, np.nan or None, default=np.nan
|
| 34 |
+
The placeholder for the missing values. All occurrences of
|
| 35 |
+
`missing_values` will be imputed. For pandas' dataframes with
|
| 36 |
+
nullable integer dtypes with missing values, `missing_values`
|
| 37 |
+
should be set to np.nan, since `pd.NA` will be converted to np.nan.
|
| 38 |
+
|
| 39 |
+
n_neighbors : int, default=5
|
| 40 |
+
Number of neighboring samples to use for imputation.
|
| 41 |
+
|
| 42 |
+
weights : {'uniform', 'distance'} or callable, default='uniform'
|
| 43 |
+
Weight function used in prediction. Possible values:
|
| 44 |
+
|
| 45 |
+
- 'uniform' : uniform weights. All points in each neighborhood are
|
| 46 |
+
weighted equally.
|
| 47 |
+
- 'distance' : weight points by the inverse of their distance.
|
| 48 |
+
in this case, closer neighbors of a query point will have a
|
| 49 |
+
greater influence than neighbors which are further away.
|
| 50 |
+
- callable : a user-defined function which accepts an
|
| 51 |
+
array of distances, and returns an array of the same shape
|
| 52 |
+
containing the weights.
|
| 53 |
+
|
| 54 |
+
metric : {'nan_euclidean'} or callable, default='nan_euclidean'
|
| 55 |
+
Distance metric for searching neighbors. Possible values:
|
| 56 |
+
|
| 57 |
+
- 'nan_euclidean'
|
| 58 |
+
- callable : a user-defined function which conforms to the definition
|
| 59 |
+
of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
|
| 60 |
+
accepts two arrays, X and Y, and a `missing_values` keyword in
|
| 61 |
+
`kwds` and returns a scalar distance value.
|
| 62 |
+
|
| 63 |
+
copy : bool, default=True
|
| 64 |
+
If True, a copy of X will be created. If False, imputation will
|
| 65 |
+
be done in-place whenever possible.
|
| 66 |
+
|
| 67 |
+
add_indicator : bool, default=False
|
| 68 |
+
If True, a :class:`MissingIndicator` transform will stack onto the
|
| 69 |
+
output of the imputer's transform. This allows a predictive estimator
|
| 70 |
+
to account for missingness despite imputation. If a feature has no
|
| 71 |
+
missing values at fit/train time, the feature won't appear on the
|
| 72 |
+
missing indicator even if there are missing values at transform/test
|
| 73 |
+
time.
|
| 74 |
+
|
| 75 |
+
keep_empty_features : bool, default=False
|
| 76 |
+
If True, features that consist exclusively of missing values when
|
| 77 |
+
`fit` is called are returned in results when `transform` is called.
|
| 78 |
+
The imputed value is always `0`.
|
| 79 |
+
|
| 80 |
+
.. versionadded:: 1.2
|
| 81 |
+
|
| 82 |
+
Attributes
|
| 83 |
+
----------
|
| 84 |
+
indicator_ : :class:`~sklearn.impute.MissingIndicator`
|
| 85 |
+
Indicator used to add binary indicators for missing values.
|
| 86 |
+
``None`` if add_indicator is False.
|
| 87 |
+
|
| 88 |
+
n_features_in_ : int
|
| 89 |
+
Number of features seen during :term:`fit`.
|
| 90 |
+
|
| 91 |
+
.. versionadded:: 0.24
|
| 92 |
+
|
| 93 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 94 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 95 |
+
has feature names that are all strings.
|
| 96 |
+
|
| 97 |
+
.. versionadded:: 1.0
|
| 98 |
+
|
| 99 |
+
See Also
|
| 100 |
+
--------
|
| 101 |
+
SimpleImputer : Univariate imputer for completing missing values
|
| 102 |
+
with simple strategies.
|
| 103 |
+
IterativeImputer : Multivariate imputer that estimates values to impute for
|
| 104 |
+
each feature with missing values from all the others.
|
| 105 |
+
|
| 106 |
+
References
|
| 107 |
+
----------
|
| 108 |
+
* Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
|
| 109 |
+
Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
|
| 110 |
+
value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
|
| 111 |
+
no. 6, 2001 Pages 520-525.
|
| 112 |
+
|
| 113 |
+
Examples
|
| 114 |
+
--------
|
| 115 |
+
>>> import numpy as np
|
| 116 |
+
>>> from sklearn.impute import KNNImputer
|
| 117 |
+
>>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
|
| 118 |
+
>>> imputer = KNNImputer(n_neighbors=2)
|
| 119 |
+
>>> imputer.fit_transform(X)
|
| 120 |
+
array([[1. , 2. , 4. ],
|
| 121 |
+
[3. , 4. , 3. ],
|
| 122 |
+
[5.5, 6. , 5. ],
|
| 123 |
+
[8. , 8. , 7. ]])
|
| 124 |
+
"""
|
| 125 |
+
|
| 126 |
+
_parameter_constraints: dict = {
|
| 127 |
+
**_BaseImputer._parameter_constraints,
|
| 128 |
+
"n_neighbors": [Interval(Integral, 1, None, closed="left")],
|
| 129 |
+
"weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)],
|
| 130 |
+
"metric": [StrOptions(set(_NAN_METRICS)), callable],
|
| 131 |
+
"copy": ["boolean"],
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
def __init__(
|
| 135 |
+
self,
|
| 136 |
+
*,
|
| 137 |
+
missing_values=np.nan,
|
| 138 |
+
n_neighbors=5,
|
| 139 |
+
weights="uniform",
|
| 140 |
+
metric="nan_euclidean",
|
| 141 |
+
copy=True,
|
| 142 |
+
add_indicator=False,
|
| 143 |
+
keep_empty_features=False,
|
| 144 |
+
):
|
| 145 |
+
super().__init__(
|
| 146 |
+
missing_values=missing_values,
|
| 147 |
+
add_indicator=add_indicator,
|
| 148 |
+
keep_empty_features=keep_empty_features,
|
| 149 |
+
)
|
| 150 |
+
self.n_neighbors = n_neighbors
|
| 151 |
+
self.weights = weights
|
| 152 |
+
self.metric = metric
|
| 153 |
+
self.copy = copy
|
| 154 |
+
|
| 155 |
+
def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):
|
| 156 |
+
"""Helper function to impute a single column.
|
| 157 |
+
|
| 158 |
+
Parameters
|
| 159 |
+
----------
|
| 160 |
+
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
|
| 161 |
+
Distance matrix between the receivers and potential donors from
|
| 162 |
+
training set. There must be at least one non-nan distance between
|
| 163 |
+
a receiver and a potential donor.
|
| 164 |
+
|
| 165 |
+
n_neighbors : int
|
| 166 |
+
Number of neighbors to consider.
|
| 167 |
+
|
| 168 |
+
fit_X_col : ndarray of shape (n_potential_donors,)
|
| 169 |
+
Column of potential donors from training set.
|
| 170 |
+
|
| 171 |
+
mask_fit_X_col : ndarray of shape (n_potential_donors,)
|
| 172 |
+
Missing mask for fit_X_col.
|
| 173 |
+
|
| 174 |
+
Returns
|
| 175 |
+
-------
|
| 176 |
+
imputed_values: ndarray of shape (n_receivers,)
|
| 177 |
+
Imputed values for receiver.
|
| 178 |
+
"""
|
| 179 |
+
# Get donors
|
| 180 |
+
donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[
|
| 181 |
+
:, :n_neighbors
|
| 182 |
+
]
|
| 183 |
+
|
| 184 |
+
# Get weight matrix from distance matrix
|
| 185 |
+
donors_dist = dist_pot_donors[
|
| 186 |
+
np.arange(donors_idx.shape[0])[:, None], donors_idx
|
| 187 |
+
]
|
| 188 |
+
|
| 189 |
+
weight_matrix = _get_weights(donors_dist, self.weights)
|
| 190 |
+
|
| 191 |
+
# fill nans with zeros
|
| 192 |
+
if weight_matrix is not None:
|
| 193 |
+
weight_matrix[np.isnan(weight_matrix)] = 0.0
|
| 194 |
+
|
| 195 |
+
# Retrieve donor values and calculate kNN average
|
| 196 |
+
donors = fit_X_col.take(donors_idx)
|
| 197 |
+
donors_mask = mask_fit_X_col.take(donors_idx)
|
| 198 |
+
donors = np.ma.array(donors, mask=donors_mask)
|
| 199 |
+
|
| 200 |
+
return np.ma.average(donors, axis=1, weights=weight_matrix).data
|
| 201 |
+
|
| 202 |
+
def fit(self, X, y=None):
|
| 203 |
+
"""Fit the imputer on X.
|
| 204 |
+
|
| 205 |
+
Parameters
|
| 206 |
+
----------
|
| 207 |
+
X : array-like shape of (n_samples, n_features)
|
| 208 |
+
Input data, where `n_samples` is the number of samples and
|
| 209 |
+
`n_features` is the number of features.
|
| 210 |
+
|
| 211 |
+
y : Ignored
|
| 212 |
+
Not used, present here for API consistency by convention.
|
| 213 |
+
|
| 214 |
+
Returns
|
| 215 |
+
-------
|
| 216 |
+
self : object
|
| 217 |
+
The fitted `KNNImputer` class instance.
|
| 218 |
+
"""
|
| 219 |
+
self._validate_params()
|
| 220 |
+
# Check data integrity and calling arguments
|
| 221 |
+
if not is_scalar_nan(self.missing_values):
|
| 222 |
+
force_all_finite = True
|
| 223 |
+
else:
|
| 224 |
+
force_all_finite = "allow-nan"
|
| 225 |
+
|
| 226 |
+
X = self._validate_data(
|
| 227 |
+
X,
|
| 228 |
+
accept_sparse=False,
|
| 229 |
+
dtype=FLOAT_DTYPES,
|
| 230 |
+
force_all_finite=force_all_finite,
|
| 231 |
+
copy=self.copy,
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
self._fit_X = X
|
| 235 |
+
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
|
| 236 |
+
self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
|
| 237 |
+
|
| 238 |
+
super()._fit_indicator(self._mask_fit_X)
|
| 239 |
+
|
| 240 |
+
return self
|
| 241 |
+
|
| 242 |
+
def transform(self, X):
|
| 243 |
+
"""Impute all missing values in X.
|
| 244 |
+
|
| 245 |
+
Parameters
|
| 246 |
+
----------
|
| 247 |
+
X : array-like of shape (n_samples, n_features)
|
| 248 |
+
The input data to complete.
|
| 249 |
+
|
| 250 |
+
Returns
|
| 251 |
+
-------
|
| 252 |
+
X : array-like of shape (n_samples, n_output_features)
|
| 253 |
+
The imputed dataset. `n_output_features` is the number of features
|
| 254 |
+
that is not always missing during `fit`.
|
| 255 |
+
"""
|
| 256 |
+
|
| 257 |
+
check_is_fitted(self)
|
| 258 |
+
if not is_scalar_nan(self.missing_values):
|
| 259 |
+
force_all_finite = True
|
| 260 |
+
else:
|
| 261 |
+
force_all_finite = "allow-nan"
|
| 262 |
+
X = self._validate_data(
|
| 263 |
+
X,
|
| 264 |
+
accept_sparse=False,
|
| 265 |
+
dtype=FLOAT_DTYPES,
|
| 266 |
+
force_all_finite=force_all_finite,
|
| 267 |
+
copy=self.copy,
|
| 268 |
+
reset=False,
|
| 269 |
+
)
|
| 270 |
+
|
| 271 |
+
mask = _get_mask(X, self.missing_values)
|
| 272 |
+
mask_fit_X = self._mask_fit_X
|
| 273 |
+
valid_mask = self._valid_mask
|
| 274 |
+
|
| 275 |
+
X_indicator = super()._transform_indicator(mask)
|
| 276 |
+
|
| 277 |
+
# Removes columns where the training data is all nan
|
| 278 |
+
if not np.any(mask):
|
| 279 |
+
# No missing values in X
|
| 280 |
+
if self.keep_empty_features:
|
| 281 |
+
Xc = X
|
| 282 |
+
Xc[:, ~valid_mask] = 0
|
| 283 |
+
else:
|
| 284 |
+
Xc = X[:, valid_mask]
|
| 285 |
+
return Xc
|
| 286 |
+
|
| 287 |
+
row_missing_idx = np.flatnonzero(mask.any(axis=1))
|
| 288 |
+
|
| 289 |
+
non_missing_fix_X = np.logical_not(mask_fit_X)
|
| 290 |
+
|
| 291 |
+
# Maps from indices from X to indices in dist matrix
|
| 292 |
+
dist_idx_map = np.zeros(X.shape[0], dtype=int)
|
| 293 |
+
dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
|
| 294 |
+
|
| 295 |
+
def process_chunk(dist_chunk, start):
|
| 296 |
+
row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
|
| 297 |
+
|
| 298 |
+
# Find and impute missing by column
|
| 299 |
+
for col in range(X.shape[1]):
|
| 300 |
+
if not valid_mask[col]:
|
| 301 |
+
# column was all missing during training
|
| 302 |
+
continue
|
| 303 |
+
|
| 304 |
+
col_mask = mask[row_missing_chunk, col]
|
| 305 |
+
if not np.any(col_mask):
|
| 306 |
+
# column has no missing values
|
| 307 |
+
continue
|
| 308 |
+
|
| 309 |
+
(potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
|
| 310 |
+
|
| 311 |
+
# receivers_idx are indices in X
|
| 312 |
+
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
|
| 313 |
+
|
| 314 |
+
# distances for samples that needed imputation for column
|
| 315 |
+
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
|
| 316 |
+
:, potential_donors_idx
|
| 317 |
+
]
|
| 318 |
+
|
| 319 |
+
# receivers with all nan distances impute with mean
|
| 320 |
+
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
|
| 321 |
+
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
|
| 322 |
+
|
| 323 |
+
if all_nan_receivers_idx.size:
|
| 324 |
+
col_mean = np.ma.array(
|
| 325 |
+
self._fit_X[:, col], mask=mask_fit_X[:, col]
|
| 326 |
+
).mean()
|
| 327 |
+
X[all_nan_receivers_idx, col] = col_mean
|
| 328 |
+
|
| 329 |
+
if len(all_nan_receivers_idx) == len(receivers_idx):
|
| 330 |
+
# all receivers imputed with mean
|
| 331 |
+
continue
|
| 332 |
+
|
| 333 |
+
# receivers with at least one defined distance
|
| 334 |
+
receivers_idx = receivers_idx[~all_nan_dist_mask]
|
| 335 |
+
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
|
| 336 |
+
:, potential_donors_idx
|
| 337 |
+
]
|
| 338 |
+
|
| 339 |
+
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
|
| 340 |
+
value = self._calc_impute(
|
| 341 |
+
dist_subset,
|
| 342 |
+
n_neighbors,
|
| 343 |
+
self._fit_X[potential_donors_idx, col],
|
| 344 |
+
mask_fit_X[potential_donors_idx, col],
|
| 345 |
+
)
|
| 346 |
+
X[receivers_idx, col] = value
|
| 347 |
+
|
| 348 |
+
# process in fixed-memory chunks
|
| 349 |
+
gen = pairwise_distances_chunked(
|
| 350 |
+
X[row_missing_idx, :],
|
| 351 |
+
self._fit_X,
|
| 352 |
+
metric=self.metric,
|
| 353 |
+
missing_values=self.missing_values,
|
| 354 |
+
force_all_finite=force_all_finite,
|
| 355 |
+
reduce_func=process_chunk,
|
| 356 |
+
)
|
| 357 |
+
for chunk in gen:
|
| 358 |
+
# process_chunk modifies X in place. No return value.
|
| 359 |
+
pass
|
| 360 |
+
|
| 361 |
+
if self.keep_empty_features:
|
| 362 |
+
Xc = X
|
| 363 |
+
Xc[:, ~valid_mask] = 0
|
| 364 |
+
else:
|
| 365 |
+
Xc = X[:, valid_mask]
|
| 366 |
+
|
| 367 |
+
return super()._concatenate_indicator(Xc, X_indicator)
|
| 368 |
+
|
| 369 |
+
def get_feature_names_out(self, input_features=None):
|
| 370 |
+
"""Get output feature names for transformation.
|
| 371 |
+
|
| 372 |
+
Parameters
|
| 373 |
+
----------
|
| 374 |
+
input_features : array-like of str or None, default=None
|
| 375 |
+
Input features.
|
| 376 |
+
|
| 377 |
+
- If `input_features` is `None`, then `feature_names_in_` is
|
| 378 |
+
used as feature names in. If `feature_names_in_` is not defined,
|
| 379 |
+
then the following input feature names are generated:
|
| 380 |
+
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
|
| 381 |
+
- If `input_features` is an array-like, then `input_features` must
|
| 382 |
+
match `feature_names_in_` if `feature_names_in_` is defined.
|
| 383 |
+
|
| 384 |
+
Returns
|
| 385 |
+
-------
|
| 386 |
+
feature_names_out : ndarray of str objects
|
| 387 |
+
Transformed feature names.
|
| 388 |
+
"""
|
| 389 |
+
input_features = _check_feature_names_in(self, input_features)
|
| 390 |
+
names = input_features[self._valid_mask]
|
| 391 |
+
return self._concatenate_indicator_feature_names_out(names, input_features)
|
mgm/lib/python3.10/site-packages/sklearn/impute/tests/__init__.py
ADDED
|
File without changes
|
mgm/lib/python3.10/site-packages/sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (169 Bytes). View file
|
|
|