ZTWHHH commited on
Commit
26e48ca
·
verified ·
1 Parent(s): 954b3ac

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. mplug_owl2/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc +3 -0
  3. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__init__.py +45 -0
  4. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc +0 -0
  5. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc +0 -0
  6. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc +0 -0
  7. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc +0 -0
  8. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc +0 -0
  9. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc +0 -0
  10. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py +265 -0
  11. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py +348 -0
  12. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py +866 -0
  13. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py +764 -0
  14. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py +0 -0
  15. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  16. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc +0 -0
  17. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc +0 -0
  18. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py +50 -0
  19. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py +242 -0
  20. mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py +174 -0
  21. mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  22. mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc +0 -0
  23. mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/enable_iterative_imputer.py +20 -0
  24. mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py +14 -0
  25. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py +16 -0
  26. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc +0 -0
  27. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc +0 -0
  28. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc +0 -0
  29. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc +0 -0
  30. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py +903 -0
  31. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py +639 -0
  32. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py +2390 -0
  33. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py +0 -0
  34. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  35. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc +0 -0
  36. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc +0 -0
  37. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc +0 -0
  38. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc +0 -0
  39. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py +50 -0
  40. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py +286 -0
  41. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py +800 -0
  42. mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py +390 -0
  43. mplug_owl2/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc +0 -0
  44. mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__init__.py +89 -0
  45. mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc +0 -0
  46. mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc +0 -0
  47. mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc +0 -0
  48. mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc +0 -0
  49. mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc +0 -0
  50. mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -711,3 +711,4 @@ mplug_owl2/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops_met
711
  mplug_owl2/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
712
  openflamingo/compiler_compat/ld filter=lfs diff=lfs merge=lfs -text
713
  openflamingo/bin/openssl filter=lfs diff=lfs merge=lfs -text
 
 
711
  mplug_owl2/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
712
  openflamingo/compiler_compat/ld filter=lfs diff=lfs merge=lfs -text
713
  openflamingo/bin/openssl filter=lfs diff=lfs merge=lfs -text
714
+ mplug_owl2/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
mplug_owl2/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5ddcbb4e99b619475eb78b56fca5800125b74ad7e88a41d797a133e8e76b0bc
3
+ size 116508
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__init__.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The :mod:`sklearn.covariance` module includes methods and algorithms to
3
+ robustly estimate the covariance of features given a set of points. The
4
+ precision matrix defined as the inverse of the covariance is also estimated.
5
+ Covariance estimation is closely related to the theory of Gaussian Graphical
6
+ Models.
7
+ """
8
+
9
+ from ._empirical_covariance import (
10
+ empirical_covariance,
11
+ EmpiricalCovariance,
12
+ log_likelihood,
13
+ )
14
+ from ._shrunk_covariance import (
15
+ shrunk_covariance,
16
+ ShrunkCovariance,
17
+ ledoit_wolf,
18
+ ledoit_wolf_shrinkage,
19
+ LedoitWolf,
20
+ oas,
21
+ OAS,
22
+ )
23
+ from ._robust_covariance import fast_mcd, MinCovDet
24
+ from ._graph_lasso import graphical_lasso, GraphicalLasso, GraphicalLassoCV
25
+ from ._elliptic_envelope import EllipticEnvelope
26
+
27
+
28
+ __all__ = [
29
+ "EllipticEnvelope",
30
+ "EmpiricalCovariance",
31
+ "GraphicalLasso",
32
+ "GraphicalLassoCV",
33
+ "LedoitWolf",
34
+ "MinCovDet",
35
+ "OAS",
36
+ "ShrunkCovariance",
37
+ "empirical_covariance",
38
+ "fast_mcd",
39
+ "graphical_lasso",
40
+ "ledoit_wolf",
41
+ "ledoit_wolf_shrinkage",
42
+ "log_likelihood",
43
+ "oas",
44
+ "shrunk_covariance",
45
+ ]
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc ADDED
Binary file (9.44 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc ADDED
Binary file (27.1 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc ADDED
Binary file (24.1 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc ADDED
Binary file (22.3 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Virgile Fritsch <virgile.fritsch@inria.fr>
2
+ #
3
+ # License: BSD 3 clause
4
+
5
+ import numpy as np
6
+ from numbers import Real
7
+ from . import MinCovDet
8
+ from ..utils._param_validation import Interval
9
+ from ..utils.validation import check_is_fitted
10
+ from ..metrics import accuracy_score
11
+ from ..base import OutlierMixin
12
+
13
+
14
+ class EllipticEnvelope(OutlierMixin, MinCovDet):
15
+ """An object for detecting outliers in a Gaussian distributed dataset.
16
+
17
+ Read more in the :ref:`User Guide <outlier_detection>`.
18
+
19
+ Parameters
20
+ ----------
21
+ store_precision : bool, default=True
22
+ Specify if the estimated precision is stored.
23
+
24
+ assume_centered : bool, default=False
25
+ If True, the support of robust location and covariance estimates
26
+ is computed, and a covariance estimate is recomputed from it,
27
+ without centering the data.
28
+ Useful to work with data whose mean is significantly equal to
29
+ zero but is not exactly zero.
30
+ If False, the robust location and covariance are directly computed
31
+ with the FastMCD algorithm without additional treatment.
32
+
33
+ support_fraction : float, default=None
34
+ The proportion of points to be included in the support of the raw
35
+ MCD estimate. If None, the minimum value of support_fraction will
36
+ be used within the algorithm: `[n_sample + n_features + 1] / 2`.
37
+ Range is (0, 1).
38
+
39
+ contamination : float, default=0.1
40
+ The amount of contamination of the data set, i.e. the proportion
41
+ of outliers in the data set. Range is (0, 0.5].
42
+
43
+ random_state : int, RandomState instance or None, default=None
44
+ Determines the pseudo random number generator for shuffling
45
+ the data. Pass an int for reproducible results across multiple function
46
+ calls. See :term:`Glossary <random_state>`.
47
+
48
+ Attributes
49
+ ----------
50
+ location_ : ndarray of shape (n_features,)
51
+ Estimated robust location.
52
+
53
+ covariance_ : ndarray of shape (n_features, n_features)
54
+ Estimated robust covariance matrix.
55
+
56
+ precision_ : ndarray of shape (n_features, n_features)
57
+ Estimated pseudo inverse matrix.
58
+ (stored only if store_precision is True)
59
+
60
+ support_ : ndarray of shape (n_samples,)
61
+ A mask of the observations that have been used to compute the
62
+ robust estimates of location and shape.
63
+
64
+ offset_ : float
65
+ Offset used to define the decision function from the raw scores.
66
+ We have the relation: ``decision_function = score_samples - offset_``.
67
+ The offset depends on the contamination parameter and is defined in
68
+ such a way we obtain the expected number of outliers (samples with
69
+ decision function < 0) in training.
70
+
71
+ .. versionadded:: 0.20
72
+
73
+ raw_location_ : ndarray of shape (n_features,)
74
+ The raw robust estimated location before correction and re-weighting.
75
+
76
+ raw_covariance_ : ndarray of shape (n_features, n_features)
77
+ The raw robust estimated covariance before correction and re-weighting.
78
+
79
+ raw_support_ : ndarray of shape (n_samples,)
80
+ A mask of the observations that have been used to compute
81
+ the raw robust estimates of location and shape, before correction
82
+ and re-weighting.
83
+
84
+ dist_ : ndarray of shape (n_samples,)
85
+ Mahalanobis distances of the training set (on which :meth:`fit` is
86
+ called) observations.
87
+
88
+ n_features_in_ : int
89
+ Number of features seen during :term:`fit`.
90
+
91
+ .. versionadded:: 0.24
92
+
93
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
94
+ Names of features seen during :term:`fit`. Defined only when `X`
95
+ has feature names that are all strings.
96
+
97
+ .. versionadded:: 1.0
98
+
99
+ See Also
100
+ --------
101
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
102
+ GraphicalLasso : Sparse inverse covariance estimation
103
+ with an l1-penalized estimator.
104
+ LedoitWolf : LedoitWolf Estimator.
105
+ MinCovDet : Minimum Covariance Determinant
106
+ (robust estimator of covariance).
107
+ OAS : Oracle Approximating Shrinkage Estimator.
108
+ ShrunkCovariance : Covariance estimator with shrinkage.
109
+
110
+ Notes
111
+ -----
112
+ Outlier detection from covariance estimation may break or not
113
+ perform well in high-dimensional settings. In particular, one will
114
+ always take care to work with ``n_samples > n_features ** 2``.
115
+
116
+ References
117
+ ----------
118
+ .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
119
+ minimum covariance determinant estimator" Technometrics 41(3), 212
120
+ (1999)
121
+
122
+ Examples
123
+ --------
124
+ >>> import numpy as np
125
+ >>> from sklearn.covariance import EllipticEnvelope
126
+ >>> true_cov = np.array([[.8, .3],
127
+ ... [.3, .4]])
128
+ >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
129
+ ... cov=true_cov,
130
+ ... size=500)
131
+ >>> cov = EllipticEnvelope(random_state=0).fit(X)
132
+ >>> # predict returns 1 for an inlier and -1 for an outlier
133
+ >>> cov.predict([[0, 0],
134
+ ... [3, 3]])
135
+ array([ 1, -1])
136
+ >>> cov.covariance_
137
+ array([[0.7411..., 0.2535...],
138
+ [0.2535..., 0.3053...]])
139
+ >>> cov.location_
140
+ array([0.0813... , 0.0427...])
141
+ """
142
+
143
+ _parameter_constraints: dict = {
144
+ **MinCovDet._parameter_constraints,
145
+ "contamination": [Interval(Real, 0, 0.5, closed="right")],
146
+ }
147
+
148
+ def __init__(
149
+ self,
150
+ *,
151
+ store_precision=True,
152
+ assume_centered=False,
153
+ support_fraction=None,
154
+ contamination=0.1,
155
+ random_state=None,
156
+ ):
157
+ super().__init__(
158
+ store_precision=store_precision,
159
+ assume_centered=assume_centered,
160
+ support_fraction=support_fraction,
161
+ random_state=random_state,
162
+ )
163
+ self.contamination = contamination
164
+
165
+ def fit(self, X, y=None):
166
+ """Fit the EllipticEnvelope model.
167
+
168
+ Parameters
169
+ ----------
170
+ X : array-like of shape (n_samples, n_features)
171
+ Training data.
172
+
173
+ y : Ignored
174
+ Not used, present for API consistency by convention.
175
+
176
+ Returns
177
+ -------
178
+ self : object
179
+ Returns the instance itself.
180
+ """
181
+ # `_validate_params` is called in `MinCovDet`
182
+ super().fit(X)
183
+ self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)
184
+ return self
185
+
186
+ def decision_function(self, X):
187
+ """Compute the decision function of the given observations.
188
+
189
+ Parameters
190
+ ----------
191
+ X : array-like of shape (n_samples, n_features)
192
+ The data matrix.
193
+
194
+ Returns
195
+ -------
196
+ decision : ndarray of shape (n_samples,)
197
+ Decision function of the samples.
198
+ It is equal to the shifted Mahalanobis distances.
199
+ The threshold for being an outlier is 0, which ensures a
200
+ compatibility with other outlier detection algorithms.
201
+ """
202
+ check_is_fitted(self)
203
+ negative_mahal_dist = self.score_samples(X)
204
+ return negative_mahal_dist - self.offset_
205
+
206
+ def score_samples(self, X):
207
+ """Compute the negative Mahalanobis distances.
208
+
209
+ Parameters
210
+ ----------
211
+ X : array-like of shape (n_samples, n_features)
212
+ The data matrix.
213
+
214
+ Returns
215
+ -------
216
+ negative_mahal_distances : array-like of shape (n_samples,)
217
+ Opposite of the Mahalanobis distances.
218
+ """
219
+ check_is_fitted(self)
220
+ return -self.mahalanobis(X)
221
+
222
+ def predict(self, X):
223
+ """
224
+ Predict labels (1 inlier, -1 outlier) of X according to fitted model.
225
+
226
+ Parameters
227
+ ----------
228
+ X : array-like of shape (n_samples, n_features)
229
+ The data matrix.
230
+
231
+ Returns
232
+ -------
233
+ is_inlier : ndarray of shape (n_samples,)
234
+ Returns -1 for anomalies/outliers and +1 for inliers.
235
+ """
236
+ values = self.decision_function(X)
237
+ is_inlier = np.full(values.shape[0], -1, dtype=int)
238
+ is_inlier[values >= 0] = 1
239
+
240
+ return is_inlier
241
+
242
+ def score(self, X, y, sample_weight=None):
243
+ """Return the mean accuracy on the given test data and labels.
244
+
245
+ In multi-label classification, this is the subset accuracy
246
+ which is a harsh metric since you require for each sample that
247
+ each label set be correctly predicted.
248
+
249
+ Parameters
250
+ ----------
251
+ X : array-like of shape (n_samples, n_features)
252
+ Test samples.
253
+
254
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs)
255
+ True labels for X.
256
+
257
+ sample_weight : array-like of shape (n_samples,), default=None
258
+ Sample weights.
259
+
260
+ Returns
261
+ -------
262
+ score : float
263
+ Mean accuracy of self.predict(X) w.r.t. y.
264
+ """
265
+ return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Maximum likelihood covariance estimator.
3
+
4
+ """
5
+
6
+ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
7
+ # Gael Varoquaux <gael.varoquaux@normalesup.org>
8
+ # Virgile Fritsch <virgile.fritsch@inria.fr>
9
+ #
10
+ # License: BSD 3 clause
11
+
12
+ # avoid division truncation
13
+ import warnings
14
+ import numpy as np
15
+ from scipy import linalg
16
+
17
+ from .. import config_context
18
+ from ..base import BaseEstimator
19
+ from ..utils import check_array
20
+ from ..utils.extmath import fast_logdet
21
+ from ..metrics.pairwise import pairwise_distances
22
+
23
+
24
+ def log_likelihood(emp_cov, precision):
25
+ """Compute the sample mean of the log_likelihood under a covariance model.
26
+
27
+ Computes the empirical expected log-likelihood, allowing for universal
28
+ comparison (beyond this software package), and accounts for normalization
29
+ terms and scaling.
30
+
31
+ Parameters
32
+ ----------
33
+ emp_cov : ndarray of shape (n_features, n_features)
34
+ Maximum Likelihood Estimator of covariance.
35
+
36
+ precision : ndarray of shape (n_features, n_features)
37
+ The precision matrix of the covariance model to be tested.
38
+
39
+ Returns
40
+ -------
41
+ log_likelihood_ : float
42
+ Sample mean of the log-likelihood.
43
+ """
44
+ p = precision.shape[0]
45
+ log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision)
46
+ log_likelihood_ -= p * np.log(2 * np.pi)
47
+ log_likelihood_ /= 2.0
48
+ return log_likelihood_
49
+
50
+
51
+ def empirical_covariance(X, *, assume_centered=False):
52
+ """Compute the Maximum likelihood covariance estimator.
53
+
54
+ Parameters
55
+ ----------
56
+ X : ndarray of shape (n_samples, n_features)
57
+ Data from which to compute the covariance estimate.
58
+
59
+ assume_centered : bool, default=False
60
+ If `True`, data will not be centered before computation.
61
+ Useful when working with data whose mean is almost, but not exactly
62
+ zero.
63
+ If `False`, data will be centered before computation.
64
+
65
+ Returns
66
+ -------
67
+ covariance : ndarray of shape (n_features, n_features)
68
+ Empirical covariance (Maximum Likelihood Estimator).
69
+
70
+ Examples
71
+ --------
72
+ >>> from sklearn.covariance import empirical_covariance
73
+ >>> X = [[1,1,1],[1,1,1],[1,1,1],
74
+ ... [0,0,0],[0,0,0],[0,0,0]]
75
+ >>> empirical_covariance(X)
76
+ array([[0.25, 0.25, 0.25],
77
+ [0.25, 0.25, 0.25],
78
+ [0.25, 0.25, 0.25]])
79
+ """
80
+ X = np.asarray(X)
81
+
82
+ if X.ndim == 1:
83
+ X = np.reshape(X, (1, -1))
84
+
85
+ if X.shape[0] == 1:
86
+ warnings.warn(
87
+ "Only one sample available. You may want to reshape your data array"
88
+ )
89
+
90
+ if assume_centered:
91
+ covariance = np.dot(X.T, X) / X.shape[0]
92
+ else:
93
+ covariance = np.cov(X.T, bias=1)
94
+
95
+ if covariance.ndim == 0:
96
+ covariance = np.array([[covariance]])
97
+ return covariance
98
+
99
+
100
+ class EmpiricalCovariance(BaseEstimator):
101
+ """Maximum likelihood covariance estimator.
102
+
103
+ Read more in the :ref:`User Guide <covariance>`.
104
+
105
+ Parameters
106
+ ----------
107
+ store_precision : bool, default=True
108
+ Specifies if the estimated precision is stored.
109
+
110
+ assume_centered : bool, default=False
111
+ If True, data are not centered before computation.
112
+ Useful when working with data whose mean is almost, but not exactly
113
+ zero.
114
+ If False (default), data are centered before computation.
115
+
116
+ Attributes
117
+ ----------
118
+ location_ : ndarray of shape (n_features,)
119
+ Estimated location, i.e. the estimated mean.
120
+
121
+ covariance_ : ndarray of shape (n_features, n_features)
122
+ Estimated covariance matrix
123
+
124
+ precision_ : ndarray of shape (n_features, n_features)
125
+ Estimated pseudo-inverse matrix.
126
+ (stored only if store_precision is True)
127
+
128
+ n_features_in_ : int
129
+ Number of features seen during :term:`fit`.
130
+
131
+ .. versionadded:: 0.24
132
+
133
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
134
+ Names of features seen during :term:`fit`. Defined only when `X`
135
+ has feature names that are all strings.
136
+
137
+ .. versionadded:: 1.0
138
+
139
+ See Also
140
+ --------
141
+ EllipticEnvelope : An object for detecting outliers in
142
+ a Gaussian distributed dataset.
143
+ GraphicalLasso : Sparse inverse covariance estimation
144
+ with an l1-penalized estimator.
145
+ LedoitWolf : LedoitWolf Estimator.
146
+ MinCovDet : Minimum Covariance Determinant
147
+ (robust estimator of covariance).
148
+ OAS : Oracle Approximating Shrinkage Estimator.
149
+ ShrunkCovariance : Covariance estimator with shrinkage.
150
+
151
+ Examples
152
+ --------
153
+ >>> import numpy as np
154
+ >>> from sklearn.covariance import EmpiricalCovariance
155
+ >>> from sklearn.datasets import make_gaussian_quantiles
156
+ >>> real_cov = np.array([[.8, .3],
157
+ ... [.3, .4]])
158
+ >>> rng = np.random.RandomState(0)
159
+ >>> X = rng.multivariate_normal(mean=[0, 0],
160
+ ... cov=real_cov,
161
+ ... size=500)
162
+ >>> cov = EmpiricalCovariance().fit(X)
163
+ >>> cov.covariance_
164
+ array([[0.7569..., 0.2818...],
165
+ [0.2818..., 0.3928...]])
166
+ >>> cov.location_
167
+ array([0.0622..., 0.0193...])
168
+ """
169
+
170
+ _parameter_constraints: dict = {
171
+ "store_precision": ["boolean"],
172
+ "assume_centered": ["boolean"],
173
+ }
174
+
175
+ def __init__(self, *, store_precision=True, assume_centered=False):
176
+ self.store_precision = store_precision
177
+ self.assume_centered = assume_centered
178
+
179
+ def _set_covariance(self, covariance):
180
+ """Saves the covariance and precision estimates
181
+
182
+ Storage is done accordingly to `self.store_precision`.
183
+ Precision stored only if invertible.
184
+
185
+ Parameters
186
+ ----------
187
+ covariance : array-like of shape (n_features, n_features)
188
+ Estimated covariance matrix to be stored, and from which precision
189
+ is computed.
190
+ """
191
+ covariance = check_array(covariance)
192
+ # set covariance
193
+ self.covariance_ = covariance
194
+ # set precision
195
+ if self.store_precision:
196
+ self.precision_ = linalg.pinvh(covariance, check_finite=False)
197
+ else:
198
+ self.precision_ = None
199
+
200
+ def get_precision(self):
201
+ """Getter for the precision matrix.
202
+
203
+ Returns
204
+ -------
205
+ precision_ : array-like of shape (n_features, n_features)
206
+ The precision matrix associated to the current covariance object.
207
+ """
208
+ if self.store_precision:
209
+ precision = self.precision_
210
+ else:
211
+ precision = linalg.pinvh(self.covariance_, check_finite=False)
212
+ return precision
213
+
214
+ def fit(self, X, y=None):
215
+ """Fit the maximum likelihood covariance estimator to X.
216
+
217
+ Parameters
218
+ ----------
219
+ X : array-like of shape (n_samples, n_features)
220
+ Training data, where `n_samples` is the number of samples and
221
+ `n_features` is the number of features.
222
+
223
+ y : Ignored
224
+ Not used, present for API consistency by convention.
225
+
226
+ Returns
227
+ -------
228
+ self : object
229
+ Returns the instance itself.
230
+ """
231
+ self._validate_params()
232
+ X = self._validate_data(X)
233
+ if self.assume_centered:
234
+ self.location_ = np.zeros(X.shape[1])
235
+ else:
236
+ self.location_ = X.mean(0)
237
+ covariance = empirical_covariance(X, assume_centered=self.assume_centered)
238
+ self._set_covariance(covariance)
239
+
240
+ return self
241
+
242
+ def score(self, X_test, y=None):
243
+ """Compute the log-likelihood of `X_test` under the estimated Gaussian model.
244
+
245
+ The Gaussian model is defined by its mean and covariance matrix which are
246
+ represented respectively by `self.location_` and `self.covariance_`.
247
+
248
+ Parameters
249
+ ----------
250
+ X_test : array-like of shape (n_samples, n_features)
251
+ Test data of which we compute the likelihood, where `n_samples` is
252
+ the number of samples and `n_features` is the number of features.
253
+ `X_test` is assumed to be drawn from the same distribution than
254
+ the data used in fit (including centering).
255
+
256
+ y : Ignored
257
+ Not used, present for API consistency by convention.
258
+
259
+ Returns
260
+ -------
261
+ res : float
262
+ The log-likelihood of `X_test` with `self.location_` and `self.covariance_`
263
+ as estimators of the Gaussian model mean and covariance matrix respectively.
264
+ """
265
+ X_test = self._validate_data(X_test, reset=False)
266
+ # compute empirical covariance of the test set
267
+ test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
268
+ # compute log likelihood
269
+ res = log_likelihood(test_cov, self.get_precision())
270
+
271
+ return res
272
+
273
+ def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
274
+ """Compute the Mean Squared Error between two covariance estimators.
275
+
276
+ Parameters
277
+ ----------
278
+ comp_cov : array-like of shape (n_features, n_features)
279
+ The covariance to compare with.
280
+
281
+ norm : {"frobenius", "spectral"}, default="frobenius"
282
+ The type of norm used to compute the error. Available error types:
283
+ - 'frobenius' (default): sqrt(tr(A^t.A))
284
+ - 'spectral': sqrt(max(eigenvalues(A^t.A))
285
+ where A is the error ``(comp_cov - self.covariance_)``.
286
+
287
+ scaling : bool, default=True
288
+ If True (default), the squared error norm is divided by n_features.
289
+ If False, the squared error norm is not rescaled.
290
+
291
+ squared : bool, default=True
292
+ Whether to compute the squared error norm or the error norm.
293
+ If True (default), the squared error norm is returned.
294
+ If False, the error norm is returned.
295
+
296
+ Returns
297
+ -------
298
+ result : float
299
+ The Mean Squared Error (in the sense of the Frobenius norm) between
300
+ `self` and `comp_cov` covariance estimators.
301
+ """
302
+ # compute the error
303
+ error = comp_cov - self.covariance_
304
+ # compute the error norm
305
+ if norm == "frobenius":
306
+ squared_norm = np.sum(error**2)
307
+ elif norm == "spectral":
308
+ squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
309
+ else:
310
+ raise NotImplementedError(
311
+ "Only spectral and frobenius norms are implemented"
312
+ )
313
+ # optionally scale the error norm
314
+ if scaling:
315
+ squared_norm = squared_norm / error.shape[0]
316
+ # finally get either the squared norm or the norm
317
+ if squared:
318
+ result = squared_norm
319
+ else:
320
+ result = np.sqrt(squared_norm)
321
+
322
+ return result
323
+
324
+ def mahalanobis(self, X):
325
+ """Compute the squared Mahalanobis distances of given observations.
326
+
327
+ Parameters
328
+ ----------
329
+ X : array-like of shape (n_samples, n_features)
330
+ The observations, the Mahalanobis distances of the which we
331
+ compute. Observations are assumed to be drawn from the same
332
+ distribution than the data used in fit.
333
+
334
+ Returns
335
+ -------
336
+ dist : ndarray of shape (n_samples,)
337
+ Squared Mahalanobis distances of the observations.
338
+ """
339
+ X = self._validate_data(X, reset=False)
340
+
341
+ precision = self.get_precision()
342
+ with config_context(assume_finite=True):
343
+ # compute mahalanobis distances
344
+ dist = pairwise_distances(
345
+ X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
346
+ )
347
+
348
+ return np.reshape(dist, (len(X),)) ** 2
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py ADDED
@@ -0,0 +1,866 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Robust location and covariance estimators.
3
+
4
+ Here are implemented estimators that are resistant to outliers.
5
+
6
+ """
7
+ # Author: Virgile Fritsch <virgile.fritsch@inria.fr>
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+ import numpy as np
14
+ from scipy import linalg
15
+ from scipy.stats import chi2
16
+
17
+ from . import empirical_covariance, EmpiricalCovariance
18
+ from ..utils.extmath import fast_logdet
19
+ from ..utils import check_random_state, check_array
20
+ from ..utils._param_validation import Interval
21
+
22
+
23
+ # Minimum Covariance Determinant
24
+ # Implementing of an algorithm by Rousseeuw & Van Driessen described in
25
+ # (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
26
+ # 1999, American Statistical Association and the American Society
27
+ # for Quality, TECHNOMETRICS)
28
+ # XXX Is this really a public function? It's not listed in the docs or
29
+ # exported by sklearn.covariance. Deprecate?
30
+ def c_step(
31
+ X,
32
+ n_support,
33
+ remaining_iterations=30,
34
+ initial_estimates=None,
35
+ verbose=False,
36
+ cov_computation_method=empirical_covariance,
37
+ random_state=None,
38
+ ):
39
+ """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
40
+
41
+ Parameters
42
+ ----------
43
+ X : array-like of shape (n_samples, n_features)
44
+ Data set in which we look for the n_support observations whose
45
+ scatter matrix has minimum determinant.
46
+
47
+ n_support : int
48
+ Number of observations to compute the robust estimates of location
49
+ and covariance from. This parameter must be greater than
50
+ `n_samples / 2`.
51
+
52
+ remaining_iterations : int, default=30
53
+ Number of iterations to perform.
54
+ According to [Rouseeuw1999]_, two iterations are sufficient to get
55
+ close to the minimum, and we never need more than 30 to reach
56
+ convergence.
57
+
58
+ initial_estimates : tuple of shape (2,), default=None
59
+ Initial estimates of location and shape from which to run the c_step
60
+ procedure:
61
+ - initial_estimates[0]: an initial location estimate
62
+ - initial_estimates[1]: an initial covariance estimate
63
+
64
+ verbose : bool, default=False
65
+ Verbose mode.
66
+
67
+ cov_computation_method : callable, \
68
+ default=:func:`sklearn.covariance.empirical_covariance`
69
+ The function which will be used to compute the covariance.
70
+ Must return array of shape (n_features, n_features).
71
+
72
+ random_state : int, RandomState instance or None, default=None
73
+ Determines the pseudo random number generator for shuffling the data.
74
+ Pass an int for reproducible results across multiple function calls.
75
+ See :term:`Glossary <random_state>`.
76
+
77
+ Returns
78
+ -------
79
+ location : ndarray of shape (n_features,)
80
+ Robust location estimates.
81
+
82
+ covariance : ndarray of shape (n_features, n_features)
83
+ Robust covariance estimates.
84
+
85
+ support : ndarray of shape (n_samples,)
86
+ A mask for the `n_support` observations whose scatter matrix has
87
+ minimum determinant.
88
+
89
+ References
90
+ ----------
91
+ .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
92
+ Estimator, 1999, American Statistical Association and the American
93
+ Society for Quality, TECHNOMETRICS
94
+ """
95
+ X = np.asarray(X)
96
+ random_state = check_random_state(random_state)
97
+ return _c_step(
98
+ X,
99
+ n_support,
100
+ remaining_iterations=remaining_iterations,
101
+ initial_estimates=initial_estimates,
102
+ verbose=verbose,
103
+ cov_computation_method=cov_computation_method,
104
+ random_state=random_state,
105
+ )
106
+
107
+
108
+ def _c_step(
109
+ X,
110
+ n_support,
111
+ random_state,
112
+ remaining_iterations=30,
113
+ initial_estimates=None,
114
+ verbose=False,
115
+ cov_computation_method=empirical_covariance,
116
+ ):
117
+ n_samples, n_features = X.shape
118
+ dist = np.inf
119
+
120
+ # Initialisation
121
+ support = np.zeros(n_samples, dtype=bool)
122
+ if initial_estimates is None:
123
+ # compute initial robust estimates from a random subset
124
+ support[random_state.permutation(n_samples)[:n_support]] = True
125
+ else:
126
+ # get initial robust estimates from the function parameters
127
+ location = initial_estimates[0]
128
+ covariance = initial_estimates[1]
129
+ # run a special iteration for that case (to get an initial support)
130
+ precision = linalg.pinvh(covariance)
131
+ X_centered = X - location
132
+ dist = (np.dot(X_centered, precision) * X_centered).sum(1)
133
+ # compute new estimates
134
+ support[np.argsort(dist)[:n_support]] = True
135
+
136
+ X_support = X[support]
137
+ location = X_support.mean(0)
138
+ covariance = cov_computation_method(X_support)
139
+
140
+ # Iterative procedure for Minimum Covariance Determinant computation
141
+ det = fast_logdet(covariance)
142
+ # If the data already has singular covariance, calculate the precision,
143
+ # as the loop below will not be entered.
144
+ if np.isinf(det):
145
+ precision = linalg.pinvh(covariance)
146
+
147
+ previous_det = np.inf
148
+ while det < previous_det and remaining_iterations > 0 and not np.isinf(det):
149
+ # save old estimates values
150
+ previous_location = location
151
+ previous_covariance = covariance
152
+ previous_det = det
153
+ previous_support = support
154
+ # compute a new support from the full data set mahalanobis distances
155
+ precision = linalg.pinvh(covariance)
156
+ X_centered = X - location
157
+ dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
158
+ # compute new estimates
159
+ support = np.zeros(n_samples, dtype=bool)
160
+ support[np.argsort(dist)[:n_support]] = True
161
+ X_support = X[support]
162
+ location = X_support.mean(axis=0)
163
+ covariance = cov_computation_method(X_support)
164
+ det = fast_logdet(covariance)
165
+ # update remaining iterations for early stopping
166
+ remaining_iterations -= 1
167
+
168
+ previous_dist = dist
169
+ dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
170
+ # Check if best fit already found (det => 0, logdet => -inf)
171
+ if np.isinf(det):
172
+ results = location, covariance, det, support, dist
173
+ # Check convergence
174
+ if np.allclose(det, previous_det):
175
+ # c_step procedure converged
176
+ if verbose:
177
+ print(
178
+ "Optimal couple (location, covariance) found before"
179
+ " ending iterations (%d left)" % (remaining_iterations)
180
+ )
181
+ results = location, covariance, det, support, dist
182
+ elif det > previous_det:
183
+ # determinant has increased (should not happen)
184
+ warnings.warn(
185
+ "Determinant has increased; this should not happen: "
186
+ "log(det) > log(previous_det) (%.15f > %.15f). "
187
+ "You may want to try with a higher value of "
188
+ "support_fraction (current value: %.3f)."
189
+ % (det, previous_det, n_support / n_samples),
190
+ RuntimeWarning,
191
+ )
192
+ results = (
193
+ previous_location,
194
+ previous_covariance,
195
+ previous_det,
196
+ previous_support,
197
+ previous_dist,
198
+ )
199
+
200
+ # Check early stopping
201
+ if remaining_iterations == 0:
202
+ if verbose:
203
+ print("Maximum number of iterations reached")
204
+ results = location, covariance, det, support, dist
205
+
206
+ return results
207
+
208
+
209
+ def select_candidates(
210
+ X,
211
+ n_support,
212
+ n_trials,
213
+ select=1,
214
+ n_iter=30,
215
+ verbose=False,
216
+ cov_computation_method=empirical_covariance,
217
+ random_state=None,
218
+ ):
219
+ """Finds the best pure subset of observations to compute MCD from it.
220
+
221
+ The purpose of this function is to find the best sets of n_support
222
+ observations with respect to a minimization of their covariance
223
+ matrix determinant. Equivalently, it removes n_samples-n_support
224
+ observations to construct what we call a pure data set (i.e. not
225
+ containing outliers). The list of the observations of the pure
226
+ data set is referred to as the `support`.
227
+
228
+ Starting from a random support, the pure data set is found by the
229
+ c_step procedure introduced by Rousseeuw and Van Driessen in
230
+ [RV]_.
231
+
232
+ Parameters
233
+ ----------
234
+ X : array-like of shape (n_samples, n_features)
235
+ Data (sub)set in which we look for the n_support purest observations.
236
+
237
+ n_support : int
238
+ The number of samples the pure data set must contain.
239
+ This parameter must be in the range `[(n + p + 1)/2] < n_support < n`.
240
+
241
+ n_trials : int or tuple of shape (2,)
242
+ Number of different initial sets of observations from which to
243
+ run the algorithm. This parameter should be a strictly positive
244
+ integer.
245
+ Instead of giving a number of trials to perform, one can provide a
246
+ list of initial estimates that will be used to iteratively run
247
+ c_step procedures. In this case:
248
+ - n_trials[0]: array-like, shape (n_trials, n_features)
249
+ is the list of `n_trials` initial location estimates
250
+ - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
251
+ is the list of `n_trials` initial covariances estimates
252
+
253
+ select : int, default=1
254
+ Number of best candidates results to return. This parameter must be
255
+ a strictly positive integer.
256
+
257
+ n_iter : int, default=30
258
+ Maximum number of iterations for the c_step procedure.
259
+ (2 is enough to be close to the final solution. "Never" exceeds 20).
260
+ This parameter must be a strictly positive integer.
261
+
262
+ verbose : bool, default=False
263
+ Control the output verbosity.
264
+
265
+ cov_computation_method : callable, \
266
+ default=:func:`sklearn.covariance.empirical_covariance`
267
+ The function which will be used to compute the covariance.
268
+ Must return an array of shape (n_features, n_features).
269
+
270
+ random_state : int, RandomState instance or None, default=None
271
+ Determines the pseudo random number generator for shuffling the data.
272
+ Pass an int for reproducible results across multiple function calls.
273
+ See :term:`Glossary <random_state>`.
274
+
275
+ See Also
276
+ ---------
277
+ c_step
278
+
279
+ Returns
280
+ -------
281
+ best_locations : ndarray of shape (select, n_features)
282
+ The `select` location estimates computed from the `select` best
283
+ supports found in the data set (`X`).
284
+
285
+ best_covariances : ndarray of shape (select, n_features, n_features)
286
+ The `select` covariance estimates computed from the `select`
287
+ best supports found in the data set (`X`).
288
+
289
+ best_supports : ndarray of shape (select, n_samples)
290
+ The `select` best supports found in the data set (`X`).
291
+
292
+ References
293
+ ----------
294
+ .. [RV] A Fast Algorithm for the Minimum Covariance Determinant
295
+ Estimator, 1999, American Statistical Association and the American
296
+ Society for Quality, TECHNOMETRICS
297
+ """
298
+ random_state = check_random_state(random_state)
299
+
300
+ if isinstance(n_trials, Integral):
301
+ run_from_estimates = False
302
+ elif isinstance(n_trials, tuple):
303
+ run_from_estimates = True
304
+ estimates_list = n_trials
305
+ n_trials = estimates_list[0].shape[0]
306
+ else:
307
+ raise TypeError(
308
+ "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)"
309
+ % (n_trials, type(n_trials))
310
+ )
311
+
312
+ # compute `n_trials` location and shape estimates candidates in the subset
313
+ all_estimates = []
314
+ if not run_from_estimates:
315
+ # perform `n_trials` computations from random initial supports
316
+ for j in range(n_trials):
317
+ all_estimates.append(
318
+ _c_step(
319
+ X,
320
+ n_support,
321
+ remaining_iterations=n_iter,
322
+ verbose=verbose,
323
+ cov_computation_method=cov_computation_method,
324
+ random_state=random_state,
325
+ )
326
+ )
327
+ else:
328
+ # perform computations from every given initial estimates
329
+ for j in range(n_trials):
330
+ initial_estimates = (estimates_list[0][j], estimates_list[1][j])
331
+ all_estimates.append(
332
+ _c_step(
333
+ X,
334
+ n_support,
335
+ remaining_iterations=n_iter,
336
+ initial_estimates=initial_estimates,
337
+ verbose=verbose,
338
+ cov_computation_method=cov_computation_method,
339
+ random_state=random_state,
340
+ )
341
+ )
342
+ all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip(
343
+ *all_estimates
344
+ )
345
+ # find the `n_best` best results among the `n_trials` ones
346
+ index_best = np.argsort(all_dets_sub)[:select]
347
+ best_locations = np.asarray(all_locs_sub)[index_best]
348
+ best_covariances = np.asarray(all_covs_sub)[index_best]
349
+ best_supports = np.asarray(all_supports_sub)[index_best]
350
+ best_ds = np.asarray(all_ds_sub)[index_best]
351
+
352
+ return best_locations, best_covariances, best_supports, best_ds
353
+
354
+
355
+ def fast_mcd(
356
+ X,
357
+ support_fraction=None,
358
+ cov_computation_method=empirical_covariance,
359
+ random_state=None,
360
+ ):
361
+ """Estimate the Minimum Covariance Determinant matrix.
362
+
363
+ Read more in the :ref:`User Guide <robust_covariance>`.
364
+
365
+ Parameters
366
+ ----------
367
+ X : array-like of shape (n_samples, n_features)
368
+ The data matrix, with p features and n samples.
369
+
370
+ support_fraction : float, default=None
371
+ The proportion of points to be included in the support of the raw
372
+ MCD estimate. Default is `None`, which implies that the minimum
373
+ value of `support_fraction` will be used within the algorithm:
374
+ `(n_sample + n_features + 1) / 2`. This parameter must be in the
375
+ range (0, 1).
376
+
377
+ cov_computation_method : callable, \
378
+ default=:func:`sklearn.covariance.empirical_covariance`
379
+ The function which will be used to compute the covariance.
380
+ Must return an array of shape (n_features, n_features).
381
+
382
+ random_state : int, RandomState instance or None, default=None
383
+ Determines the pseudo random number generator for shuffling the data.
384
+ Pass an int for reproducible results across multiple function calls.
385
+ See :term:`Glossary <random_state>`.
386
+
387
+ Returns
388
+ -------
389
+ location : ndarray of shape (n_features,)
390
+ Robust location of the data.
391
+
392
+ covariance : ndarray of shape (n_features, n_features)
393
+ Robust covariance of the features.
394
+
395
+ support : ndarray of shape (n_samples,), dtype=bool
396
+ A mask of the observations that have been used to compute
397
+ the robust location and covariance estimates of the data set.
398
+
399
+ Notes
400
+ -----
401
+ The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
402
+ in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
403
+ 1999, American Statistical Association and the American Society
404
+ for Quality, TECHNOMETRICS".
405
+ The principle is to compute robust estimates and random subsets before
406
+ pooling them into a larger subsets, and finally into the full data set.
407
+ Depending on the size of the initial sample, we have one, two or three
408
+ such computation levels.
409
+
410
+ Note that only raw estimates are returned. If one is interested in
411
+ the correction and reweighting steps described in [RouseeuwVan]_,
412
+ see the MinCovDet object.
413
+
414
+ References
415
+ ----------
416
+
417
+ .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
418
+ Determinant Estimator, 1999, American Statistical Association
419
+ and the American Society for Quality, TECHNOMETRICS
420
+
421
+ .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
422
+ Asymptotics For The Minimum Covariance Determinant Estimator,
423
+ The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
424
+ """
425
+ random_state = check_random_state(random_state)
426
+
427
+ X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
428
+ n_samples, n_features = X.shape
429
+
430
+ # minimum breakdown value
431
+ if support_fraction is None:
432
+ n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
433
+ else:
434
+ n_support = int(support_fraction * n_samples)
435
+
436
+ # 1-dimensional case quick computation
437
+ # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
438
+ # Regression and Outlier Detection, John Wiley & Sons, chapter 4)
439
+ if n_features == 1:
440
+ if n_support < n_samples:
441
+ # find the sample shortest halves
442
+ X_sorted = np.sort(np.ravel(X))
443
+ diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
444
+ halves_start = np.where(diff == np.min(diff))[0]
445
+ # take the middle points' mean to get the robust location estimate
446
+ location = (
447
+ 0.5
448
+ * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
449
+ )
450
+ support = np.zeros(n_samples, dtype=bool)
451
+ X_centered = X - location
452
+ support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
453
+ covariance = np.asarray([[np.var(X[support])]])
454
+ location = np.array([location])
455
+ # get precision matrix in an optimized way
456
+ precision = linalg.pinvh(covariance)
457
+ dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
458
+ else:
459
+ support = np.ones(n_samples, dtype=bool)
460
+ covariance = np.asarray([[np.var(X)]])
461
+ location = np.asarray([np.mean(X)])
462
+ X_centered = X - location
463
+ # get precision matrix in an optimized way
464
+ precision = linalg.pinvh(covariance)
465
+ dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
466
+ # Starting FastMCD algorithm for p-dimensional case
467
+ if (n_samples > 500) and (n_features > 1):
468
+ # 1. Find candidate supports on subsets
469
+ # a. split the set in subsets of size ~ 300
470
+ n_subsets = n_samples // 300
471
+ n_samples_subsets = n_samples // n_subsets
472
+ samples_shuffle = random_state.permutation(n_samples)
473
+ h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
474
+ # b. perform a total of 500 trials
475
+ n_trials_tot = 500
476
+ # c. select 10 best (location, covariance) for each subset
477
+ n_best_sub = 10
478
+ n_trials = max(10, n_trials_tot // n_subsets)
479
+ n_best_tot = n_subsets * n_best_sub
480
+ all_best_locations = np.zeros((n_best_tot, n_features))
481
+ try:
482
+ all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
483
+ except MemoryError:
484
+ # The above is too big. Let's try with something much small
485
+ # (and less optimal)
486
+ n_best_tot = 10
487
+ all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
488
+ n_best_sub = 2
489
+ for i in range(n_subsets):
490
+ low_bound = i * n_samples_subsets
491
+ high_bound = low_bound + n_samples_subsets
492
+ current_subset = X[samples_shuffle[low_bound:high_bound]]
493
+ best_locations_sub, best_covariances_sub, _, _ = select_candidates(
494
+ current_subset,
495
+ h_subset,
496
+ n_trials,
497
+ select=n_best_sub,
498
+ n_iter=2,
499
+ cov_computation_method=cov_computation_method,
500
+ random_state=random_state,
501
+ )
502
+ subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
503
+ all_best_locations[subset_slice] = best_locations_sub
504
+ all_best_covariances[subset_slice] = best_covariances_sub
505
+ # 2. Pool the candidate supports into a merged set
506
+ # (possibly the full dataset)
507
+ n_samples_merged = min(1500, n_samples)
508
+ h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
509
+ if n_samples > 1500:
510
+ n_best_merged = 10
511
+ else:
512
+ n_best_merged = 1
513
+ # find the best couples (location, covariance) on the merged set
514
+ selection = random_state.permutation(n_samples)[:n_samples_merged]
515
+ locations_merged, covariances_merged, supports_merged, d = select_candidates(
516
+ X[selection],
517
+ h_merged,
518
+ n_trials=(all_best_locations, all_best_covariances),
519
+ select=n_best_merged,
520
+ cov_computation_method=cov_computation_method,
521
+ random_state=random_state,
522
+ )
523
+ # 3. Finally get the overall best (locations, covariance) couple
524
+ if n_samples < 1500:
525
+ # directly get the best couple (location, covariance)
526
+ location = locations_merged[0]
527
+ covariance = covariances_merged[0]
528
+ support = np.zeros(n_samples, dtype=bool)
529
+ dist = np.zeros(n_samples)
530
+ support[selection] = supports_merged[0]
531
+ dist[selection] = d[0]
532
+ else:
533
+ # select the best couple on the full dataset
534
+ locations_full, covariances_full, supports_full, d = select_candidates(
535
+ X,
536
+ n_support,
537
+ n_trials=(locations_merged, covariances_merged),
538
+ select=1,
539
+ cov_computation_method=cov_computation_method,
540
+ random_state=random_state,
541
+ )
542
+ location = locations_full[0]
543
+ covariance = covariances_full[0]
544
+ support = supports_full[0]
545
+ dist = d[0]
546
+ elif n_features > 1:
547
+ # 1. Find the 10 best couples (location, covariance)
548
+ # considering two iterations
549
+ n_trials = 30
550
+ n_best = 10
551
+ locations_best, covariances_best, _, _ = select_candidates(
552
+ X,
553
+ n_support,
554
+ n_trials=n_trials,
555
+ select=n_best,
556
+ n_iter=2,
557
+ cov_computation_method=cov_computation_method,
558
+ random_state=random_state,
559
+ )
560
+ # 2. Select the best couple on the full dataset amongst the 10
561
+ locations_full, covariances_full, supports_full, d = select_candidates(
562
+ X,
563
+ n_support,
564
+ n_trials=(locations_best, covariances_best),
565
+ select=1,
566
+ cov_computation_method=cov_computation_method,
567
+ random_state=random_state,
568
+ )
569
+ location = locations_full[0]
570
+ covariance = covariances_full[0]
571
+ support = supports_full[0]
572
+ dist = d[0]
573
+
574
+ return location, covariance, support, dist
575
+
576
+
577
+ class MinCovDet(EmpiricalCovariance):
578
+ """Minimum Covariance Determinant (MCD): robust estimator of covariance.
579
+
580
+ The Minimum Covariance Determinant covariance estimator is to be applied
581
+ on Gaussian-distributed data, but could still be relevant on data
582
+ drawn from a unimodal, symmetric distribution. It is not meant to be used
583
+ with multi-modal data (the algorithm used to fit a MinCovDet object is
584
+ likely to fail in such a case).
585
+ One should consider projection pursuit methods to deal with multi-modal
586
+ datasets.
587
+
588
+ Read more in the :ref:`User Guide <robust_covariance>`.
589
+
590
+ Parameters
591
+ ----------
592
+ store_precision : bool, default=True
593
+ Specify if the estimated precision is stored.
594
+
595
+ assume_centered : bool, default=False
596
+ If True, the support of the robust location and the covariance
597
+ estimates is computed, and a covariance estimate is recomputed from
598
+ it, without centering the data.
599
+ Useful to work with data whose mean is significantly equal to
600
+ zero but is not exactly zero.
601
+ If False, the robust location and covariance are directly computed
602
+ with the FastMCD algorithm without additional treatment.
603
+
604
+ support_fraction : float, default=None
605
+ The proportion of points to be included in the support of the raw
606
+ MCD estimate. Default is None, which implies that the minimum
607
+ value of support_fraction will be used within the algorithm:
608
+ `(n_sample + n_features + 1) / 2`. The parameter must be in the range
609
+ (0, 1].
610
+
611
+ random_state : int, RandomState instance or None, default=None
612
+ Determines the pseudo random number generator for shuffling the data.
613
+ Pass an int for reproducible results across multiple function calls.
614
+ See :term:`Glossary <random_state>`.
615
+
616
+ Attributes
617
+ ----------
618
+ raw_location_ : ndarray of shape (n_features,)
619
+ The raw robust estimated location before correction and re-weighting.
620
+
621
+ raw_covariance_ : ndarray of shape (n_features, n_features)
622
+ The raw robust estimated covariance before correction and re-weighting.
623
+
624
+ raw_support_ : ndarray of shape (n_samples,)
625
+ A mask of the observations that have been used to compute
626
+ the raw robust estimates of location and shape, before correction
627
+ and re-weighting.
628
+
629
+ location_ : ndarray of shape (n_features,)
630
+ Estimated robust location.
631
+
632
+ covariance_ : ndarray of shape (n_features, n_features)
633
+ Estimated robust covariance matrix.
634
+
635
+ precision_ : ndarray of shape (n_features, n_features)
636
+ Estimated pseudo inverse matrix.
637
+ (stored only if store_precision is True)
638
+
639
+ support_ : ndarray of shape (n_samples,)
640
+ A mask of the observations that have been used to compute
641
+ the robust estimates of location and shape.
642
+
643
+ dist_ : ndarray of shape (n_samples,)
644
+ Mahalanobis distances of the training set (on which :meth:`fit` is
645
+ called) observations.
646
+
647
+ n_features_in_ : int
648
+ Number of features seen during :term:`fit`.
649
+
650
+ .. versionadded:: 0.24
651
+
652
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
653
+ Names of features seen during :term:`fit`. Defined only when `X`
654
+ has feature names that are all strings.
655
+
656
+ .. versionadded:: 1.0
657
+
658
+ See Also
659
+ --------
660
+ EllipticEnvelope : An object for detecting outliers in
661
+ a Gaussian distributed dataset.
662
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
663
+ GraphicalLasso : Sparse inverse covariance estimation
664
+ with an l1-penalized estimator.
665
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
666
+ choice of the l1 penalty.
667
+ LedoitWolf : LedoitWolf Estimator.
668
+ OAS : Oracle Approximating Shrinkage Estimator.
669
+ ShrunkCovariance : Covariance estimator with shrinkage.
670
+
671
+ References
672
+ ----------
673
+
674
+ .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression.
675
+ J. Am Stat Ass, 79:871, 1984.
676
+ .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant
677
+ Estimator, 1999, American Statistical Association and the American
678
+ Society for Quality, TECHNOMETRICS
679
+ .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun,
680
+ Asymptotics For The Minimum Covariance Determinant Estimator,
681
+ The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
682
+
683
+ Examples
684
+ --------
685
+ >>> import numpy as np
686
+ >>> from sklearn.covariance import MinCovDet
687
+ >>> from sklearn.datasets import make_gaussian_quantiles
688
+ >>> real_cov = np.array([[.8, .3],
689
+ ... [.3, .4]])
690
+ >>> rng = np.random.RandomState(0)
691
+ >>> X = rng.multivariate_normal(mean=[0, 0],
692
+ ... cov=real_cov,
693
+ ... size=500)
694
+ >>> cov = MinCovDet(random_state=0).fit(X)
695
+ >>> cov.covariance_
696
+ array([[0.7411..., 0.2535...],
697
+ [0.2535..., 0.3053...]])
698
+ >>> cov.location_
699
+ array([0.0813... , 0.0427...])
700
+ """
701
+
702
+ _parameter_constraints: dict = {
703
+ **EmpiricalCovariance._parameter_constraints,
704
+ "support_fraction": [Interval(Real, 0, 1, closed="right"), None],
705
+ "random_state": ["random_state"],
706
+ }
707
+ _nonrobust_covariance = staticmethod(empirical_covariance)
708
+
709
+ def __init__(
710
+ self,
711
+ *,
712
+ store_precision=True,
713
+ assume_centered=False,
714
+ support_fraction=None,
715
+ random_state=None,
716
+ ):
717
+ self.store_precision = store_precision
718
+ self.assume_centered = assume_centered
719
+ self.support_fraction = support_fraction
720
+ self.random_state = random_state
721
+
722
+ def fit(self, X, y=None):
723
+ """Fit a Minimum Covariance Determinant with the FastMCD algorithm.
724
+
725
+ Parameters
726
+ ----------
727
+ X : array-like of shape (n_samples, n_features)
728
+ Training data, where `n_samples` is the number of samples
729
+ and `n_features` is the number of features.
730
+
731
+ y : Ignored
732
+ Not used, present for API consistency by convention.
733
+
734
+ Returns
735
+ -------
736
+ self : object
737
+ Returns the instance itself.
738
+ """
739
+ self._validate_params()
740
+ X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet")
741
+ random_state = check_random_state(self.random_state)
742
+ n_samples, n_features = X.shape
743
+ # check that the empirical covariance is full rank
744
+ if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
745
+ warnings.warn(
746
+ "The covariance matrix associated to your dataset is not full rank"
747
+ )
748
+ # compute and store raw estimates
749
+ raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
750
+ X,
751
+ support_fraction=self.support_fraction,
752
+ cov_computation_method=self._nonrobust_covariance,
753
+ random_state=random_state,
754
+ )
755
+ if self.assume_centered:
756
+ raw_location = np.zeros(n_features)
757
+ raw_covariance = self._nonrobust_covariance(
758
+ X[raw_support], assume_centered=True
759
+ )
760
+ # get precision matrix in an optimized way
761
+ precision = linalg.pinvh(raw_covariance)
762
+ raw_dist = np.sum(np.dot(X, precision) * X, 1)
763
+ self.raw_location_ = raw_location
764
+ self.raw_covariance_ = raw_covariance
765
+ self.raw_support_ = raw_support
766
+ self.location_ = raw_location
767
+ self.support_ = raw_support
768
+ self.dist_ = raw_dist
769
+ # obtain consistency at normal models
770
+ self.correct_covariance(X)
771
+ # re-weight estimator
772
+ self.reweight_covariance(X)
773
+
774
+ return self
775
+
776
+ def correct_covariance(self, data):
777
+ """Apply a correction to raw Minimum Covariance Determinant estimates.
778
+
779
+ Correction using the empirical correction factor suggested
780
+ by Rousseeuw and Van Driessen in [RVD]_.
781
+
782
+ Parameters
783
+ ----------
784
+ data : array-like of shape (n_samples, n_features)
785
+ The data matrix, with p features and n samples.
786
+ The data set must be the one which was used to compute
787
+ the raw estimates.
788
+
789
+ Returns
790
+ -------
791
+ covariance_corrected : ndarray of shape (n_features, n_features)
792
+ Corrected robust covariance estimate.
793
+
794
+ References
795
+ ----------
796
+
797
+ .. [RVD] A Fast Algorithm for the Minimum Covariance
798
+ Determinant Estimator, 1999, American Statistical Association
799
+ and the American Society for Quality, TECHNOMETRICS
800
+ """
801
+
802
+ # Check that the covariance of the support data is not equal to 0.
803
+ # Otherwise self.dist_ = 0 and thus correction = 0.
804
+ n_samples = len(self.dist_)
805
+ n_support = np.sum(self.support_)
806
+ if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
807
+ raise ValueError(
808
+ "The covariance matrix of the support data "
809
+ "is equal to 0, try to increase support_fraction"
810
+ )
811
+ correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
812
+ covariance_corrected = self.raw_covariance_ * correction
813
+ self.dist_ /= correction
814
+ return covariance_corrected
815
+
816
+ def reweight_covariance(self, data):
817
+ """Re-weight raw Minimum Covariance Determinant estimates.
818
+
819
+ Re-weight observations using Rousseeuw's method (equivalent to
820
+ deleting outlying observations from the data set before
821
+ computing location and covariance estimates) described
822
+ in [RVDriessen]_.
823
+
824
+ Parameters
825
+ ----------
826
+ data : array-like of shape (n_samples, n_features)
827
+ The data matrix, with p features and n samples.
828
+ The data set must be the one which was used to compute
829
+ the raw estimates.
830
+
831
+ Returns
832
+ -------
833
+ location_reweighted : ndarray of shape (n_features,)
834
+ Re-weighted robust location estimate.
835
+
836
+ covariance_reweighted : ndarray of shape (n_features, n_features)
837
+ Re-weighted robust covariance estimate.
838
+
839
+ support_reweighted : ndarray of shape (n_samples,), dtype=bool
840
+ A mask of the observations that have been used to compute
841
+ the re-weighted robust location and covariance estimates.
842
+
843
+ References
844
+ ----------
845
+
846
+ .. [RVDriessen] A Fast Algorithm for the Minimum Covariance
847
+ Determinant Estimator, 1999, American Statistical Association
848
+ and the American Society for Quality, TECHNOMETRICS
849
+ """
850
+ n_samples, n_features = data.shape
851
+ mask = self.dist_ < chi2(n_features).isf(0.025)
852
+ if self.assume_centered:
853
+ location_reweighted = np.zeros(n_features)
854
+ else:
855
+ location_reweighted = data[mask].mean(0)
856
+ covariance_reweighted = self._nonrobust_covariance(
857
+ data[mask], assume_centered=self.assume_centered
858
+ )
859
+ support_reweighted = np.zeros(n_samples, dtype=bool)
860
+ support_reweighted[mask] = True
861
+ self._set_covariance(covariance_reweighted)
862
+ self.location_ = location_reweighted
863
+ self.support_ = support_reweighted
864
+ X_centered = data - self.location_
865
+ self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1)
866
+ return location_reweighted, covariance_reweighted, support_reweighted
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py ADDED
@@ -0,0 +1,764 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Covariance estimators using shrinkage.
3
+
4
+ Shrinkage corresponds to regularising `cov` using a convex combination:
5
+ shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
6
+
7
+ """
8
+
9
+ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
10
+ # Gael Varoquaux <gael.varoquaux@normalesup.org>
11
+ # Virgile Fritsch <virgile.fritsch@inria.fr>
12
+ #
13
+ # License: BSD 3 clause
14
+
15
+ # avoid division truncation
16
+ import warnings
17
+ from numbers import Real, Integral
18
+ import numpy as np
19
+
20
+ from . import empirical_covariance, EmpiricalCovariance
21
+ from .._config import config_context
22
+ from ..utils import check_array
23
+ from ..utils._param_validation import Interval
24
+
25
+
26
def _oas(X, *, assume_centered=False):
    """Estimate covariance with the Oracle Approximating Shrinkage algorithm.

    Implements the estimator of [1]_.

    [1] "Shrinkage algorithms for MMSE covariance estimation.",
    Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
    IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
    https://arxiv.org/pdf/0907.4698.pdf
    """
    # Degenerate single-feature case: shrinkage cannot change anything, so
    # return the (optionally centered) empirical variance directly.
    if X.ndim == 2 and X.shape[1] == 1:
        centered = X if assume_centered else X - X.mean()
        return np.atleast_2d((centered**2).mean()), 0.0

    n_samples, n_features = X.shape

    emp_cov = empirical_covariance(X, assume_centered=assume_centered)

    # Shrinkage per Eq. 23 in [1]:
    # shrinkage = min(
    #     (trace(S @ S.T) + trace(S)**2) / ((n + 1) (trace(S @ S.T) - trace(S)**2 / p)),
    #     1,
    # )
    # with n = n_samples and p = n_features. The common factor 2 / p is
    # dropped since it does not affect the estimator for large p.
    #
    # trace(S @ S.T) / p**2 is the mean of the squared entries of S
    # (Frobenius-norm identity), and the 1 / p**2 factor cancels between
    # numerator and denominator, so we work with means instead of traces.
    frob_mean = np.mean(emp_cov**2)
    mu = np.trace(emp_cov) / n_features
    numerator = frob_mean + mu**2
    denominator = (n_samples + 1) * (frob_mean - mu**2 / n_features)
    shrinkage = 1.0 if denominator == 0 else min(numerator / denominator, 1.0)

    # Convex combination with the scaled-identity target F = mu * I
    # (Eqs. 3-4 in [1]); only the diagonal receives the mu contribution.
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu

    return shrunk_cov, shrinkage
75
+
76
+
77
+ ###############################################################################
78
+ # Public API
79
+ # ShrunkCovariance estimator
80
+
81
+
82
def shrunk_covariance(emp_cov, shrinkage=0.1):
    """Calculate a covariance matrix shrunk on the diagonal.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    emp_cov : array-like of shape (n_features, n_features)
        Covariance matrix to be shrunk.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Returns
    -------
    shrunk_cov : ndarray of shape (n_features, n_features)
        Shrunk covariance.

    Notes
    -----
    The regularized (shrunk) covariance is given by::

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where `mu = trace(cov) / n_features`.
    """
    emp_cov = check_array(emp_cov)
    n_features = emp_cov.shape[0]

    # Off-diagonal entries are simply damped by (1 - shrinkage); the diagonal
    # additionally receives the shrinkage target mu * I, where mu is the
    # average variance trace(cov) / n_features.
    mu = np.trace(emp_cov) / n_features
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    # Stride n_features + 1 over the flat view walks the main diagonal.
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu

    return shrunk_cov
117
+
118
+
119
class ShrunkCovariance(EmpiricalCovariance):
    """Covariance estimator with shrinkage.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data will be centered before computation.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.

    Notes
    -----
    The regularized covariance is given by:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import ShrunkCovariance
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> cov = ShrunkCovariance().fit(X)
    >>> cov.covariance_
    array([[0.7387..., 0.2536...],
           [0.2536..., 0.4110...]])
    >>> cov.location_
    array([0.0622..., 0.0193...])
    """

    # Extends the parent constraints with the shrinkage coefficient,
    # validated to lie in the closed interval [0, 1].
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "shrinkage": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1):
        super().__init__(
            store_precision=store_precision, assume_centered=assume_centered
        )
        self.shrinkage = shrinkage

    def fit(self, X, y=None):
        """Fit the shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._validate_params()
        X = self._validate_data(X)
        # Not calling the parent object to fit, to avoid a potential
        # matrix inversion when setting the precision
        if self.assume_centered:
            # Location is fixed at the origin when the data is assumed centered.
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        # Empirical covariance first, then diagonal shrinkage towards mu * I.
        covariance = empirical_covariance(X, assume_centered=self.assume_centered)
        covariance = shrunk_covariance(covariance, self.shrinkage)
        self._set_covariance(covariance)

        return self
244
+
245
+
246
+ # Ledoit-Wolf estimator
247
+
248
+
249
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
    """Estimate the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.

    Returns
    -------
    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    """
    X = check_array(X)
    # for only one feature, the result is the same whatever the shrinkage
    if len(X.shape) == 2 and X.shape[1] == 1:
        return 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))

    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
    n_samples, n_features = X.shape

    # optionally center data
    if not assume_centered:
        X = X - X.mean(0)

    # A non-blocked version of the computation is present in the tests
    # in tests/test_covariance.py

    # number of blocks to split the covariance matrix into
    n_splits = int(n_features / block_size)
    X2 = X**2
    emp_cov_trace = np.sum(X2, axis=0) / n_samples
    mu = np.sum(emp_cov_trace) / n_features
    beta_ = 0.0  # sum of the coefficients of <X2.T, X2>
    delta_ = 0.0  # sum of the *squared* coefficients of <X.T, X>
    # starting block computation
    # The double loop accumulates the full-block contributions; the trailing
    # statements inside and after the loops cover the remainder rows/columns
    # (indices >= block_size * n_splits) that do not fill a whole block.
    for i in range(n_splits):
        for j in range(n_splits):
            rows = slice(block_size * i, block_size * (i + 1))
            cols = slice(block_size * j, block_size * (j + 1))
            beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
            delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
        rows = slice(block_size * i, block_size * (i + 1))
        beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :]))
        delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2)
    for j in range(n_splits):
        cols = slice(block_size * j, block_size * (j + 1))
        beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols]))
        delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2)
    # Bottom-right remainder-by-remainder corner.
    delta_ += np.sum(
        np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2
    )
    delta_ /= n_samples**2
    beta_ += np.sum(
        np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :])
    )
    # use delta_ to compute beta
    beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_)
    # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
    delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2
    delta /= n_features
    # get final beta as the min between beta and delta
    # We do this to prevent shrinking more than "1", which would invert
    # the value of covariances
    beta = min(beta, delta)
    # finally get shrinkage
    shrinkage = 0 if beta == 0 else beta / delta
    return shrinkage
342
+
343
+
344
def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
    """Estimate the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.
        This is purely a memory optimization and does not affect results.

    Returns
    -------
    shrunk_cov : ndarray of shape (n_features, n_features)
        Shrunk covariance.

    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    """
    X = check_array(X)
    # A single feature means shrinkage cannot change anything: return the
    # (optionally centered) empirical variance with zero shrinkage.
    if X.ndim == 2 and X.shape[1] == 1:
        centered = X if assume_centered else X - X.mean()
        return np.atleast_2d((centered**2).mean()), 0.0

    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
        n_features = X.size
    else:
        _, n_features = X.shape

    # Ledoit-Wolf optimal shrinkage coefficient, then apply it to the
    # empirical covariance: (1 - s) * S + s * mu * I with mu = trace(S) / p.
    lw_shrinkage = ledoit_wolf_shrinkage(
        X, assume_centered=assume_centered, block_size=block_size
    )
    sample_cov = empirical_covariance(X, assume_centered=assume_centered)
    mean_variance = np.sum(np.trace(sample_cov)) / n_features
    regularized = (1.0 - lw_shrinkage) * sample_cov
    # Stride n_features + 1 over the flat view walks the main diagonal.
    regularized.flat[:: n_features + 1] += lw_shrinkage * mean_variance

    return regularized, lw_shrinkage
406
+
407
+
408
class LedoitWolf(EmpiricalCovariance):
    """LedoitWolf Estimator.

    Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
    coefficient is computed using O. Ledoit and M. Wolf's formula as
    described in "A Well-Conditioned Estimator for Large-Dimensional
    Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
    Analysis, Volume 88, Issue 2, February 2004, pages 365-411.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split
        during its Ledoit-Wolf estimation. This is purely a memory
        optimization and does not affect results.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    shrinkage_ : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.

    Notes
    -----
    The regularised covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    and shrinkage is given by the Ledoit and Wolf formula (see References)

    References
    ----------
    "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
    Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
    February 2004, pages 365-411.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import LedoitWolf
    >>> real_cov = np.array([[.4, .2],
    ...                      [.2, .8]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0],
    ...                                   cov=real_cov,
    ...                                   size=50)
    >>> cov = LedoitWolf().fit(X)
    >>> cov.covariance_
    array([[0.4406..., 0.1616...],
           [0.1616..., 0.8022...]])
    >>> cov.location_
    array([ 0.0595... , -0.0075...])
    """

    # Extends the parent constraints with the block size used to chunk
    # the shrinkage computation (a positive integer).
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "block_size": [Interval(Integral, 1, None, closed="left")],
    }

    def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000):
        super().__init__(
            store_precision=store_precision, assume_centered=assume_centered
        )
        self.block_size = block_size

    def fit(self, X, y=None):
        """Fit the Ledoit-Wolf shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._validate_params()
        # Not calling the parent object to fit, to avoid computing the
        # covariance matrix (and potentially the precision)
        X = self._validate_data(X)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        # The data is pre-centered here, so ledoit_wolf is called with
        # assume_centered=True; assume_finite skips redundant NaN checks.
        with config_context(assume_finite=True):
            covariance, shrinkage = ledoit_wolf(
                X - self.location_, assume_centered=True, block_size=self.block_size
            )
        self.shrinkage_ = shrinkage
        self._set_covariance(covariance)

        return self
552
+
553
+
554
+ # OAS estimator
555
def oas(X, *, assume_centered=False):
    """Estimate covariance with the Oracle Approximating Shrinkage as proposed in [1]_.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    Returns
    -------
    shrunk_cov : array-like of shape (n_features, n_features)
        Shrunk covariance.

    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularised covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),

    where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
    (see [1]_).

    The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
    the original article, formula (23) states that 2/p (p being the number of
    features) is multiplied by Trace(cov*cov) in both the numerator and
    denominator, but this operation is omitted because for a large p, the value
    of 2/p is so small that it doesn't affect the value of the estimator.

    References
    ----------
    .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
           Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
           IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
           <0907.4698>`
    """
    X = np.asarray(X)
    # A single feature makes shrinkage irrelevant: return the (optionally
    # centered) empirical variance with zero shrinkage.
    if X.ndim == 2 and X.shape[1] == 1:
        centered = X if assume_centered else X - X.mean()
        return np.atleast_2d((centered**2).mean()), 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
        n_samples, n_features = 1, X.size
    else:
        n_samples, n_features = X.shape

    sample_cov = empirical_covariance(X, assume_centered=assume_centered)
    mu = np.trace(sample_cov) / n_features

    # Shrinkage from Chen et al.'s reference **implementation** (the 2/p
    # factor of Eq. 23 is omitted, see Notes above).
    frob_mean = np.mean(sample_cov**2)
    numerator = frob_mean + mu**2
    denominator = (n_samples + 1.0) * (frob_mean - (mu**2) / n_features)

    shrinkage = 1.0 if denominator == 0 else min(numerator / denominator, 1.0)
    # Convex combination with the scaled-identity target mu * I; only the
    # diagonal (flat stride n_features + 1) receives the mu contribution.
    shrunk_cov = (1.0 - shrinkage) * sample_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu

    return shrunk_cov, shrinkage
631
+
632
+
633
class OAS(EmpiricalCovariance):
    """Oracle Approximating Shrinkage Estimator as proposed in [1]_.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data will be centered before computation.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    shrinkage_ : float
        coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    ShrunkCovariance : Covariance estimator with shrinkage.

    Notes
    -----
    The regularised covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),

    where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
    (see [1]_).

    The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
    the original article, formula (23) states that 2/p (p being the number of
    features) is multiplied by Trace(cov*cov) in both the numerator and
    denominator, but this operation is omitted because for a large p, the value
    of 2/p is so small that it doesn't affect the value of the estimator.

    References
    ----------
    .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
           Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
           IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
           <0907.4698>`

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import OAS
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> oas = OAS().fit(X)
    >>> oas.covariance_
    array([[0.7533..., 0.2763...],
           [0.2763..., 0.3964...]])
    >>> oas.precision_
    array([[ 1.7833..., -1.2431... ],
           [-1.2431...,  3.3889...]])
    >>> oas.shrinkage_
    0.0195...
    """

    def fit(self, X, y=None):
        """Fit the Oracle Approximating Shrinkage covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._validate_params()

        X = self._validate_data(X)
        # Not calling the parent object to fit, to avoid computing the
        # covariance matrix (and potentially the precision)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)

        # Data is pre-centered here, so oas is called with assume_centered=True.
        covariance, shrinkage = oas(X - self.location_, assume_centered=True)
        self.shrinkage_ = shrinkage
        self._set_covariance(covariance)

        return self
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py ADDED
File without changes
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc ADDED
Binary file (6.71 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc ADDED
Binary file (4.39 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope).
3
+ """
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from sklearn.covariance import EllipticEnvelope
9
+ from sklearn.utils._testing import assert_almost_equal
10
+ from sklearn.utils._testing import assert_array_almost_equal
11
+ from sklearn.utils._testing import assert_array_equal
12
+ from sklearn.exceptions import NotFittedError
13
+
14
+
15
def test_elliptic_envelope(global_random_seed):
    # An unfitted estimator must refuse to predict or score; once fitted, the
    # outlier scores, decision values and Mahalanobis distances must be
    # mutually consistent.
    rng = np.random.RandomState(global_random_seed)
    X = rng.randn(100, 10)
    estimator = EllipticEnvelope(contamination=0.1)
    for method in (estimator.predict, estimator.decision_function):
        with pytest.raises(NotFittedError):
            method(X)
    estimator.fit(X)
    labels = estimator.predict(X)
    scores = estimator.score_samples(X)
    decisions = estimator.decision_function(X)

    # score_samples is the negated Mahalanobis distance, which in turn
    # matches the distances cached on the fitted estimator.
    assert_array_almost_equal(scores, -estimator.mahalanobis(X))
    assert_array_almost_equal(estimator.mahalanobis(X), estimator.dist_)
    # Accuracy against an all-ones target equals the inlier fraction.
    n_inliers = 100 - labels[labels == -1].size
    assert_almost_equal(estimator.score(X, np.ones(100)), n_inliers / 100.0)
    # The -1 labels and the negative decision values designate the same set.
    assert sum(labels == -1) == sum(decisions < 0)
34
+
35
+
36
def test_score_samples():
    # score_samples must equal decision_function shifted back by offset_,
    # and the raw sample scores must not depend on the contamination setting.
    X_train = [[1, 1], [1, 2], [2, 1]]
    query = [[2.0, 2.0]]
    clf_contaminated = EllipticEnvelope(contamination=0.2).fit(X_train)
    clf_default = EllipticEnvelope().fit(X_train)
    for clf in (clf_contaminated, clf_default):
        assert_array_equal(
            clf.score_samples(query),
            clf.decision_function(query) + clf.offset_,
        )
    assert_array_equal(
        clf_contaminated.score_samples(query), clf_default.score_samples(query)
    )
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Test the graphical_lasso module.
2
+ """
3
+ import sys
4
+ import pytest
5
+
6
+ import numpy as np
7
+ from scipy import linalg
8
+
9
+ from numpy.testing import assert_allclose
10
+ from sklearn.utils._testing import assert_array_almost_equal
11
+ from sklearn.utils._testing import assert_array_less
12
+ from sklearn.utils._testing import _convert_container
13
+
14
+ from sklearn.covariance import (
15
+ graphical_lasso,
16
+ GraphicalLasso,
17
+ GraphicalLassoCV,
18
+ empirical_covariance,
19
+ )
20
+ from sklearn.datasets import make_sparse_spd_matrix
21
+ from io import StringIO
22
+ from sklearn.utils import check_random_state
23
+ from sklearn import datasets
24
+
25
+
26
+ def test_graphical_lasso(random_state=0):
27
+ # Sample data from a sparse multivariate normal
28
+ dim = 20
29
+ n_samples = 100
30
+ random_state = check_random_state(random_state)
31
+ prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
32
+ cov = linalg.inv(prec)
33
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
34
+ emp_cov = empirical_covariance(X)
35
+
36
+ for alpha in (0.0, 0.1, 0.25):
37
+ covs = dict()
38
+ icovs = dict()
39
+ for method in ("cd", "lars"):
40
+ cov_, icov_, costs = graphical_lasso(
41
+ emp_cov, return_costs=True, alpha=alpha, mode=method
42
+ )
43
+ covs[method] = cov_
44
+ icovs[method] = icov_
45
+ costs, dual_gap = np.array(costs).T
46
+ # Check that the costs always decrease (doesn't hold if alpha == 0)
47
+ if not alpha == 0:
48
+ assert_array_less(np.diff(costs), 0)
49
+ # Check that the 2 approaches give similar results
50
+ assert_array_almost_equal(covs["cd"], covs["lars"], decimal=4)
51
+ assert_array_almost_equal(icovs["cd"], icovs["lars"], decimal=4)
52
+
53
+ # Smoke test the estimator
54
+ model = GraphicalLasso(alpha=0.25).fit(X)
55
+ model.score(X)
56
+ assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4)
57
+ assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4)
58
+
59
+ # For a centered matrix, assume_centered could be chosen True or False
60
+ # Check that this returns indeed the same result for centered data
61
+ Z = X - X.mean(0)
62
+ precs = list()
63
+ for assume_centered in (False, True):
64
+ prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_
65
+ precs.append(prec_)
66
+ assert_array_almost_equal(precs[0], precs[1])
67
+
68
+
69
+ def test_graphical_lasso_iris():
70
+ # Hard-coded solution from R glasso package for alpha=1.0
71
+ # (need to set penalize.diagonal to FALSE)
72
+ cov_R = np.array(
73
+ [
74
+ [0.68112222, 0.0000000, 0.265820, 0.02464314],
75
+ [0.00000000, 0.1887129, 0.000000, 0.00000000],
76
+ [0.26582000, 0.0000000, 3.095503, 0.28697200],
77
+ [0.02464314, 0.0000000, 0.286972, 0.57713289],
78
+ ]
79
+ )
80
+ icov_R = np.array(
81
+ [
82
+ [1.5190747, 0.000000, -0.1304475, 0.0000000],
83
+ [0.0000000, 5.299055, 0.0000000, 0.0000000],
84
+ [-0.1304475, 0.000000, 0.3498624, -0.1683946],
85
+ [0.0000000, 0.000000, -0.1683946, 1.8164353],
86
+ ]
87
+ )
88
+ X = datasets.load_iris().data
89
+ emp_cov = empirical_covariance(X)
90
+ for method in ("cd", "lars"):
91
+ cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method)
92
+ assert_array_almost_equal(cov, cov_R)
93
+ assert_array_almost_equal(icov, icov_R)
94
+
95
+
96
+ def test_graph_lasso_2D():
97
+ # Hard-coded solution from Python skggm package
98
+ # obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
99
+ cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]])
100
+
101
+ icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]])
102
+ X = datasets.load_iris().data[:, 2:]
103
+ emp_cov = empirical_covariance(X)
104
+ for method in ("cd", "lars"):
105
+ cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method)
106
+ assert_array_almost_equal(cov, cov_skggm)
107
+ assert_array_almost_equal(icov, icov_skggm)
108
+
109
+
110
+ def test_graphical_lasso_iris_singular():
111
+ # Small subset of rows to test the rank-deficient case
112
+ # Need to choose samples such that none of the variances are zero
113
+ indices = np.arange(10, 13)
114
+
115
+ # Hard-coded solution from R glasso package for alpha=0.01
116
+ cov_R = np.array(
117
+ [
118
+ [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
119
+ [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
120
+ [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
121
+ [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222],
122
+ ]
123
+ )
124
+ icov_R = np.array(
125
+ [
126
+ [24.42244057, -16.831679593, 0.0, 0.0],
127
+ [-16.83168201, 24.351841681, -6.206896552, -12.5],
128
+ [0.0, -6.206896171, 153.103448276, 0.0],
129
+ [0.0, -12.499999143, 0.0, 462.5],
130
+ ]
131
+ )
132
+ X = datasets.load_iris().data[indices, :]
133
+ emp_cov = empirical_covariance(X)
134
+ for method in ("cd", "lars"):
135
+ cov, icov = graphical_lasso(
136
+ emp_cov, alpha=0.01, return_costs=False, mode=method
137
+ )
138
+ assert_array_almost_equal(cov, cov_R, decimal=5)
139
+ assert_array_almost_equal(icov, icov_R, decimal=5)
140
+
141
+
142
+ def test_graphical_lasso_cv(random_state=1):
143
+ # Sample data from a sparse multivariate normal
144
+ dim = 5
145
+ n_samples = 6
146
+ random_state = check_random_state(random_state)
147
+ prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state)
148
+ cov = linalg.inv(prec)
149
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
150
+ # Capture stdout, to smoke test the verbose mode
151
+ orig_stdout = sys.stdout
152
+ try:
153
+ sys.stdout = StringIO()
154
+ # We need verbose very high so that Parallel prints on stdout
155
+ GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
156
+ finally:
157
+ sys.stdout = orig_stdout
158
+
159
+
160
+ @pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"])
161
+ def test_graphical_lasso_cv_alphas_iterable(alphas_container_type):
162
+ """Check that we can pass an array-like to `alphas`.
163
+
164
+ Non-regression test for:
165
+ https://github.com/scikit-learn/scikit-learn/issues/22489
166
+ """
167
+ true_cov = np.array(
168
+ [
169
+ [0.8, 0.0, 0.2, 0.0],
170
+ [0.0, 0.4, 0.0, 0.0],
171
+ [0.2, 0.0, 0.3, 0.1],
172
+ [0.0, 0.0, 0.1, 0.7],
173
+ ]
174
+ )
175
+ rng = np.random.RandomState(0)
176
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
177
+ alphas = _convert_container([0.02, 0.03], alphas_container_type)
178
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
179
+
180
+
181
+ @pytest.mark.parametrize(
182
+ "alphas,err_type,err_msg",
183
+ [
184
+ ([-0.02, 0.03], ValueError, "must be > 0"),
185
+ ([0, 0.03], ValueError, "must be > 0"),
186
+ (["not_number", 0.03], TypeError, "must be an instance of float"),
187
+ ],
188
+ )
189
+ def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg):
190
+ """Check that if an array-like containing a value
191
+ outside of (0, inf] is passed to `alphas`, a ValueError is raised.
192
+ Check if a string is passed, a TypeError is raised.
193
+ """
194
+ true_cov = np.array(
195
+ [
196
+ [0.8, 0.0, 0.2, 0.0],
197
+ [0.0, 0.4, 0.0, 0.0],
198
+ [0.2, 0.0, 0.3, 0.1],
199
+ [0.0, 0.0, 0.1, 0.7],
200
+ ]
201
+ )
202
+ rng = np.random.RandomState(0)
203
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
204
+
205
+ with pytest.raises(err_type, match=err_msg):
206
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
207
+
208
+
209
+ def test_graphical_lasso_cv_scores():
210
+ splits = 4
211
+ n_alphas = 5
212
+ n_refinements = 3
213
+ true_cov = np.array(
214
+ [
215
+ [0.8, 0.0, 0.2, 0.0],
216
+ [0.0, 0.4, 0.0, 0.0],
217
+ [0.2, 0.0, 0.3, 0.1],
218
+ [0.0, 0.0, 0.1, 0.7],
219
+ ]
220
+ )
221
+ rng = np.random.RandomState(0)
222
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
223
+ cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit(
224
+ X
225
+ )
226
+
227
+ cv_results = cov.cv_results_
228
+ # alpha and one for each split
229
+
230
+ total_alphas = n_refinements * n_alphas + 1
231
+ keys = ["alphas"]
232
+ split_keys = [f"split{i}_test_score" for i in range(splits)]
233
+ for key in keys + split_keys:
234
+ assert key in cv_results
235
+ assert len(cv_results[key]) == total_alphas
236
+
237
+ cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
238
+ expected_mean = cv_scores.mean(axis=0)
239
+ expected_std = cv_scores.std(axis=0)
240
+
241
+ assert_allclose(cov.cv_results_["mean_test_score"], expected_mean)
242
+ assert_allclose(cov.cv_results_["std_test_score"], expected_std)
mplug_owl2/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
2
+ # Gael Varoquaux <gael.varoquaux@normalesup.org>
3
+ # Virgile Fritsch <virgile.fritsch@inria.fr>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ import itertools
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ from sklearn.utils._testing import assert_array_almost_equal
13
+
14
+ from sklearn import datasets
15
+ from sklearn.covariance import empirical_covariance, MinCovDet
16
+ from sklearn.covariance import fast_mcd
17
+
18
+ X = datasets.load_iris().data
19
+ X_1d = X[:, 0]
20
+ n_samples, n_features = X.shape
21
+
22
+
23
+ def test_mcd():
24
+ # Tests the FastMCD algorithm implementation
25
+ # Small data set
26
+ # test without outliers (random independent normal data)
27
+ launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
28
+ # test with a contaminated data set (medium contamination)
29
+ launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
30
+ # test with a contaminated data set (strong contamination)
31
+ launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
32
+
33
+ # Medium data set
34
+ launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
35
+
36
+ # Large data set
37
+ launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
38
+
39
+ # 1D data set
40
+ launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
41
+
42
+
43
+ def test_fast_mcd_on_invalid_input():
44
+ X = np.arange(100)
45
+ msg = "Expected 2D array, got 1D array instead"
46
+ with pytest.raises(ValueError, match=msg):
47
+ fast_mcd(X)
48
+
49
+
50
+ def test_mcd_class_on_invalid_input():
51
+ X = np.arange(100)
52
+ mcd = MinCovDet()
53
+ msg = "Expected 2D array, got 1D array instead"
54
+ with pytest.raises(ValueError, match=msg):
55
+ mcd.fit(X)
56
+
57
+
58
+ def launch_mcd_on_dataset(
59
+ n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support
60
+ ):
61
+
62
+ rand_gen = np.random.RandomState(0)
63
+ data = rand_gen.randn(n_samples, n_features)
64
+ # add some outliers
65
+ outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
66
+ outliers_offset = 10.0 * (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
67
+ data[outliers_index] += outliers_offset
68
+ inliers_mask = np.ones(n_samples).astype(bool)
69
+ inliers_mask[outliers_index] = False
70
+
71
+ pure_data = data[inliers_mask]
72
+ # compute MCD by fitting an object
73
+ mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
74
+ T = mcd_fit.location_
75
+ S = mcd_fit.covariance_
76
+ H = mcd_fit.support_
77
+ # compare with the estimates learnt from the inliers
78
+ error_location = np.mean((pure_data.mean(0) - T) ** 2)
79
+ assert error_location < tol_loc
80
+ error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
81
+ assert error_cov < tol_cov
82
+ assert np.sum(H) >= tol_support
83
+ assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
84
+
85
+
86
+ def test_mcd_issue1127():
87
+ # Check that the code does not break with X.shape = (3, 1)
88
+ # (i.e. n_support = n_samples)
89
+ rnd = np.random.RandomState(0)
90
+ X = rnd.normal(size=(3, 1))
91
+ mcd = MinCovDet()
92
+ mcd.fit(X)
93
+
94
+
95
+ def test_mcd_issue3367():
96
+ # Check that MCD completes when the covariance matrix is singular
97
+ # i.e. one of the rows and columns are all zeros
98
+ rand_gen = np.random.RandomState(0)
99
+
100
+ # Think of these as the values for X and Y -> 10 values between -5 and 5
101
+ data_values = np.linspace(-5, 5, 10).tolist()
102
+ # Get the cartesian product of all possible coordinate pairs from above set
103
+ data = np.array(list(itertools.product(data_values, data_values)))
104
+
105
+ # Add a third column that's all zeros to make our data a set of point
106
+ # within a plane, which means that the covariance matrix will be singular
107
+ data = np.hstack((data, np.zeros((data.shape[0], 1))))
108
+
109
+ # The below line of code should raise an exception if the covariance matrix
110
+ # is singular. As a further test, since we have points in XYZ, the
111
+ # principle components (Eigenvectors) of these directly relate to the
112
+ # geometry of the points. Since it's a plane, we should be able to test
113
+ # that the Eigenvector that corresponds to the smallest Eigenvalue is the
114
+ # plane normal, specifically [0, 0, 1], since everything is in the XY plane
115
+ # (as I've set it up above). To do this one would start by:
116
+ #
117
+ # evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
118
+ # normal = evecs[:, np.argmin(evals)]
119
+ #
120
+ # After which we need to assert that our `normal` is equal to [0, 0, 1].
121
+ # Do note that there is floating point error associated with this, so it's
122
+ # best to subtract the two and then compare some small tolerance (e.g.
123
+ # 1e-12).
124
+ MinCovDet(random_state=rand_gen).fit(data)
125
+
126
+
127
+ def test_mcd_support_covariance_is_zero():
128
+ # Check that MCD returns a ValueError with informative message when the
129
+ # covariance of the support data is equal to 0.
130
+ X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1])
131
+ X_1 = X_1.reshape(-1, 1)
132
+ X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3])
133
+ X_2 = X_2.reshape(-1, 1)
134
+ msg = (
135
+ "The covariance matrix of the support data is equal to 0, try to "
136
+ "increase support_fraction"
137
+ )
138
+ for X in [X_1, X_2]:
139
+ with pytest.raises(ValueError, match=msg):
140
+ MinCovDet().fit(X)
141
+
142
+
143
+ def test_mcd_increasing_det_warning():
144
+ # Check that a warning is raised if we observe increasing determinants
145
+ # during the c_step. In theory the sequence of determinants should be
146
+ # decreasing. Increasing determinants are likely due to ill-conditioned
147
+ # covariance matrices that result in poor precision matrices.
148
+
149
+ X = [
150
+ [5.1, 3.5, 1.4, 0.2],
151
+ [4.9, 3.0, 1.4, 0.2],
152
+ [4.7, 3.2, 1.3, 0.2],
153
+ [4.6, 3.1, 1.5, 0.2],
154
+ [5.0, 3.6, 1.4, 0.2],
155
+ [4.6, 3.4, 1.4, 0.3],
156
+ [5.0, 3.4, 1.5, 0.2],
157
+ [4.4, 2.9, 1.4, 0.2],
158
+ [4.9, 3.1, 1.5, 0.1],
159
+ [5.4, 3.7, 1.5, 0.2],
160
+ [4.8, 3.4, 1.6, 0.2],
161
+ [4.8, 3.0, 1.4, 0.1],
162
+ [4.3, 3.0, 1.1, 0.1],
163
+ [5.1, 3.5, 1.4, 0.3],
164
+ [5.7, 3.8, 1.7, 0.3],
165
+ [5.4, 3.4, 1.7, 0.2],
166
+ [4.6, 3.6, 1.0, 0.2],
167
+ [5.0, 3.0, 1.6, 0.2],
168
+ [5.2, 3.5, 1.5, 0.2],
169
+ ]
170
+
171
+ mcd = MinCovDet(random_state=1)
172
+ warn_msg = "Determinant has increased"
173
+ with pytest.warns(RuntimeWarning, match=warn_msg):
174
+ mcd.fit(X)
mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (433 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc ADDED
Binary file (821 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/enable_iterative_imputer.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Enables IterativeImputer
2
+
3
+ The API and results of this estimator might change without any deprecation
4
+ cycle.
5
+
6
+ Importing this file dynamically sets :class:`~sklearn.impute.IterativeImputer`
7
+ as an attribute of the impute module::
8
+
9
+ >>> # explicitly require this experimental feature
10
+ >>> from sklearn.experimental import enable_iterative_imputer # noqa
11
+ >>> # now you can import normally from impute
12
+ >>> from sklearn.impute import IterativeImputer
13
+ """
14
+
15
+ from ..impute._iterative import IterativeImputer
16
+ from .. import impute
17
+
18
+ # use settattr to avoid mypy errors when monkeypatching
19
+ setattr(impute, "IterativeImputer", IterativeImputer)
20
+ impute.__all__ += ["IterativeImputer"]
mplug_owl2/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests for making sure experimental imports work as expected."""
2
+
3
+ import textwrap
4
+
5
+ from sklearn.utils._testing import assert_run_python_script
6
+
7
+
8
+ def test_import_raises_warning():
9
+ code = """
10
+ import pytest
11
+ with pytest.warns(UserWarning, match="it is not needed to import"):
12
+ from sklearn.experimental import enable_hist_gradient_boosting # noqa
13
+ """
14
+ assert_run_python_script(textwrap.dedent(code))
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
2
+ # Vincent Dubourg <vincent.dubourg@gmail.com>
3
+ # (mostly translation, see implementation details)
4
+ # License: BSD 3 clause
5
+
6
+ """
7
+ The :mod:`sklearn.gaussian_process` module implements Gaussian Process
8
+ based regression and classification.
9
+ """
10
+
11
+ from ._gpr import GaussianProcessRegressor
12
+ from ._gpc import GaussianProcessClassifier
13
+ from . import kernels
14
+
15
+
16
+ __all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "kernels"]
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (481 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc ADDED
Binary file (29.3 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc ADDED
Binary file (18.9 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc ADDED
Binary file (71 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py ADDED
@@ -0,0 +1,903 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Gaussian processes classification."""
2
+
3
+ # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ from numbers import Integral
8
+ from operator import itemgetter
9
+
10
+ import numpy as np
11
+ from scipy.linalg import cholesky, cho_solve, solve
12
+ import scipy.optimize
13
+ from scipy.special import erf, expit
14
+
15
+ from ..base import BaseEstimator, ClassifierMixin, clone
16
+ from .kernels import Kernel, RBF, CompoundKernel, ConstantKernel as C
17
+ from ..utils.validation import check_is_fitted
18
+ from ..utils import check_random_state
19
+ from ..utils.optimize import _check_optimize_result
20
+ from ..utils._param_validation import Interval, StrOptions
21
+ from ..preprocessing import LabelEncoder
22
+ from ..multiclass import OneVsRestClassifier, OneVsOneClassifier
23
+
24
+
25
+ # Values required for approximating the logistic sigmoid by
26
+ # error functions. coefs are obtained via:
27
+ # x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
28
+ # b = logistic(x)
29
+ # A = (erf(np.dot(x, self.lambdas)) + 1) / 2
30
+ # coefs = lstsq(A, b)[0]
31
+ LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
32
+ COEFS = np.array(
33
+ [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]
34
+ )[:, np.newaxis]
35
+
36
+
37
+ class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
38
+ """Binary Gaussian process classification based on Laplace approximation.
39
+
40
+ The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.
41
+
42
+ Internally, the Laplace approximation is used for approximating the
43
+ non-Gaussian posterior by a Gaussian.
44
+
45
+ Currently, the implementation is restricted to using the logistic link
46
+ function.
47
+
48
+ .. versionadded:: 0.18
49
+
50
+ Parameters
51
+ ----------
52
+ kernel : kernel instance, default=None
53
+ The kernel specifying the covariance function of the GP. If None is
54
+ passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
55
+ the kernel's hyperparameters are optimized during fitting.
56
+
57
+ optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
58
+ Can either be one of the internally supported optimizers for optimizing
59
+ the kernel's parameters, specified by a string, or an externally
60
+ defined optimizer passed as a callable. If a callable is passed, it
61
+ must have the signature::
62
+
63
+ def optimizer(obj_func, initial_theta, bounds):
64
+ # * 'obj_func' is the objective function to be maximized, which
65
+ # takes the hyperparameters theta as parameter and an
66
+ # optional flag eval_gradient, which determines if the
67
+ # gradient is returned additionally to the function value
68
+ # * 'initial_theta': the initial value for theta, which can be
69
+ # used by local optimizers
70
+ # * 'bounds': the bounds on the values of theta
71
+ ....
72
+ # Returned are the best found hyperparameters theta and
73
+ # the corresponding value of the target function.
74
+ return theta_opt, func_min
75
+
76
+ Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
77
+ is used. If None is passed, the kernel's parameters are kept fixed.
78
+ Available internal optimizers are::
79
+
80
+ 'fmin_l_bfgs_b'
81
+
82
+ n_restarts_optimizer : int, default=0
83
+ The number of restarts of the optimizer for finding the kernel's
84
+ parameters which maximize the log-marginal likelihood. The first run
85
+ of the optimizer is performed from the kernel's initial parameters,
86
+ the remaining ones (if any) from thetas sampled log-uniform randomly
87
+ from the space of allowed theta-values. If greater than 0, all bounds
88
+ must be finite. Note that n_restarts_optimizer=0 implies that one
89
+ run is performed.
90
+
91
+ max_iter_predict : int, default=100
92
+ The maximum number of iterations in Newton's method for approximating
93
+ the posterior during predict. Smaller values will reduce computation
94
+ time at the cost of worse results.
95
+
96
+ warm_start : bool, default=False
97
+ If warm-starts are enabled, the solution of the last Newton iteration
98
+ on the Laplace approximation of the posterior mode is used as
99
+ initialization for the next call of _posterior_mode(). This can speed
100
+ up convergence when _posterior_mode is called several times on similar
101
+ problems as in hyperparameter optimization. See :term:`the Glossary
102
+ <warm_start>`.
103
+
104
+ copy_X_train : bool, default=True
105
+ If True, a persistent copy of the training data is stored in the
106
+ object. Otherwise, just a reference to the training data is stored,
107
+ which might cause predictions to change if the data is modified
108
+ externally.
109
+
110
+ random_state : int, RandomState instance or None, default=None
111
+ Determines random number generation used to initialize the centers.
112
+ Pass an int for reproducible results across multiple function calls.
113
+ See :term:`Glossary <random_state>`.
114
+
115
+ Attributes
116
+ ----------
117
+ X_train_ : array-like of shape (n_samples, n_features) or list of object
118
+ Feature vectors or other representations of training data (also
119
+ required for prediction).
120
+
121
+ y_train_ : array-like of shape (n_samples,)
122
+ Target values in training data (also required for prediction)
123
+
124
+ classes_ : array-like of shape (n_classes,)
125
+ Unique class labels.
126
+
127
+ kernel_ : kernl instance
128
+ The kernel used for prediction. The structure of the kernel is the
129
+ same as the one passed as parameter but with optimized hyperparameters
130
+
131
+ L_ : array-like of shape (n_samples, n_samples)
132
+ Lower-triangular Cholesky decomposition of the kernel in X_train_
133
+
134
+ pi_ : array-like of shape (n_samples,)
135
+ The probabilities of the positive class for the training points
136
+ X_train_
137
+
138
+ W_sr_ : array-like of shape (n_samples,)
139
+ Square root of W, the Hessian of log-likelihood of the latent function
140
+ values for the observed labels. Since W is diagonal, only the diagonal
141
+ of sqrt(W) is stored.
142
+
143
+ log_marginal_likelihood_value_ : float
144
+ The log-marginal-likelihood of ``self.kernel_.theta``
145
+
146
+ References
147
+ ----------
148
+ .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
149
+ "Gaussian Processes for Machine Learning",
150
+ MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
151
+ """
152
+
153
+ def __init__(
154
+ self,
155
+ kernel=None,
156
+ *,
157
+ optimizer="fmin_l_bfgs_b",
158
+ n_restarts_optimizer=0,
159
+ max_iter_predict=100,
160
+ warm_start=False,
161
+ copy_X_train=True,
162
+ random_state=None,
163
+ ):
164
+ self.kernel = kernel
165
+ self.optimizer = optimizer
166
+ self.n_restarts_optimizer = n_restarts_optimizer
167
+ self.max_iter_predict = max_iter_predict
168
+ self.warm_start = warm_start
169
+ self.copy_X_train = copy_X_train
170
+ self.random_state = random_state
171
+
172
+ def fit(self, X, y):
173
+ """Fit Gaussian process classification model.
174
+
175
+ Parameters
176
+ ----------
177
+ X : array-like of shape (n_samples, n_features) or list of object
178
+ Feature vectors or other representations of training data.
179
+
180
+ y : array-like of shape (n_samples,)
181
+ Target values, must be binary.
182
+
183
+ Returns
184
+ -------
185
+ self : returns an instance of self.
186
+ """
187
+ if self.kernel is None: # Use an RBF kernel as default
188
+ self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
189
+ 1.0, length_scale_bounds="fixed"
190
+ )
191
+ else:
192
+ self.kernel_ = clone(self.kernel)
193
+
194
+ self.rng = check_random_state(self.random_state)
195
+
196
+ self.X_train_ = np.copy(X) if self.copy_X_train else X
197
+
198
+ # Encode class labels and check that it is a binary classification
199
+ # problem
200
+ label_encoder = LabelEncoder()
201
+ self.y_train_ = label_encoder.fit_transform(y)
202
+ self.classes_ = label_encoder.classes_
203
+ if self.classes_.size > 2:
204
+ raise ValueError(
205
+ "%s supports only binary classification. y contains classes %s"
206
+ % (self.__class__.__name__, self.classes_)
207
+ )
208
+ elif self.classes_.size == 1:
209
+ raise ValueError(
210
+ "{0:s} requires 2 classes; got {1:d} class".format(
211
+ self.__class__.__name__, self.classes_.size
212
+ )
213
+ )
214
+
215
+ if self.optimizer is not None and self.kernel_.n_dims > 0:
216
+ # Choose hyperparameters based on maximizing the log-marginal
217
+ # likelihood (potentially starting from several initial values)
218
+ def obj_func(theta, eval_gradient=True):
219
+ if eval_gradient:
220
+ lml, grad = self.log_marginal_likelihood(
221
+ theta, eval_gradient=True, clone_kernel=False
222
+ )
223
+ return -lml, -grad
224
+ else:
225
+ return -self.log_marginal_likelihood(theta, clone_kernel=False)
226
+
227
+ # First optimize starting from theta specified in kernel
228
+ optima = [
229
+ self._constrained_optimization(
230
+ obj_func, self.kernel_.theta, self.kernel_.bounds
231
+ )
232
+ ]
233
+
234
+ # Additional runs are performed from log-uniform chosen initial
235
+ # theta
236
+ if self.n_restarts_optimizer > 0:
237
+ if not np.isfinite(self.kernel_.bounds).all():
238
+ raise ValueError(
239
+ "Multiple optimizer restarts (n_restarts_optimizer>0) "
240
+ "requires that all bounds are finite."
241
+ )
242
+ bounds = self.kernel_.bounds
243
+ for iteration in range(self.n_restarts_optimizer):
244
+ theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
245
+ optima.append(
246
+ self._constrained_optimization(obj_func, theta_initial, bounds)
247
+ )
248
+ # Select result from run with minimal (negative) log-marginal
249
+ # likelihood
250
+ lml_values = list(map(itemgetter(1), optima))
251
+ self.kernel_.theta = optima[np.argmin(lml_values)][0]
252
+ self.kernel_._check_bounds_params()
253
+
254
+ self.log_marginal_likelihood_value_ = -np.min(lml_values)
255
+ else:
256
+ self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
257
+ self.kernel_.theta
258
+ )
259
+
260
+ # Precompute quantities required for predictions which are independent
261
+ # of actual query points
262
+ K = self.kernel_(self.X_train_)
263
+
264
+ _, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
265
+ K, return_temporaries=True
266
+ )
267
+
268
+ return self
269
+
270
+ def predict(self, X):
271
+ """Perform classification on an array of test vectors X.
272
+
273
+ Parameters
274
+ ----------
275
+ X : array-like of shape (n_samples, n_features) or list of object
276
+ Query points where the GP is evaluated for classification.
277
+
278
+ Returns
279
+ -------
280
+ C : ndarray of shape (n_samples,)
281
+ Predicted target values for X, values are from ``classes_``
282
+ """
283
+ check_is_fitted(self)
284
+
285
+ # As discussed on Section 3.4.2 of GPML, for making hard binary
286
+ # decisions, it is enough to compute the MAP of the posterior and
287
+ # pass it through the link function
288
+ K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
289
+ f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
290
+
291
+ return np.where(f_star > 0, self.classes_[1], self.classes_[0])
292
+
293
    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : array-like of shape (n_samples, n_classes)
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute ``classes_``.
        """
        check_is_fitted(self)

        # Based on Algorithm 3.2 of GPML
        K_star = self.kernel_(self.X_train_, X)  # K_star =k(x_star)
        f_star = K_star.T.dot(self.y_train_ - self.pi_)  # Line 4
        v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star)  # Line 5
        # Line 6 (compute np.diag(v.T.dot(v)) via einsum)
        # var_f_star is the predictive variance of the latent function at
        # each query point: prior variance minus the explained part.
        var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)

        # Line 7:
        # Approximate \int log(z) * N(z | f_star, var_f_star)
        # Approximation is due to Williams & Barber, "Bayesian Classification
        # with Gaussian Processes", Appendix A: Approximate the logistic
        # sigmoid by a linear combination of 5 error functions.
        # For information on how this integral can be computed see
        # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
        # NOTE(review): LAMBDAS and COEFS are module-level constants defined
        # above this view; presumably the scale factors and weights of the
        # five-erf approximation referenced here — confirm at the module top.
        alpha = 1 / (2 * var_f_star)
        gamma = LAMBDAS * f_star
        integrals = (
            np.sqrt(np.pi / alpha)
            * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2)))
            / (2 * np.sqrt(var_f_star * 2 * np.pi))
        )
        # pi_star is the probability of the positive class (classes_[1]);
        # the returned array stacks (negative, positive) column-wise.
        pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()

        return np.vstack((1 - pi_star, pi_star)).T
334
+
335
    def log_marginal_likelihood(
        self, theta=None, eval_gradient=False, clone_kernel=True
    ):
        """Returns log-marginal likelihood of theta for training data.

        Parameters
        ----------
        theta : array-like of shape (n_kernel_params,), default=None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default=False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.

        clone_kernel : bool, default=True
            If True, the kernel attribute is copied. If False, the kernel
            attribute is modified, but may result in a performance improvement.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : ndarray of shape (n_kernel_params,), \
                optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when `eval_gradient` is True.
        """
        if theta is None:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        # Evaluate the kernel at the requested hyperparameters; without
        # clone_kernel the fitted kernel object is mutated in place.
        if clone_kernel:
            kernel = self.kernel_.clone_with_theta(theta)
        else:
            kernel = self.kernel_
            kernel.theta = theta

        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)

        # Compute log-marginal-likelihood Z and also store some temporaries
        # which can be reused for computing Z's gradient
        Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)

        if not eval_gradient:
            return Z

        # Compute gradient based on Algorithm 5.1 of GPML
        d_Z = np.empty(theta.shape[0])
        # XXX: Get rid of the np.diag() in the next line
        R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr))  # Line 7
        C = solve(L, W_sr[:, np.newaxis] * K)  # Line 8
        # Line 9: (use einsum to compute np.diag(C.T.dot(C))))
        s_2 = (
            -0.5
            * (np.diag(K) - np.einsum("ij, ij -> j", C, C))
            * (pi * (1 - pi) * (1 - 2 * pi))
        )  # third derivative

        # One gradient component per kernel hyperparameter: s_1 is the
        # explicit dependence of Z on theta, s_2.T.dot(s_3) the implicit
        # dependence through the posterior mode (Alg. 5.1, Lines 11-15).
        for j in range(d_Z.shape[0]):
            C = K_gradient[:, :, j]  # Line 11
            # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
            s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())

            b = C.dot(self.y_train_ - pi)  # Line 13
            s_3 = b - K.dot(R.dot(b))  # Line 14

            d_Z[j] = s_1 + s_2.T.dot(s_3)  # Line 15

        return Z, d_Z
413
+
414
    def _posterior_mode(self, K, return_temporaries=False):
        """Mode-finding for binary Laplace GPC and fixed kernel.

        This approximates the posterior of the latent function values for given
        inputs and target observations with a Gaussian approximation and uses
        Newton's iteration to find the mode of this approximation.

        Parameters
        ----------
        K : ndarray of shape (n_samples, n_samples)
            Kernel (covariance) matrix evaluated on the training inputs.

        return_temporaries : bool, default=False
            If True, additionally return the intermediate quantities
            ``(pi, W_sr, L, b, a)`` needed by prediction and by the
            gradient of the log-marginal likelihood.

        Returns
        -------
        log_marginal_likelihood : float
            Laplace approximation of the log-marginal likelihood at the mode.

        (pi, W_sr, L, b, a) : tuple of ndarray, optional
            Only returned when ``return_temporaries`` is True.
        """
        # Based on Algorithm 3.1 of GPML

        # If warm_start are enabled, we reuse the last solution for the
        # posterior mode as initialization; otherwise, we initialize with 0
        if (
            self.warm_start
            and hasattr(self, "f_cached")
            and self.f_cached.shape == self.y_train_.shape
        ):
            f = self.f_cached
        else:
            f = np.zeros_like(self.y_train_, dtype=np.float64)

        # Use Newton's iteration method to find mode of Laplace approximation
        log_marginal_likelihood = -np.inf
        for _ in range(self.max_iter_predict):
            # Line 4: pi is the logistic likelihood; W its diagonal
            # negative Hessian (pi * (1 - pi)).
            pi = expit(f)
            W = pi * (1 - pi)
            # Line 5: B = I + W^(1/2) K W^(1/2), factored via Cholesky
            W_sr = np.sqrt(W)
            W_sr_K = W_sr[:, np.newaxis] * K
            B = np.eye(W.shape[0]) + W_sr_K * W_sr
            L = cholesky(B, lower=True)
            # Line 6
            b = W * f + (self.y_train_ - pi)
            # Line 7
            a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
            # Line 8
            f = K.dot(a)

            # Line 10: Compute log marginal likelihood in loop and use as
            # convergence criterion
            lml = (
                -0.5 * a.T.dot(f)
                - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum()
                - np.log(np.diag(L)).sum()
            )
            # Check if we have converged (log marginal likelihood does
            # not decrease)
            # XXX: more complex convergence criterion
            if lml - log_marginal_likelihood < 1e-10:
                break
            log_marginal_likelihood = lml

        self.f_cached = f  # Remember solution for later warm-starts
        if return_temporaries:
            return log_marginal_likelihood, (pi, W_sr, L, b, a)
        else:
            return log_marginal_likelihood
471
+
472
+ def _constrained_optimization(self, obj_func, initial_theta, bounds):
473
+ if self.optimizer == "fmin_l_bfgs_b":
474
+ opt_res = scipy.optimize.minimize(
475
+ obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
476
+ )
477
+ _check_optimize_result("lbfgs", opt_res)
478
+ theta_opt, func_min = opt_res.x, opt_res.fun
479
+ elif callable(self.optimizer):
480
+ theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
481
+ else:
482
+ raise ValueError("Unknown optimizer %s." % self.optimizer)
483
+
484
+ return theta_opt, func_min
485
+
486
+
487
class GaussianProcessClassifier(ClassifierMixin, BaseEstimator):
    """Gaussian process classification (GPC) based on Laplace approximation.

    The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.

    Internally, the Laplace approximation is used for approximating the
    non-Gaussian posterior by a Gaussian.

    Currently, the implementation is restricted to using the logistic link
    function. For multi-class classification, several binary one-versus rest
    classifiers are fitted. Note that this class thus does not implement
    a true multi-class Laplace approximation.

    Read more in the :ref:`User Guide <gaussian_process>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernel : kernel instance, default=None
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
        the kernel's hyperparameters are optimized during fitting. Also kernel
        cannot be a `CompoundKernel`.

    optimizer : 'fmin_l_bfgs_b', callable or None, default='fmin_l_bfgs_b'
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::

            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be maximized, which
                #   takes the hyperparameters theta as parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min

        Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are::

            'fmin_l_bfgs_b'

    n_restarts_optimizer : int, default=0
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that n_restarts_optimizer=0 implies that one
        run is performed.

    max_iter_predict : int, default=100
        The maximum number of iterations in Newton's method for approximating
        the posterior during predict. Smaller values will reduce computation
        time at the cost of worse results.

    warm_start : bool, default=False
        If warm-starts are enabled, the solution of the last Newton iteration
        on the Laplace approximation of the posterior mode is used as
        initialization for the next call of _posterior_mode(). This can speed
        up convergence when _posterior_mode is called several times on similar
        problems as in hyperparameter optimization. See :term:`the Glossary
        <warm_start>`.

    copy_X_train : bool, default=True
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest'
        Specifies how multi-class classification problems are handled.
        Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest',
        one binary Gaussian process classifier is fitted for each class, which
        is trained to separate this class from the rest. In 'one_vs_one', one
        binary Gaussian process classifier is fitted for each pair of classes,
        which is trained to separate these two classes. The predictions of
        these binary predictors are combined into multi-class predictions.
        Note that 'one_vs_one' does not support predicting probability
        estimates.

    n_jobs : int, default=None
        The number of jobs to use for the computation: the specified
        multiclass problems are computed in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    base_estimator_ : ``Estimator`` instance
        The estimator instance that defines the likelihood function
        using the observed data.

    kernel_ : kernel instance
        The kernel used for prediction. In case of binary classification,
        the structure of the kernel is the same as the one passed as parameter
        but with optimized hyperparameters. In case of multi-class
        classification, a CompoundKernel is returned which consists of the
        different kernels used in the one-versus-rest classifiers.

    log_marginal_likelihood_value_ : float
        The log-marginal-likelihood of ``self.kernel_.theta``

    classes_ : array-like of shape (n_classes,)
        Unique class labels.

    n_classes_ : int
        The number of classes in the training data

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    GaussianProcessRegressor : Gaussian process regression (GPR).

    References
    ----------
    .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
       "Gaussian Processes for Machine Learning",
       MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * RBF(1.0)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.83548752, 0.03228706, 0.13222543],
           [0.79064206, 0.06525643, 0.14410151]])
    """

    _parameter_constraints: dict = {
        "kernel": [Kernel, None],
        "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None],
        "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")],
        "max_iter_predict": [Interval(Integral, 1, None, closed="left")],
        "warm_start": ["boolean"],
        "copy_X_train": ["boolean"],
        "random_state": ["random_state"],
        "multi_class": [StrOptions({"one_vs_rest", "one_vs_one"})],
        "n_jobs": [Integral, None],
    }

    def __init__(
        self,
        kernel=None,
        *,
        optimizer="fmin_l_bfgs_b",
        n_restarts_optimizer=0,
        max_iter_predict=100,
        warm_start=False,
        copy_X_train=True,
        random_state=None,
        multi_class="one_vs_rest",
        n_jobs=None,
    ):
        self.kernel = kernel
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.max_iter_predict = max_iter_predict
        self.warm_start = warm_start
        self.copy_X_train = copy_X_train
        self.random_state = random_state
        self.multi_class = multi_class
        self.n_jobs = n_jobs

    def fit(self, X, y):
        """Fit Gaussian process classification model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Feature vectors or other representations of training data.

        y : array-like of shape (n_samples,)
            Target values, must be binary.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        self._validate_params()

        if isinstance(self.kernel, CompoundKernel):
            raise ValueError("kernel cannot be a CompoundKernel")

        # Kernels that accept arbitrary (non-vector) objects skip the
        # 2d/numeric validation of X.
        if self.kernel is None or self.kernel.requires_vector_input:
            X, y = self._validate_data(
                X, y, multi_output=False, ensure_2d=True, dtype="numeric"
            )
        else:
            X, y = self._validate_data(
                X, y, multi_output=False, ensure_2d=False, dtype=None
            )

        self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
            kernel=self.kernel,
            optimizer=self.optimizer,
            n_restarts_optimizer=self.n_restarts_optimizer,
            max_iter_predict=self.max_iter_predict,
            warm_start=self.warm_start,
            copy_X_train=self.copy_X_train,
            random_state=self.random_state,
        )

        self.classes_ = np.unique(y)
        self.n_classes_ = self.classes_.size
        if self.n_classes_ == 1:
            raise ValueError(
                "GaussianProcessClassifier requires 2 or more "
                "distinct classes; got %d class (only class %s "
                "is present)" % (self.n_classes_, self.classes_[0])
            )
        # For more than two classes, wrap the binary estimator in a
        # multiclass meta-estimator according to the chosen strategy.
        if self.n_classes_ > 2:
            if self.multi_class == "one_vs_rest":
                self.base_estimator_ = OneVsRestClassifier(
                    self.base_estimator_, n_jobs=self.n_jobs
                )
            elif self.multi_class == "one_vs_one":
                self.base_estimator_ = OneVsOneClassifier(
                    self.base_estimator_, n_jobs=self.n_jobs
                )
            else:
                raise ValueError("Unknown multi-class mode %s" % self.multi_class)

        self.base_estimator_.fit(X, y)

        # Multi-class: store the mean log-marginal likelihood of the fitted
        # binary sub-estimators; binary: that of the single estimator.
        if self.n_classes_ > 2:
            self.log_marginal_likelihood_value_ = np.mean(
                [
                    estimator.log_marginal_likelihood()
                    for estimator in self.base_estimator_.estimators_
                ]
            )
        else:
            self.log_marginal_likelihood_value_ = (
                self.base_estimator_.log_marginal_likelihood()
            )

        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : ndarray of shape (n_samples,)
            Predicted target values for X, values are from ``classes_``.
        """
        check_is_fitted(self)

        if self.kernel is None or self.kernel.requires_vector_input:
            X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
        else:
            X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)

        return self.base_estimator_.predict(X)

    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : array-like of shape (n_samples, n_classes)
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        # OneVsOneClassifier has no predict_proba, so reject early with a
        # clear message rather than failing inside the delegate.
        if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
            raise ValueError(
                "one_vs_one multi-class mode does not support "
                "predicting probability estimates. Use "
                "one_vs_rest mode instead."
            )

        if self.kernel is None or self.kernel.requires_vector_input:
            X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
        else:
            X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)

        return self.base_estimator_.predict_proba(X)

    @property
    def kernel_(self):
        """Return the kernel of the base estimator."""
        if self.n_classes_ == 2:
            return self.base_estimator_.kernel_
        else:
            # Multi-class: collect the fitted per-class kernels into a
            # single CompoundKernel.
            return CompoundKernel(
                [estimator.kernel_ for estimator in self.base_estimator_.estimators_]
            )

    def log_marginal_likelihood(
        self, theta=None, eval_gradient=False, clone_kernel=True
    ):
        """Return log-marginal likelihood of theta for training data.

        In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers are returned.

        Parameters
        ----------
        theta : array-like of shape (n_kernel_params,), default=None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. In the case of multi-class classification, theta may
            be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernel get assigned the
            same theta values. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default=False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. Note that gradient computation is not supported
            for non-binary classification. If True, theta must not be None.

        clone_kernel : bool, default=True
            If True, the kernel attribute is copied. If False, the kernel
            attribute is modified, but may result in a performance improvement.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when `eval_gradient` is True.
        """
        check_is_fitted(self)

        if theta is None:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        theta = np.asarray(theta)
        if self.n_classes_ == 2:
            return self.base_estimator_.log_marginal_likelihood(
                theta, eval_gradient, clone_kernel=clone_kernel
            )
        else:
            if eval_gradient:
                raise NotImplementedError(
                    "Gradient of log-marginal-likelihood not implemented for "
                    "multi-class GPC."
                )
            estimators = self.base_estimator_.estimators_
            n_dims = estimators[0].kernel_.n_dims
            if theta.shape[0] == n_dims:  # use same theta for all sub-kernels
                return np.mean(
                    [
                        estimator.log_marginal_likelihood(
                            theta, clone_kernel=clone_kernel
                        )
                        for i, estimator in enumerate(estimators)
                    ]
                )
            elif theta.shape[0] == n_dims * self.classes_.shape[0]:
                # theta for compound kernel: slice out each sub-kernel's
                # block of hyperparameters.
                return np.mean(
                    [
                        estimator.log_marginal_likelihood(
                            theta[n_dims * i : n_dims * (i + 1)],
                            clone_kernel=clone_kernel,
                        )
                        for i, estimator in enumerate(estimators)
                    ]
                )
            else:
                raise ValueError(
                    "Shape of theta must be either %d or %d. "
                    "Obtained theta with shape %d."
                    % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
                )
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py ADDED
@@ -0,0 +1,639 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Gaussian processes regression."""
2
+
3
+ # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
4
+ # Modified by: Pete Green <p.l.green@liverpool.ac.uk>
5
+ # License: BSD 3 clause
6
+
7
+ import warnings
8
+ from numbers import Integral, Real
9
+ from operator import itemgetter
10
+
11
+ import numpy as np
12
+ from scipy.linalg import cholesky, cho_solve, solve_triangular
13
+ import scipy.optimize
14
+
15
+ from ..base import BaseEstimator, RegressorMixin, clone
16
+ from ..base import MultiOutputMixin
17
+ from .kernels import Kernel, RBF, ConstantKernel as C
18
+ from ..preprocessing._data import _handle_zeros_in_scale
19
+ from ..utils import check_random_state
20
+ from ..utils.optimize import _check_optimize_result
21
+ from ..utils._param_validation import Interval, StrOptions
22
+
23
+ GPR_CHOLESKY_LOWER = True
24
+
25
+
26
+ class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
27
+ """Gaussian process regression (GPR).
28
+
29
+ The implementation is based on Algorithm 2.1 of [RW2006]_.
30
+
31
+ In addition to standard scikit-learn estimator API,
32
+ :class:`GaussianProcessRegressor`:
33
+
34
+ * allows prediction without prior fitting (based on the GP prior)
35
+ * provides an additional method `sample_y(X)`, which evaluates samples
36
+ drawn from the GPR (prior or posterior) at given inputs
37
+ * exposes a method `log_marginal_likelihood(theta)`, which can be used
38
+ externally for other ways of selecting hyperparameters, e.g., via
39
+ Markov chain Monte Carlo.
40
+
41
+ Read more in the :ref:`User Guide <gaussian_process>`.
42
+
43
+ .. versionadded:: 0.18
44
+
45
+ Parameters
46
+ ----------
47
+ kernel : kernel instance, default=None
48
+ The kernel specifying the covariance function of the GP. If None is
49
+ passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed")
50
+ * RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that
51
+ the kernel hyperparameters are optimized during fitting unless the
52
+ bounds are marked as "fixed".
53
+
54
+ alpha : float or ndarray of shape (n_samples,), default=1e-10
55
+ Value added to the diagonal of the kernel matrix during fitting.
56
+ This can prevent a potential numerical issue during fitting, by
57
+ ensuring that the calculated values form a positive definite matrix.
58
+ It can also be interpreted as the variance of additional Gaussian
59
+ measurement noise on the training observations. Note that this is
60
+ different from using a `WhiteKernel`. If an array is passed, it must
61
+ have the same number of entries as the data used for fitting and is
62
+ used as datapoint-dependent noise level. Allowing to specify the
63
+ noise level directly as a parameter is mainly for convenience and
64
+ for consistency with :class:`~sklearn.linear_model.Ridge`.
65
+
66
+ optimizer : "fmin_l_bfgs_b", callable or None, default="fmin_l_bfgs_b"
67
+ Can either be one of the internally supported optimizers for optimizing
68
+ the kernel's parameters, specified by a string, or an externally
69
+ defined optimizer passed as a callable. If a callable is passed, it
70
+ must have the signature::
71
+
72
+ def optimizer(obj_func, initial_theta, bounds):
73
+ # * 'obj_func': the objective function to be minimized, which
74
+ # takes the hyperparameters theta as a parameter and an
75
+ # optional flag eval_gradient, which determines if the
76
+ # gradient is returned additionally to the function value
77
+ # * 'initial_theta': the initial value for theta, which can be
78
+ # used by local optimizers
79
+ # * 'bounds': the bounds on the values of theta
80
+ ....
81
+ # Returned are the best found hyperparameters theta and
82
+ # the corresponding value of the target function.
83
+ return theta_opt, func_min
84
+
85
+ Per default, the L-BFGS-B algorithm from `scipy.optimize.minimize`
86
+ is used. If None is passed, the kernel's parameters are kept fixed.
87
+ Available internal optimizers are: `{'fmin_l_bfgs_b'}`.
88
+
89
+ n_restarts_optimizer : int, default=0
90
+ The number of restarts of the optimizer for finding the kernel's
91
+ parameters which maximize the log-marginal likelihood. The first run
92
+ of the optimizer is performed from the kernel's initial parameters,
93
+ the remaining ones (if any) from thetas sampled log-uniform randomly
94
+ from the space of allowed theta-values. If greater than 0, all bounds
95
+ must be finite. Note that `n_restarts_optimizer == 0` implies that one
96
+ run is performed.
97
+
98
+ normalize_y : bool, default=False
99
+ Whether or not to normalize the target values `y` by removing the mean
100
+ and scaling to unit-variance. This is recommended for cases where
101
+ zero-mean, unit-variance priors are used. Note that, in this
102
+ implementation, the normalisation is reversed before the GP predictions
103
+ are reported.
104
+
105
+ .. versionchanged:: 0.23
106
+
107
+ copy_X_train : bool, default=True
108
+ If True, a persistent copy of the training data is stored in the
109
+ object. Otherwise, just a reference to the training data is stored,
110
+ which might cause predictions to change if the data is modified
111
+ externally.
112
+
113
+ random_state : int, RandomState instance or None, default=None
114
+ Determines random number generation used to initialize the centers.
115
+ Pass an int for reproducible results across multiple function calls.
116
+ See :term:`Glossary <random_state>`.
117
+
118
+ Attributes
119
+ ----------
120
+ X_train_ : array-like of shape (n_samples, n_features) or list of object
121
+ Feature vectors or other representations of training data (also
122
+ required for prediction).
123
+
124
+ y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets)
125
+ Target values in training data (also required for prediction).
126
+
127
+ kernel_ : kernel instance
128
+ The kernel used for prediction. The structure of the kernel is the
129
+ same as the one passed as parameter but with optimized hyperparameters.
130
+
131
+ L_ : array-like of shape (n_samples, n_samples)
132
+ Lower-triangular Cholesky decomposition of the kernel in ``X_train_``.
133
+
134
+ alpha_ : array-like of shape (n_samples,)
135
+ Dual coefficients of training data points in kernel space.
136
+
137
+ log_marginal_likelihood_value_ : float
138
+ The log-marginal-likelihood of ``self.kernel_.theta``.
139
+
140
+ n_features_in_ : int
141
+ Number of features seen during :term:`fit`.
142
+
143
+ .. versionadded:: 0.24
144
+
145
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
146
+ Names of features seen during :term:`fit`. Defined only when `X`
147
+ has feature names that are all strings.
148
+
149
+ .. versionadded:: 1.0
150
+
151
+ See Also
152
+ --------
153
+ GaussianProcessClassifier : Gaussian process classification (GPC)
154
+ based on Laplace approximation.
155
+
156
+ References
157
+ ----------
158
+ .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
159
+ "Gaussian Processes for Machine Learning",
160
+ MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
161
+
162
+ Examples
163
+ --------
164
+ >>> from sklearn.datasets import make_friedman2
165
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
166
+ >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
167
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
168
+ >>> kernel = DotProduct() + WhiteKernel()
169
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
170
+ ... random_state=0).fit(X, y)
171
+ >>> gpr.score(X, y)
172
+ 0.3680...
173
+ >>> gpr.predict(X[:2,:], return_std=True)
174
+ (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
175
+ """
176
+
177
+ _parameter_constraints: dict = {
178
+ "kernel": [None, Kernel],
179
+ "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray],
180
+ "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None],
181
+ "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")],
182
+ "normalize_y": ["boolean"],
183
+ "copy_X_train": ["boolean"],
184
+ "random_state": ["random_state"],
185
+ }
186
+
187
+ def __init__(
188
+ self,
189
+ kernel=None,
190
+ *,
191
+ alpha=1e-10,
192
+ optimizer="fmin_l_bfgs_b",
193
+ n_restarts_optimizer=0,
194
+ normalize_y=False,
195
+ copy_X_train=True,
196
+ random_state=None,
197
+ ):
198
+ self.kernel = kernel
199
+ self.alpha = alpha
200
+ self.optimizer = optimizer
201
+ self.n_restarts_optimizer = n_restarts_optimizer
202
+ self.normalize_y = normalize_y
203
+ self.copy_X_train = copy_X_train
204
+ self.random_state = random_state
205
+
206
+ def fit(self, X, y):
207
+ """Fit Gaussian process regression model.
208
+
209
+ Parameters
210
+ ----------
211
+ X : array-like of shape (n_samples, n_features) or list of object
212
+ Feature vectors or other representations of training data.
213
+
214
+ y : array-like of shape (n_samples,) or (n_samples, n_targets)
215
+ Target values.
216
+
217
+ Returns
218
+ -------
219
+ self : object
220
+ GaussianProcessRegressor class instance.
221
+ """
222
+ self._validate_params()
223
+
224
+ if self.kernel is None: # Use an RBF kernel as default
225
+ self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
226
+ 1.0, length_scale_bounds="fixed"
227
+ )
228
+ else:
229
+ self.kernel_ = clone(self.kernel)
230
+
231
+ self._rng = check_random_state(self.random_state)
232
+
233
+ if self.kernel_.requires_vector_input:
234
+ dtype, ensure_2d = "numeric", True
235
+ else:
236
+ dtype, ensure_2d = None, False
237
+ X, y = self._validate_data(
238
+ X,
239
+ y,
240
+ multi_output=True,
241
+ y_numeric=True,
242
+ ensure_2d=ensure_2d,
243
+ dtype=dtype,
244
+ )
245
+
246
+ # Normalize target value
247
+ if self.normalize_y:
248
+ self._y_train_mean = np.mean(y, axis=0)
249
+ self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)
250
+
251
+ # Remove mean and make unit variance
252
+ y = (y - self._y_train_mean) / self._y_train_std
253
+
254
+ else:
255
+ shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1
256
+ self._y_train_mean = np.zeros(shape=shape_y_stats)
257
+ self._y_train_std = np.ones(shape=shape_y_stats)
258
+
259
+ if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
260
+ if self.alpha.shape[0] == 1:
261
+ self.alpha = self.alpha[0]
262
+ else:
263
+ raise ValueError(
264
+ "alpha must be a scalar or an array with same number of "
265
+ f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})"
266
+ )
267
+
268
+ self.X_train_ = np.copy(X) if self.copy_X_train else X
269
+ self.y_train_ = np.copy(y) if self.copy_X_train else y
270
+
271
+ if self.optimizer is not None and self.kernel_.n_dims > 0:
272
+ # Choose hyperparameters based on maximizing the log-marginal
273
+ # likelihood (potentially starting from several initial values)
274
+ def obj_func(theta, eval_gradient=True):
275
+ if eval_gradient:
276
+ lml, grad = self.log_marginal_likelihood(
277
+ theta, eval_gradient=True, clone_kernel=False
278
+ )
279
+ return -lml, -grad
280
+ else:
281
+ return -self.log_marginal_likelihood(theta, clone_kernel=False)
282
+
283
+ # First optimize starting from theta specified in kernel
284
+ optima = [
285
+ (
286
+ self._constrained_optimization(
287
+ obj_func, self.kernel_.theta, self.kernel_.bounds
288
+ )
289
+ )
290
+ ]
291
+
292
+ # Additional runs are performed from log-uniform chosen initial
293
+ # theta
294
+ if self.n_restarts_optimizer > 0:
295
+ if not np.isfinite(self.kernel_.bounds).all():
296
+ raise ValueError(
297
+ "Multiple optimizer restarts (n_restarts_optimizer>0) "
298
+ "requires that all bounds are finite."
299
+ )
300
+ bounds = self.kernel_.bounds
301
+ for iteration in range(self.n_restarts_optimizer):
302
+ theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
303
+ optima.append(
304
+ self._constrained_optimization(obj_func, theta_initial, bounds)
305
+ )
306
+ # Select result from run with minimal (negative) log-marginal
307
+ # likelihood
308
+ lml_values = list(map(itemgetter(1), optima))
309
+ self.kernel_.theta = optima[np.argmin(lml_values)][0]
310
+ self.kernel_._check_bounds_params()
311
+
312
+ self.log_marginal_likelihood_value_ = -np.min(lml_values)
313
+ else:
314
+ self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
315
+ self.kernel_.theta, clone_kernel=False
316
+ )
317
+
318
+ # Precompute quantities required for predictions which are independent
319
+ # of actual query points
320
+ # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
321
+ K = self.kernel_(self.X_train_)
322
+ K[np.diag_indices_from(K)] += self.alpha
323
+ try:
324
+ self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
325
+ except np.linalg.LinAlgError as exc:
326
+ exc.args = (
327
+ f"The kernel, {self.kernel_}, is not returning a positive "
328
+ "definite matrix. Try gradually increasing the 'alpha' "
329
+ "parameter of your GaussianProcessRegressor estimator.",
330
+ ) + exc.args
331
+ raise
332
+ # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
333
+ self.alpha_ = cho_solve(
334
+ (self.L_, GPR_CHOLESKY_LOWER),
335
+ self.y_train_,
336
+ check_finite=False,
337
+ )
338
+ return self
339
+
340
+ def predict(self, X, return_std=False, return_cov=False):
341
+ """Predict using the Gaussian process regression model.
342
+
343
+ We can also predict based on an unfitted model by using the GP prior.
344
+ In addition to the mean of the predictive distribution, optionally also
345
+ returns its standard deviation (`return_std=True`) or covariance
346
+ (`return_cov=True`). Note that at most one of the two can be requested.
347
+
348
+ Parameters
349
+ ----------
350
+ X : array-like of shape (n_samples, n_features) or list of object
351
+ Query points where the GP is evaluated.
352
+
353
+ return_std : bool, default=False
354
+ If True, the standard-deviation of the predictive distribution at
355
+ the query points is returned along with the mean.
356
+
357
+ return_cov : bool, default=False
358
+ If True, the covariance of the joint predictive distribution at
359
+ the query points is returned along with the mean.
360
+
361
+ Returns
362
+ -------
363
+ y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
364
+ Mean of predictive distribution a query points.
365
+
366
+ y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
367
+ Standard deviation of predictive distribution at query points.
368
+ Only returned when `return_std` is True.
369
+
370
+ y_cov : ndarray of shape (n_samples, n_samples) or \
371
+ (n_samples, n_samples, n_targets), optional
372
+ Covariance of joint predictive distribution a query points.
373
+ Only returned when `return_cov` is True.
374
+ """
375
+ if return_std and return_cov:
376
+ raise RuntimeError(
377
+ "At most one of return_std or return_cov can be requested."
378
+ )
379
+
380
+ if self.kernel is None or self.kernel.requires_vector_input:
381
+ dtype, ensure_2d = "numeric", True
382
+ else:
383
+ dtype, ensure_2d = None, False
384
+
385
+ X = self._validate_data(X, ensure_2d=ensure_2d, dtype=dtype, reset=False)
386
+
387
+ if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
388
+ if self.kernel is None:
389
+ kernel = C(1.0, constant_value_bounds="fixed") * RBF(
390
+ 1.0, length_scale_bounds="fixed"
391
+ )
392
+ else:
393
+ kernel = self.kernel
394
+ y_mean = np.zeros(X.shape[0])
395
+ if return_cov:
396
+ y_cov = kernel(X)
397
+ return y_mean, y_cov
398
+ elif return_std:
399
+ y_var = kernel.diag(X)
400
+ return y_mean, np.sqrt(y_var)
401
+ else:
402
+ return y_mean
403
+ else: # Predict based on GP posterior
404
+ # Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha
405
+ K_trans = self.kernel_(X, self.X_train_)
406
+ y_mean = K_trans @ self.alpha_
407
+
408
+ # undo normalisation
409
+ y_mean = self._y_train_std * y_mean + self._y_train_mean
410
+
411
+ # if y_mean has shape (n_samples, 1), reshape to (n_samples,)
412
+ if y_mean.ndim > 1 and y_mean.shape[1] == 1:
413
+ y_mean = np.squeeze(y_mean, axis=1)
414
+
415
+ # Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T
416
+ V = solve_triangular(
417
+ self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False
418
+ )
419
+
420
+ if return_cov:
421
+ # Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v
422
+ y_cov = self.kernel_(X) - V.T @ V
423
+
424
+ # undo normalisation
425
+ y_cov = np.outer(y_cov, self._y_train_std**2).reshape(
426
+ *y_cov.shape, -1
427
+ )
428
+ # if y_cov has shape (n_samples, n_samples, 1), reshape to
429
+ # (n_samples, n_samples)
430
+ if y_cov.shape[2] == 1:
431
+ y_cov = np.squeeze(y_cov, axis=2)
432
+
433
+ return y_mean, y_cov
434
+ elif return_std:
435
+ # Compute variance of predictive distribution
436
+ # Use einsum to avoid explicitly forming the large matrix
437
+ # V^T @ V just to extract its diagonal afterward.
438
+ y_var = self.kernel_.diag(X).copy()
439
+ y_var -= np.einsum("ij,ji->i", V.T, V)
440
+
441
+ # Check if any of the variances is negative because of
442
+ # numerical issues. If yes: set the variance to 0.
443
+ y_var_negative = y_var < 0
444
+ if np.any(y_var_negative):
445
+ warnings.warn(
446
+ "Predicted variances smaller than 0. "
447
+ "Setting those variances to 0."
448
+ )
449
+ y_var[y_var_negative] = 0.0
450
+
451
+ # undo normalisation
452
+ y_var = np.outer(y_var, self._y_train_std**2).reshape(
453
+ *y_var.shape, -1
454
+ )
455
+
456
+ # if y_var has shape (n_samples, 1), reshape to (n_samples,)
457
+ if y_var.shape[1] == 1:
458
+ y_var = np.squeeze(y_var, axis=1)
459
+
460
+ return y_mean, np.sqrt(y_var)
461
+ else:
462
+ return y_mean
463
+
464
+ def sample_y(self, X, n_samples=1, random_state=0):
465
+ """Draw samples from Gaussian process and evaluate at X.
466
+
467
+ Parameters
468
+ ----------
469
+ X : array-like of shape (n_samples_X, n_features) or list of object
470
+ Query points where the GP is evaluated.
471
+
472
+ n_samples : int, default=1
473
+ Number of samples drawn from the Gaussian process per query point.
474
+
475
+ random_state : int, RandomState instance or None, default=0
476
+ Determines random number generation to randomly draw samples.
477
+ Pass an int for reproducible results across multiple function
478
+ calls.
479
+ See :term:`Glossary <random_state>`.
480
+
481
+ Returns
482
+ -------
483
+ y_samples : ndarray of shape (n_samples_X, n_samples), or \
484
+ (n_samples_X, n_targets, n_samples)
485
+ Values of n_samples samples drawn from Gaussian process and
486
+ evaluated at query points.
487
+ """
488
+ rng = check_random_state(random_state)
489
+
490
+ y_mean, y_cov = self.predict(X, return_cov=True)
491
+ if y_mean.ndim == 1:
492
+ y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
493
+ else:
494
+ y_samples = [
495
+ rng.multivariate_normal(
496
+ y_mean[:, target], y_cov[..., target], n_samples
497
+ ).T[:, np.newaxis]
498
+ for target in range(y_mean.shape[1])
499
+ ]
500
+ y_samples = np.hstack(y_samples)
501
+ return y_samples
502
+
503
+ def log_marginal_likelihood(
504
+ self, theta=None, eval_gradient=False, clone_kernel=True
505
+ ):
506
+ """Return log-marginal likelihood of theta for training data.
507
+
508
+ Parameters
509
+ ----------
510
+ theta : array-like of shape (n_kernel_params,) default=None
511
+ Kernel hyperparameters for which the log-marginal likelihood is
512
+ evaluated. If None, the precomputed log_marginal_likelihood
513
+ of ``self.kernel_.theta`` is returned.
514
+
515
+ eval_gradient : bool, default=False
516
+ If True, the gradient of the log-marginal likelihood with respect
517
+ to the kernel hyperparameters at position theta is returned
518
+ additionally. If True, theta must not be None.
519
+
520
+ clone_kernel : bool, default=True
521
+ If True, the kernel attribute is copied. If False, the kernel
522
+ attribute is modified, but may result in a performance improvement.
523
+
524
+ Returns
525
+ -------
526
+ log_likelihood : float
527
+ Log-marginal likelihood of theta for training data.
528
+
529
+ log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
530
+ Gradient of the log-marginal likelihood with respect to the kernel
531
+ hyperparameters at position theta.
532
+ Only returned when eval_gradient is True.
533
+ """
534
+ if theta is None:
535
+ if eval_gradient:
536
+ raise ValueError("Gradient can only be evaluated for theta!=None")
537
+ return self.log_marginal_likelihood_value_
538
+
539
+ if clone_kernel:
540
+ kernel = self.kernel_.clone_with_theta(theta)
541
+ else:
542
+ kernel = self.kernel_
543
+ kernel.theta = theta
544
+
545
+ if eval_gradient:
546
+ K, K_gradient = kernel(self.X_train_, eval_gradient=True)
547
+ else:
548
+ K = kernel(self.X_train_)
549
+
550
+ # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
551
+ K[np.diag_indices_from(K)] += self.alpha
552
+ try:
553
+ L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
554
+ except np.linalg.LinAlgError:
555
+ return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf
556
+
557
+ # Support multi-dimensional output of self.y_train_
558
+ y_train = self.y_train_
559
+ if y_train.ndim == 1:
560
+ y_train = y_train[:, np.newaxis]
561
+
562
+ # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
563
+ alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False)
564
+
565
+ # Alg 2.1, page 19, line 7
566
+ # -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi)
567
+ # y is originally thought to be a (1, n_samples) row vector. However,
568
+ # in multioutputs, y is of shape (n_samples, 2) and we need to compute
569
+ # y^T . alpha for each output, independently using einsum. Thus, it
570
+ # is equivalent to:
571
+ # for output_idx in range(n_outputs):
572
+ # log_likelihood_dims[output_idx] = (
573
+ # y_train[:, [output_idx]] @ alpha[:, [output_idx]]
574
+ # )
575
+ log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
576
+ log_likelihood_dims -= np.log(np.diag(L)).sum()
577
+ log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
578
+ # the log likehood is sum-up across the outputs
579
+ log_likelihood = log_likelihood_dims.sum(axis=-1)
580
+
581
+ if eval_gradient:
582
+ # Eq. 5.9, p. 114, and footnote 5 in p. 114
583
+ # 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient)
584
+ # alpha is supposed to be a vector of (n_samples,) elements. With
585
+ # multioutputs, alpha is a matrix of size (n_samples, n_outputs).
586
+ # Therefore, we want to construct a matrix of
587
+ # (n_samples, n_samples, n_outputs) equivalent to
588
+ # for output_idx in range(n_outputs):
589
+ # output_alpha = alpha[:, [output_idx]]
590
+ # inner_term[..., output_idx] = output_alpha @ output_alpha.T
591
+ inner_term = np.einsum("ik,jk->ijk", alpha, alpha)
592
+ # compute K^-1 of shape (n_samples, n_samples)
593
+ K_inv = cho_solve(
594
+ (L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False
595
+ )
596
+ # create a new axis to use broadcasting between inner_term and
597
+ # K_inv
598
+ inner_term -= K_inv[..., np.newaxis]
599
+ # Since we are interested about the trace of
600
+ # inner_term @ K_gradient, we don't explicitly compute the
601
+ # matrix-by-matrix operation and instead use an einsum. Therefore
602
+ # it is equivalent to:
603
+ # for param_idx in range(n_kernel_params):
604
+ # for output_idx in range(n_output):
605
+ # log_likehood_gradient_dims[param_idx, output_idx] = (
606
+ # inner_term[..., output_idx] @
607
+ # K_gradient[..., param_idx]
608
+ # )
609
+ log_likelihood_gradient_dims = 0.5 * np.einsum(
610
+ "ijl,jik->kl", inner_term, K_gradient
611
+ )
612
+ # the log likehood gradient is the sum-up across the outputs
613
+ log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1)
614
+
615
+ if eval_gradient:
616
+ return log_likelihood, log_likelihood_gradient
617
+ else:
618
+ return log_likelihood
619
+
620
+ def _constrained_optimization(self, obj_func, initial_theta, bounds):
621
+ if self.optimizer == "fmin_l_bfgs_b":
622
+ opt_res = scipy.optimize.minimize(
623
+ obj_func,
624
+ initial_theta,
625
+ method="L-BFGS-B",
626
+ jac=True,
627
+ bounds=bounds,
628
+ )
629
+ _check_optimize_result("lbfgs", opt_res)
630
+ theta_opt, func_min = opt_res.x, opt_res.fun
631
+ elif callable(self.optimizer):
632
+ theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
633
+ else:
634
+ raise ValueError(f"Unknown optimizer {self.optimizer}.")
635
+
636
+ return theta_opt, func_min
637
+
638
+ def _more_tags(self):
639
+ return {"requires_fit": False}
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py ADDED
@@ -0,0 +1,2390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Kernels for Gaussian process regression and classification.
2
+
3
+ The kernels in this module allow kernel-engineering, i.e., they can be
4
+ combined via the "+" and "*" operators or be exponentiated with a scalar
5
+ via "**". These sum and product expressions can also contain scalar values,
6
+ which are automatically converted to a constant kernel.
7
+
8
+ All kernels allow (analytic) gradient-based hyperparameter optimization.
9
+ The space of hyperparameters can be specified by giving lower und upper
10
+ boundaries for the value of each hyperparameter (the search space is thus
11
+ rectangular). Instead of specifying bounds, hyperparameters can also be
12
+ declared to be "fixed", which causes these hyperparameters to be excluded from
13
+ optimization.
14
+ """
15
+
16
+ # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
17
+ # License: BSD 3 clause
18
+
19
+ # Note: this module is strongly inspired by the kernel module of the george
20
+ # package.
21
+
22
+ from abc import ABCMeta, abstractmethod
23
+ from collections import namedtuple
24
+ import math
25
+ from inspect import signature
26
+
27
+ import numpy as np
28
+ from scipy.special import kv, gamma
29
+ from scipy.spatial.distance import pdist, cdist, squareform
30
+
31
+ from ..metrics.pairwise import pairwise_kernels
32
+ from ..base import clone
33
+ from ..utils.validation import _num_samples
34
+ from ..exceptions import ConvergenceWarning
35
+
36
+ import warnings
37
+
38
+
39
def _check_length_scale(X, length_scale):
    """Validate a kernel length scale against the data.

    Returns the length scale squeezed to at most 1d and cast to float.
    Raises ValueError if it has more than one dimension or if an
    anisotropic (1d) length scale does not match the number of features.
    """
    length_scale = np.squeeze(length_scale).astype(float)
    ndim = np.ndim(length_scale)
    if ndim > 1:
        raise ValueError("length_scale cannot be of dimension greater than 1")
    if ndim == 1 and X.shape[1] != length_scale.shape[0]:
        raise ValueError(
            "Anisotropic kernel must have the same number of "
            "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1])
        )
    return length_scale
49
+
50
+
51
class Hyperparameter(
    namedtuple(
        "Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed")
    )
):
    """A kernel hyperparameter's specification in form of a namedtuple.

    .. versionadded:: 0.18

    Attributes
    ----------
    name : str
        The name of the hyperparameter. A kernel using a hyperparameter
        named "x" must expose the attributes ``self.x`` and ``self.x_bounds``.

    value_type : str
        The type of the hyperparameter. Currently, only "numeric"
        hyperparameters are supported.

    bounds : pair of floats >= 0 or "fixed"
        The lower and upper bound on the parameter. If n_elements>1, a pair
        of 1d arrays with n_elements each may be given instead. Passing the
        string "fixed" makes the hyperparameter's value unchangeable.

    n_elements : int, default=1
        Number of elements of the hyperparameter value; 1 for a scalar,
        >1 for vector-valued hyperparameters such as anisotropic
        length scales.

    fixed : bool, default=None
        Whether the hyperparameter is excluded from tuning. If None, it is
        derived from whether ``bounds == "fixed"``.
    """

    # Keep the memory-efficiency of a raw namedtuple: an empty __slots__
    # prevents this subclass from reintroducing a per-instance __dict__.
    __slots__ = ()

    def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
        if not isinstance(bounds, str) or bounds != "fixed":
            bounds = np.atleast_2d(bounds)
            if n_elements > 1:  # vector-valued parameter
                if bounds.shape[0] == 1:
                    # Broadcast a single (low, high) pair to every element.
                    bounds = np.repeat(bounds, n_elements, 0)
                elif bounds.shape[0] != n_elements:
                    raise ValueError(
                        "Bounds on %s should have either 1 or "
                        "%d dimensions. Given are %d"
                        % (name, n_elements, bounds.shape[0])
                    )

        if fixed is None:
            # Derive "fixed" from the bounds when not given explicitly.
            fixed = isinstance(bounds, str) and bounds == "fixed"
        return super(Hyperparameter, cls).__new__(
            cls, name, value_type, bounds, n_elements, fixed
        )

    # Mainly a testing utility: field-wise equality, with array-aware
    # comparison of the bounds.
    def __eq__(self, other):
        return (
            self.name == other.name
            and self.value_type == other.value_type
            and np.all(self.bounds == other.bounds)
            and self.n_elements == other.n_elements
            and self.fixed == other.fixed
        )
150
+
151
+
152
+ class Kernel(metaclass=ABCMeta):
153
+ """Base class for all kernels.
154
+
155
+ .. versionadded:: 0.18
156
+ """
157
+
158
+ def get_params(self, deep=True):
159
+ """Get parameters of this kernel.
160
+
161
+ Parameters
162
+ ----------
163
+ deep : bool, default=True
164
+ If True, will return the parameters for this estimator and
165
+ contained subobjects that are estimators.
166
+
167
+ Returns
168
+ -------
169
+ params : dict
170
+ Parameter names mapped to their values.
171
+ """
172
+ params = dict()
173
+
174
+ # introspect the constructor arguments to find the model parameters
175
+ # to represent
176
+ cls = self.__class__
177
+ init = getattr(cls.__init__, "deprecated_original", cls.__init__)
178
+ init_sign = signature(init)
179
+ args, varargs = [], []
180
+ for parameter in init_sign.parameters.values():
181
+ if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self":
182
+ args.append(parameter.name)
183
+ if parameter.kind == parameter.VAR_POSITIONAL:
184
+ varargs.append(parameter.name)
185
+
186
+ if len(varargs) != 0:
187
+ raise RuntimeError(
188
+ "scikit-learn kernels should always "
189
+ "specify their parameters in the signature"
190
+ " of their __init__ (no varargs)."
191
+ " %s doesn't follow this convention." % (cls,)
192
+ )
193
+ for arg in args:
194
+ params[arg] = getattr(self, arg)
195
+
196
+ return params
197
+
198
+ def set_params(self, **params):
199
+ """Set the parameters of this kernel.
200
+
201
+ The method works on simple kernels as well as on nested kernels.
202
+ The latter have parameters of the form ``<component>__<parameter>``
203
+ so that it's possible to update each component of a nested object.
204
+
205
+ Returns
206
+ -------
207
+ self
208
+ """
209
+ if not params:
210
+ # Simple optimisation to gain speed (inspect is slow)
211
+ return self
212
+ valid_params = self.get_params(deep=True)
213
+ for key, value in params.items():
214
+ split = key.split("__", 1)
215
+ if len(split) > 1:
216
+ # nested objects case
217
+ name, sub_name = split
218
+ if name not in valid_params:
219
+ raise ValueError(
220
+ "Invalid parameter %s for kernel %s. "
221
+ "Check the list of available parameters "
222
+ "with `kernel.get_params().keys()`." % (name, self)
223
+ )
224
+ sub_object = valid_params[name]
225
+ sub_object.set_params(**{sub_name: value})
226
+ else:
227
+ # simple objects case
228
+ if key not in valid_params:
229
+ raise ValueError(
230
+ "Invalid parameter %s for kernel %s. "
231
+ "Check the list of available parameters "
232
+ "with `kernel.get_params().keys()`."
233
+ % (key, self.__class__.__name__)
234
+ )
235
+ setattr(self, key, value)
236
+ return self
237
+
238
+ def clone_with_theta(self, theta):
239
+ """Returns a clone of self with given hyperparameters theta.
240
+
241
+ Parameters
242
+ ----------
243
+ theta : ndarray of shape (n_dims,)
244
+ The hyperparameters
245
+ """
246
+ cloned = clone(self)
247
+ cloned.theta = theta
248
+ return cloned
249
+
250
+ @property
251
+ def n_dims(self):
252
+ """Returns the number of non-fixed hyperparameters of the kernel."""
253
+ return self.theta.shape[0]
254
+
255
+ @property
256
+ def hyperparameters(self):
257
+ """Returns a list of all hyperparameter specifications."""
258
+ r = [
259
+ getattr(self, attr)
260
+ for attr in dir(self)
261
+ if attr.startswith("hyperparameter_")
262
+ ]
263
+ return r
264
+
265
+ @property
266
+ def theta(self):
267
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
268
+
269
+ Note that theta are typically the log-transformed values of the
270
+ kernel's hyperparameters as this representation of the search space
271
+ is more amenable for hyperparameter search, as hyperparameters like
272
+ length-scales naturally live on a log-scale.
273
+
274
+ Returns
275
+ -------
276
+ theta : ndarray of shape (n_dims,)
277
+ The non-fixed, log-transformed hyperparameters of the kernel
278
+ """
279
+ theta = []
280
+ params = self.get_params()
281
+ for hyperparameter in self.hyperparameters:
282
+ if not hyperparameter.fixed:
283
+ theta.append(params[hyperparameter.name])
284
+ if len(theta) > 0:
285
+ return np.log(np.hstack(theta))
286
+ else:
287
+ return np.array([])
288
+
289
+ @theta.setter
290
+ def theta(self, theta):
291
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
292
+
293
+ Parameters
294
+ ----------
295
+ theta : ndarray of shape (n_dims,)
296
+ The non-fixed, log-transformed hyperparameters of the kernel
297
+ """
298
+ params = self.get_params()
299
+ i = 0
300
+ for hyperparameter in self.hyperparameters:
301
+ if hyperparameter.fixed:
302
+ continue
303
+ if hyperparameter.n_elements > 1:
304
+ # vector-valued parameter
305
+ params[hyperparameter.name] = np.exp(
306
+ theta[i : i + hyperparameter.n_elements]
307
+ )
308
+ i += hyperparameter.n_elements
309
+ else:
310
+ params[hyperparameter.name] = np.exp(theta[i])
311
+ i += 1
312
+
313
+ if i != len(theta):
314
+ raise ValueError(
315
+ "theta has not the correct number of entries."
316
+ " Should be %d; given are %d" % (i, len(theta))
317
+ )
318
+ self.set_params(**params)
319
+
320
+ @property
321
+ def bounds(self):
322
+ """Returns the log-transformed bounds on the theta.
323
+
324
+ Returns
325
+ -------
326
+ bounds : ndarray of shape (n_dims, 2)
327
+ The log-transformed bounds on the kernel's hyperparameters theta
328
+ """
329
+ bounds = [
330
+ hyperparameter.bounds
331
+ for hyperparameter in self.hyperparameters
332
+ if not hyperparameter.fixed
333
+ ]
334
+ if len(bounds) > 0:
335
+ return np.log(np.vstack(bounds))
336
+ else:
337
+ return np.array([])
338
+
339
+ def __add__(self, b):
340
+ if not isinstance(b, Kernel):
341
+ return Sum(self, ConstantKernel(b))
342
+ return Sum(self, b)
343
+
344
+ def __radd__(self, b):
345
+ if not isinstance(b, Kernel):
346
+ return Sum(ConstantKernel(b), self)
347
+ return Sum(b, self)
348
+
349
+ def __mul__(self, b):
350
+ if not isinstance(b, Kernel):
351
+ return Product(self, ConstantKernel(b))
352
+ return Product(self, b)
353
+
354
+ def __rmul__(self, b):
355
+ if not isinstance(b, Kernel):
356
+ return Product(ConstantKernel(b), self)
357
+ return Product(b, self)
358
+
359
+ def __pow__(self, b):
360
+ return Exponentiation(self, b)
361
+
362
+ def __eq__(self, b):
363
+ if type(self) != type(b):
364
+ return False
365
+ params_a = self.get_params()
366
+ params_b = b.get_params()
367
+ for key in set(list(params_a.keys()) + list(params_b.keys())):
368
+ if np.any(params_a.get(key, None) != params_b.get(key, None)):
369
+ return False
370
+ return True
371
+
372
+ def __repr__(self):
373
+ return "{0}({1})".format(
374
+ self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta))
375
+ )
376
+
377
+ @abstractmethod
378
+ def __call__(self, X, Y=None, eval_gradient=False):
379
+ """Evaluate the kernel."""
380
+
381
+ @abstractmethod
382
+ def diag(self, X):
383
+ """Returns the diagonal of the kernel k(X, X).
384
+
385
+ The result of this method is identical to np.diag(self(X)); however,
386
+ it can be evaluated more efficiently since only the diagonal is
387
+ evaluated.
388
+
389
+ Parameters
390
+ ----------
391
+ X : array-like of shape (n_samples,)
392
+ Left argument of the returned kernel k(X, Y)
393
+
394
+ Returns
395
+ -------
396
+ K_diag : ndarray of shape (n_samples_X,)
397
+ Diagonal of kernel k(X, X)
398
+ """
399
+
400
+ @abstractmethod
401
+ def is_stationary(self):
402
+ """Returns whether the kernel is stationary."""
403
+
404
+ @property
405
+ def requires_vector_input(self):
406
+ """Returns whether the kernel is defined on fixed-length feature
407
+ vectors or generic objects. Defaults to True for backward
408
+ compatibility."""
409
+ return True
410
+
411
+ def _check_bounds_params(self):
412
+ """Called after fitting to warn if bounds may have been too tight."""
413
+ list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
414
+ idx = 0
415
+ for hyp in self.hyperparameters:
416
+ if hyp.fixed:
417
+ continue
418
+ for dim in range(hyp.n_elements):
419
+ if list_close[idx, 0]:
420
+ warnings.warn(
421
+ "The optimal value found for "
422
+ "dimension %s of parameter %s is "
423
+ "close to the specified lower "
424
+ "bound %s. Decreasing the bound and"
425
+ " calling fit again may find a "
426
+ "better value." % (dim, hyp.name, hyp.bounds[dim][0]),
427
+ ConvergenceWarning,
428
+ )
429
+ elif list_close[idx, 1]:
430
+ warnings.warn(
431
+ "The optimal value found for "
432
+ "dimension %s of parameter %s is "
433
+ "close to the specified upper "
434
+ "bound %s. Increasing the bound and"
435
+ " calling fit again may find a "
436
+ "better value." % (dim, hyp.name, hyp.bounds[dim][1]),
437
+ ConvergenceWarning,
438
+ )
439
+ idx += 1
440
+
441
+
442
+ class NormalizedKernelMixin:
443
+ """Mixin for kernels which are normalized: k(X, X)=1.
444
+
445
+ .. versionadded:: 0.18
446
+ """
447
+
448
+ def diag(self, X):
449
+ """Returns the diagonal of the kernel k(X, X).
450
+
451
+ The result of this method is identical to np.diag(self(X)); however,
452
+ it can be evaluated more efficiently since only the diagonal is
453
+ evaluated.
454
+
455
+ Parameters
456
+ ----------
457
+ X : ndarray of shape (n_samples_X, n_features)
458
+ Left argument of the returned kernel k(X, Y)
459
+
460
+ Returns
461
+ -------
462
+ K_diag : ndarray of shape (n_samples_X,)
463
+ Diagonal of kernel k(X, X)
464
+ """
465
+ return np.ones(X.shape[0])
466
+
467
+
468
+ class StationaryKernelMixin:
469
+ """Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
470
+
471
+ .. versionadded:: 0.18
472
+ """
473
+
474
+ def is_stationary(self):
475
+ """Returns whether the kernel is stationary."""
476
+ return True
477
+
478
+
479
+ class GenericKernelMixin:
480
+ """Mixin for kernels which operate on generic objects such as variable-
481
+ length sequences, trees, and graphs.
482
+
483
+ .. versionadded:: 0.22
484
+ """
485
+
486
+ @property
487
+ def requires_vector_input(self):
488
+ """Whether the kernel works only on fixed-length feature vectors."""
489
+ return False
490
+
491
+
492
+ class CompoundKernel(Kernel):
493
+ """Kernel which is composed of a set of other kernels.
494
+
495
+ .. versionadded:: 0.18
496
+
497
+ Parameters
498
+ ----------
499
+ kernels : list of Kernels
500
+ The other kernels
501
+
502
+ Examples
503
+ --------
504
+ >>> from sklearn.gaussian_process.kernels import WhiteKernel
505
+ >>> from sklearn.gaussian_process.kernels import RBF
506
+ >>> from sklearn.gaussian_process.kernels import CompoundKernel
507
+ >>> kernel = CompoundKernel(
508
+ ... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
509
+ >>> print(kernel.bounds)
510
+ [[-11.51292546 11.51292546]
511
+ [-11.51292546 11.51292546]]
512
+ >>> print(kernel.n_dims)
513
+ 2
514
+ >>> print(kernel.theta)
515
+ [1.09861229 0.69314718]
516
+ """
517
+
518
+ def __init__(self, kernels):
519
+ self.kernels = kernels
520
+
521
+ def get_params(self, deep=True):
522
+ """Get parameters of this kernel.
523
+
524
+ Parameters
525
+ ----------
526
+ deep : bool, default=True
527
+ If True, will return the parameters for this estimator and
528
+ contained subobjects that are estimators.
529
+
530
+ Returns
531
+ -------
532
+ params : dict
533
+ Parameter names mapped to their values.
534
+ """
535
+ return dict(kernels=self.kernels)
536
+
537
+ @property
538
+ def theta(self):
539
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
540
+
541
+ Note that theta are typically the log-transformed values of the
542
+ kernel's hyperparameters as this representation of the search space
543
+ is more amenable for hyperparameter search, as hyperparameters like
544
+ length-scales naturally live on a log-scale.
545
+
546
+ Returns
547
+ -------
548
+ theta : ndarray of shape (n_dims,)
549
+ The non-fixed, log-transformed hyperparameters of the kernel
550
+ """
551
+ return np.hstack([kernel.theta for kernel in self.kernels])
552
+
553
+ @theta.setter
554
+ def theta(self, theta):
555
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
556
+
557
+ Parameters
558
+ ----------
559
+ theta : array of shape (n_dims,)
560
+ The non-fixed, log-transformed hyperparameters of the kernel
561
+ """
562
+ k_dims = self.k1.n_dims
563
+ for i, kernel in enumerate(self.kernels):
564
+ kernel.theta = theta[i * k_dims : (i + 1) * k_dims]
565
+
566
+ @property
567
+ def bounds(self):
568
+ """Returns the log-transformed bounds on the theta.
569
+
570
+ Returns
571
+ -------
572
+ bounds : array of shape (n_dims, 2)
573
+ The log-transformed bounds on the kernel's hyperparameters theta
574
+ """
575
+ return np.vstack([kernel.bounds for kernel in self.kernels])
576
+
577
+ def __call__(self, X, Y=None, eval_gradient=False):
578
+ """Return the kernel k(X, Y) and optionally its gradient.
579
+
580
+ Note that this compound kernel returns the results of all simple kernel
581
+ stacked along an additional axis.
582
+
583
+ Parameters
584
+ ----------
585
+ X : array-like of shape (n_samples_X, n_features) or list of object, \
586
+ default=None
587
+ Left argument of the returned kernel k(X, Y)
588
+
589
+ Y : array-like of shape (n_samples_X, n_features) or list of object, \
590
+ default=None
591
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
592
+ is evaluated instead.
593
+
594
+ eval_gradient : bool, default=False
595
+ Determines whether the gradient with respect to the log of the
596
+ kernel hyperparameter is computed.
597
+
598
+ Returns
599
+ -------
600
+ K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
601
+ Kernel k(X, Y)
602
+
603
+ K_gradient : ndarray of shape \
604
+ (n_samples_X, n_samples_X, n_dims, n_kernels), optional
605
+ The gradient of the kernel k(X, X) with respect to the log of the
606
+ hyperparameter of the kernel. Only returned when `eval_gradient`
607
+ is True.
608
+ """
609
+ if eval_gradient:
610
+ K = []
611
+ K_grad = []
612
+ for kernel in self.kernels:
613
+ K_single, K_grad_single = kernel(X, Y, eval_gradient)
614
+ K.append(K_single)
615
+ K_grad.append(K_grad_single[..., np.newaxis])
616
+ return np.dstack(K), np.concatenate(K_grad, 3)
617
+ else:
618
+ return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])
619
+
620
+ def __eq__(self, b):
621
+ if type(self) != type(b) or len(self.kernels) != len(b.kernels):
622
+ return False
623
+ return np.all(
624
+ [self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))]
625
+ )
626
+
627
+ def is_stationary(self):
628
+ """Returns whether the kernel is stationary."""
629
+ return np.all([kernel.is_stationary() for kernel in self.kernels])
630
+
631
+ @property
632
+ def requires_vector_input(self):
633
+ """Returns whether the kernel is defined on discrete structures."""
634
+ return np.any([kernel.requires_vector_input for kernel in self.kernels])
635
+
636
+ def diag(self, X):
637
+ """Returns the diagonal of the kernel k(X, X).
638
+
639
+ The result of this method is identical to `np.diag(self(X))`; however,
640
+ it can be evaluated more efficiently since only the diagonal is
641
+ evaluated.
642
+
643
+ Parameters
644
+ ----------
645
+ X : array-like of shape (n_samples_X, n_features) or list of object
646
+ Argument to the kernel.
647
+
648
+ Returns
649
+ -------
650
+ K_diag : ndarray of shape (n_samples_X, n_kernels)
651
+ Diagonal of kernel k(X, X)
652
+ """
653
+ return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
654
+
655
+
656
+ class KernelOperator(Kernel):
657
+ """Base class for all kernel operators.
658
+
659
+ .. versionadded:: 0.18
660
+ """
661
+
662
+ def __init__(self, k1, k2):
663
+ self.k1 = k1
664
+ self.k2 = k2
665
+
666
+ def get_params(self, deep=True):
667
+ """Get parameters of this kernel.
668
+
669
+ Parameters
670
+ ----------
671
+ deep : bool, default=True
672
+ If True, will return the parameters for this estimator and
673
+ contained subobjects that are estimators.
674
+
675
+ Returns
676
+ -------
677
+ params : dict
678
+ Parameter names mapped to their values.
679
+ """
680
+ params = dict(k1=self.k1, k2=self.k2)
681
+ if deep:
682
+ deep_items = self.k1.get_params().items()
683
+ params.update(("k1__" + k, val) for k, val in deep_items)
684
+ deep_items = self.k2.get_params().items()
685
+ params.update(("k2__" + k, val) for k, val in deep_items)
686
+
687
+ return params
688
+
689
+ @property
690
+ def hyperparameters(self):
691
+ """Returns a list of all hyperparameter."""
692
+ r = [
693
+ Hyperparameter(
694
+ "k1__" + hyperparameter.name,
695
+ hyperparameter.value_type,
696
+ hyperparameter.bounds,
697
+ hyperparameter.n_elements,
698
+ )
699
+ for hyperparameter in self.k1.hyperparameters
700
+ ]
701
+
702
+ for hyperparameter in self.k2.hyperparameters:
703
+ r.append(
704
+ Hyperparameter(
705
+ "k2__" + hyperparameter.name,
706
+ hyperparameter.value_type,
707
+ hyperparameter.bounds,
708
+ hyperparameter.n_elements,
709
+ )
710
+ )
711
+ return r
712
+
713
+ @property
714
+ def theta(self):
715
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
716
+
717
+ Note that theta are typically the log-transformed values of the
718
+ kernel's hyperparameters as this representation of the search space
719
+ is more amenable for hyperparameter search, as hyperparameters like
720
+ length-scales naturally live on a log-scale.
721
+
722
+ Returns
723
+ -------
724
+ theta : ndarray of shape (n_dims,)
725
+ The non-fixed, log-transformed hyperparameters of the kernel
726
+ """
727
+ return np.append(self.k1.theta, self.k2.theta)
728
+
729
+ @theta.setter
730
+ def theta(self, theta):
731
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
732
+
733
+ Parameters
734
+ ----------
735
+ theta : ndarray of shape (n_dims,)
736
+ The non-fixed, log-transformed hyperparameters of the kernel
737
+ """
738
+ k1_dims = self.k1.n_dims
739
+ self.k1.theta = theta[:k1_dims]
740
+ self.k2.theta = theta[k1_dims:]
741
+
742
+ @property
743
+ def bounds(self):
744
+ """Returns the log-transformed bounds on the theta.
745
+
746
+ Returns
747
+ -------
748
+ bounds : ndarray of shape (n_dims, 2)
749
+ The log-transformed bounds on the kernel's hyperparameters theta
750
+ """
751
+ if self.k1.bounds.size == 0:
752
+ return self.k2.bounds
753
+ if self.k2.bounds.size == 0:
754
+ return self.k1.bounds
755
+ return np.vstack((self.k1.bounds, self.k2.bounds))
756
+
757
+ def __eq__(self, b):
758
+ if type(self) != type(b):
759
+ return False
760
+ return (self.k1 == b.k1 and self.k2 == b.k2) or (
761
+ self.k1 == b.k2 and self.k2 == b.k1
762
+ )
763
+
764
+ def is_stationary(self):
765
+ """Returns whether the kernel is stationary."""
766
+ return self.k1.is_stationary() and self.k2.is_stationary()
767
+
768
+ @property
769
+ def requires_vector_input(self):
770
+ """Returns whether the kernel is stationary."""
771
+ return self.k1.requires_vector_input or self.k2.requires_vector_input
772
+
773
+
774
+ class Sum(KernelOperator):
775
+ """The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2`
776
+ and combines them via
777
+
778
+ .. math::
779
+ k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)
780
+
781
+ Note that the `__add__` magic method is overridden, so
782
+ `Sum(RBF(), RBF())` is equivalent to using the + operator
783
+ with `RBF() + RBF()`.
784
+
785
+
786
+ Read more in the :ref:`User Guide <gp_kernels>`.
787
+
788
+ .. versionadded:: 0.18
789
+
790
+ Parameters
791
+ ----------
792
+ k1 : Kernel
793
+ The first base-kernel of the sum-kernel
794
+
795
+ k2 : Kernel
796
+ The second base-kernel of the sum-kernel
797
+
798
+ Examples
799
+ --------
800
+ >>> from sklearn.datasets import make_friedman2
801
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
802
+ >>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel
803
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
804
+ >>> kernel = Sum(ConstantKernel(2), RBF())
805
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
806
+ ... random_state=0).fit(X, y)
807
+ >>> gpr.score(X, y)
808
+ 1.0
809
+ >>> kernel
810
+ 1.41**2 + RBF(length_scale=1)
811
+ """
812
+
813
+ def __call__(self, X, Y=None, eval_gradient=False):
814
+ """Return the kernel k(X, Y) and optionally its gradient.
815
+
816
+ Parameters
817
+ ----------
818
+ X : array-like of shape (n_samples_X, n_features) or list of object
819
+ Left argument of the returned kernel k(X, Y)
820
+
821
+ Y : array-like of shape (n_samples_X, n_features) or list of object,\
822
+ default=None
823
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
824
+ is evaluated instead.
825
+
826
+ eval_gradient : bool, default=False
827
+ Determines whether the gradient with respect to the log of
828
+ the kernel hyperparameter is computed.
829
+
830
+ Returns
831
+ -------
832
+ K : ndarray of shape (n_samples_X, n_samples_Y)
833
+ Kernel k(X, Y)
834
+
835
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
836
+ optional
837
+ The gradient of the kernel k(X, X) with respect to the log of the
838
+ hyperparameter of the kernel. Only returned when `eval_gradient`
839
+ is True.
840
+ """
841
+ if eval_gradient:
842
+ K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
843
+ K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
844
+ return K1 + K2, np.dstack((K1_gradient, K2_gradient))
845
+ else:
846
+ return self.k1(X, Y) + self.k2(X, Y)
847
+
848
+ def diag(self, X):
849
+ """Returns the diagonal of the kernel k(X, X).
850
+
851
+ The result of this method is identical to `np.diag(self(X))`; however,
852
+ it can be evaluated more efficiently since only the diagonal is
853
+ evaluated.
854
+
855
+ Parameters
856
+ ----------
857
+ X : array-like of shape (n_samples_X, n_features) or list of object
858
+ Argument to the kernel.
859
+
860
+ Returns
861
+ -------
862
+ K_diag : ndarray of shape (n_samples_X,)
863
+ Diagonal of kernel k(X, X)
864
+ """
865
+ return self.k1.diag(X) + self.k2.diag(X)
866
+
867
+ def __repr__(self):
868
+ return "{0} + {1}".format(self.k1, self.k2)
869
+
870
+
871
+ class Product(KernelOperator):
872
+ """The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2`
873
+ and combines them via
874
+
875
+ .. math::
876
+ k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)
877
+
878
+ Note that the `__mul__` magic method is overridden, so
879
+ `Product(RBF(), RBF())` is equivalent to using the * operator
880
+ with `RBF() * RBF()`.
881
+
882
+ Read more in the :ref:`User Guide <gp_kernels>`.
883
+
884
+ .. versionadded:: 0.18
885
+
886
+ Parameters
887
+ ----------
888
+ k1 : Kernel
889
+ The first base-kernel of the product-kernel
890
+
891
+ k2 : Kernel
892
+ The second base-kernel of the product-kernel
893
+
894
+
895
+ Examples
896
+ --------
897
+ >>> from sklearn.datasets import make_friedman2
898
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
899
+ >>> from sklearn.gaussian_process.kernels import (RBF, Product,
900
+ ... ConstantKernel)
901
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
902
+ >>> kernel = Product(ConstantKernel(2), RBF())
903
+ >>> gpr = GaussianProcessRegressor(kernel=kernel,
904
+ ... random_state=0).fit(X, y)
905
+ >>> gpr.score(X, y)
906
+ 1.0
907
+ >>> kernel
908
+ 1.41**2 * RBF(length_scale=1)
909
+ """
910
+
911
+ def __call__(self, X, Y=None, eval_gradient=False):
912
+ """Return the kernel k(X, Y) and optionally its gradient.
913
+
914
+ Parameters
915
+ ----------
916
+ X : array-like of shape (n_samples_X, n_features) or list of object
917
+ Left argument of the returned kernel k(X, Y)
918
+
919
+ Y : array-like of shape (n_samples_Y, n_features) or list of object,\
920
+ default=None
921
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
922
+ is evaluated instead.
923
+
924
+ eval_gradient : bool, default=False
925
+ Determines whether the gradient with respect to the log of
926
+ the kernel hyperparameter is computed.
927
+
928
+ Returns
929
+ -------
930
+ K : ndarray of shape (n_samples_X, n_samples_Y)
931
+ Kernel k(X, Y)
932
+
933
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
934
+ optional
935
+ The gradient of the kernel k(X, X) with respect to the log of the
936
+ hyperparameter of the kernel. Only returned when `eval_gradient`
937
+ is True.
938
+ """
939
+ if eval_gradient:
940
+ K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
941
+ K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
942
+ return K1 * K2, np.dstack(
943
+ (K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis])
944
+ )
945
+ else:
946
+ return self.k1(X, Y) * self.k2(X, Y)
947
+
948
+ def diag(self, X):
949
+ """Returns the diagonal of the kernel k(X, X).
950
+
951
+ The result of this method is identical to np.diag(self(X)); however,
952
+ it can be evaluated more efficiently since only the diagonal is
953
+ evaluated.
954
+
955
+ Parameters
956
+ ----------
957
+ X : array-like of shape (n_samples_X, n_features) or list of object
958
+ Argument to the kernel.
959
+
960
+ Returns
961
+ -------
962
+ K_diag : ndarray of shape (n_samples_X,)
963
+ Diagonal of kernel k(X, X)
964
+ """
965
+ return self.k1.diag(X) * self.k2.diag(X)
966
+
967
+ def __repr__(self):
968
+ return "{0} * {1}".format(self.k1, self.k2)
969
+
970
+
971
+ class Exponentiation(Kernel):
972
+ """The Exponentiation kernel takes one base kernel and a scalar parameter
973
+ :math:`p` and combines them via
974
+
975
+ .. math::
976
+ k_{exp}(X, Y) = k(X, Y) ^p
977
+
978
+ Note that the `__pow__` magic method is overridden, so
979
+ `Exponentiation(RBF(), 2)` is equivalent to using the ** operator
980
+ with `RBF() ** 2`.
981
+
982
+
983
+ Read more in the :ref:`User Guide <gp_kernels>`.
984
+
985
+ .. versionadded:: 0.18
986
+
987
+ Parameters
988
+ ----------
989
+ kernel : Kernel
990
+ The base kernel
991
+
992
+ exponent : float
993
+ The exponent for the base kernel
994
+
995
+
996
+ Examples
997
+ --------
998
+ >>> from sklearn.datasets import make_friedman2
999
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
1000
+ >>> from sklearn.gaussian_process.kernels import (RationalQuadratic,
1001
+ ... Exponentiation)
1002
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
1003
+ >>> kernel = Exponentiation(RationalQuadratic(), exponent=2)
1004
+ >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
1005
+ ... random_state=0).fit(X, y)
1006
+ >>> gpr.score(X, y)
1007
+ 0.419...
1008
+ >>> gpr.predict(X[:1,:], return_std=True)
1009
+ (array([635.5...]), array([0.559...]))
1010
+ """
1011
+
1012
+ def __init__(self, kernel, exponent):
1013
+ self.kernel = kernel
1014
+ self.exponent = exponent
1015
+
1016
+ def get_params(self, deep=True):
1017
+ """Get parameters of this kernel.
1018
+
1019
+ Parameters
1020
+ ----------
1021
+ deep : bool, default=True
1022
+ If True, will return the parameters for this estimator and
1023
+ contained subobjects that are estimators.
1024
+
1025
+ Returns
1026
+ -------
1027
+ params : dict
1028
+ Parameter names mapped to their values.
1029
+ """
1030
+ params = dict(kernel=self.kernel, exponent=self.exponent)
1031
+ if deep:
1032
+ deep_items = self.kernel.get_params().items()
1033
+ params.update(("kernel__" + k, val) for k, val in deep_items)
1034
+ return params
1035
+
1036
+ @property
1037
+ def hyperparameters(self):
1038
+ """Returns a list of all hyperparameter."""
1039
+ r = []
1040
+ for hyperparameter in self.kernel.hyperparameters:
1041
+ r.append(
1042
+ Hyperparameter(
1043
+ "kernel__" + hyperparameter.name,
1044
+ hyperparameter.value_type,
1045
+ hyperparameter.bounds,
1046
+ hyperparameter.n_elements,
1047
+ )
1048
+ )
1049
+ return r
1050
+
1051
+ @property
1052
+ def theta(self):
1053
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
1054
+
1055
+ Note that theta are typically the log-transformed values of the
1056
+ kernel's hyperparameters as this representation of the search space
1057
+ is more amenable for hyperparameter search, as hyperparameters like
1058
+ length-scales naturally live on a log-scale.
1059
+
1060
+ Returns
1061
+ -------
1062
+ theta : ndarray of shape (n_dims,)
1063
+ The non-fixed, log-transformed hyperparameters of the kernel
1064
+ """
1065
+ return self.kernel.theta
1066
+
1067
+ @theta.setter
1068
+ def theta(self, theta):
1069
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
1070
+
1071
+ Parameters
1072
+ ----------
1073
+ theta : ndarray of shape (n_dims,)
1074
+ The non-fixed, log-transformed hyperparameters of the kernel
1075
+ """
1076
+ self.kernel.theta = theta
1077
+
1078
+ @property
1079
+ def bounds(self):
1080
+ """Returns the log-transformed bounds on the theta.
1081
+
1082
+ Returns
1083
+ -------
1084
+ bounds : ndarray of shape (n_dims, 2)
1085
+ The log-transformed bounds on the kernel's hyperparameters theta
1086
+ """
1087
+ return self.kernel.bounds
1088
+
1089
+ def __eq__(self, b):
1090
+ if type(self) != type(b):
1091
+ return False
1092
+ return self.kernel == b.kernel and self.exponent == b.exponent
1093
+
1094
+ def __call__(self, X, Y=None, eval_gradient=False):
1095
+ """Return the kernel k(X, Y) and optionally its gradient.
1096
+
1097
+ Parameters
1098
+ ----------
1099
+ X : array-like of shape (n_samples_X, n_features) or list of object
1100
+ Left argument of the returned kernel k(X, Y)
1101
+
1102
+ Y : array-like of shape (n_samples_Y, n_features) or list of object,\
1103
+ default=None
1104
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1105
+ is evaluated instead.
1106
+
1107
+ eval_gradient : bool, default=False
1108
+ Determines whether the gradient with respect to the log of
1109
+ the kernel hyperparameter is computed.
1110
+
1111
+ Returns
1112
+ -------
1113
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1114
+ Kernel k(X, Y)
1115
+
1116
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
1117
+ optional
1118
+ The gradient of the kernel k(X, X) with respect to the log of the
1119
+ hyperparameter of the kernel. Only returned when `eval_gradient`
1120
+ is True.
1121
+ """
1122
+ if eval_gradient:
1123
+ K, K_gradient = self.kernel(X, Y, eval_gradient=True)
1124
+ K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
1125
+ return K**self.exponent, K_gradient
1126
+ else:
1127
+ K = self.kernel(X, Y, eval_gradient=False)
1128
+ return K**self.exponent
1129
+
1130
+ def diag(self, X):
1131
+ """Returns the diagonal of the kernel k(X, X).
1132
+
1133
+ The result of this method is identical to np.diag(self(X)); however,
1134
+ it can be evaluated more efficiently since only the diagonal is
1135
+ evaluated.
1136
+
1137
+ Parameters
1138
+ ----------
1139
+ X : array-like of shape (n_samples_X, n_features) or list of object
1140
+ Argument to the kernel.
1141
+
1142
+ Returns
1143
+ -------
1144
+ K_diag : ndarray of shape (n_samples_X,)
1145
+ Diagonal of kernel k(X, X)
1146
+ """
1147
+ return self.kernel.diag(X) ** self.exponent
1148
+
1149
+ def __repr__(self):
1150
+ return "{0} ** {1}".format(self.kernel, self.exponent)
1151
+
1152
+ def is_stationary(self):
1153
+ """Returns whether the kernel is stationary."""
1154
+ return self.kernel.is_stationary()
1155
+
1156
+ @property
1157
+ def requires_vector_input(self):
1158
+ """Returns whether the kernel is defined on discrete structures."""
1159
+ return self.kernel.requires_vector_input
1160
+
1161
+
1162
+ class ConstantKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
1163
+ """Constant kernel.
1164
+
1165
+ Can be used as part of a product-kernel where it scales the magnitude of
1166
+ the other factor (kernel) or as part of a sum-kernel, where it modifies
1167
+ the mean of the Gaussian process.
1168
+
1169
+ .. math::
1170
+ k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2
1171
+
1172
+ Adding a constant kernel is equivalent to adding a constant::
1173
+
1174
+ kernel = RBF() + ConstantKernel(constant_value=2)
1175
+
1176
+ is the same as::
1177
+
1178
+ kernel = RBF() + 2
1179
+
1180
+
1181
+ Read more in the :ref:`User Guide <gp_kernels>`.
1182
+
1183
+ .. versionadded:: 0.18
1184
+
1185
+ Parameters
1186
+ ----------
1187
+ constant_value : float, default=1.0
1188
+ The constant value which defines the covariance:
1189
+ k(x_1, x_2) = constant_value
1190
+
1191
+ constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
1192
+ The lower and upper bound on `constant_value`.
1193
+ If set to "fixed", `constant_value` cannot be changed during
1194
+ hyperparameter tuning.
1195
+
1196
+ Examples
1197
+ --------
1198
+ >>> from sklearn.datasets import make_friedman2
1199
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
1200
+ >>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
1201
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
1202
+ >>> kernel = RBF() + ConstantKernel(constant_value=2)
1203
+ >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
1204
+ ... random_state=0).fit(X, y)
1205
+ >>> gpr.score(X, y)
1206
+ 0.3696...
1207
+ >>> gpr.predict(X[:1,:], return_std=True)
1208
+ (array([606.1...]), array([0.24...]))
1209
+ """
1210
+
1211
+ def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
1212
+ self.constant_value = constant_value
1213
+ self.constant_value_bounds = constant_value_bounds
1214
+
1215
+ @property
1216
+ def hyperparameter_constant_value(self):
1217
+ return Hyperparameter("constant_value", "numeric", self.constant_value_bounds)
1218
+
1219
+ def __call__(self, X, Y=None, eval_gradient=False):
1220
+ """Return the kernel k(X, Y) and optionally its gradient.
1221
+
1222
+ Parameters
1223
+ ----------
1224
+ X : array-like of shape (n_samples_X, n_features) or list of object
1225
+ Left argument of the returned kernel k(X, Y)
1226
+
1227
+ Y : array-like of shape (n_samples_X, n_features) or list of object, \
1228
+ default=None
1229
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1230
+ is evaluated instead.
1231
+
1232
+ eval_gradient : bool, default=False
1233
+ Determines whether the gradient with respect to the log of
1234
+ the kernel hyperparameter is computed.
1235
+ Only supported when Y is None.
1236
+
1237
+ Returns
1238
+ -------
1239
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1240
+ Kernel k(X, Y)
1241
+
1242
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
1243
+ optional
1244
+ The gradient of the kernel k(X, X) with respect to the log of the
1245
+ hyperparameter of the kernel. Only returned when eval_gradient
1246
+ is True.
1247
+ """
1248
+ if Y is None:
1249
+ Y = X
1250
+ elif eval_gradient:
1251
+ raise ValueError("Gradient can only be evaluated when Y is None.")
1252
+
1253
+ K = np.full(
1254
+ (_num_samples(X), _num_samples(Y)),
1255
+ self.constant_value,
1256
+ dtype=np.array(self.constant_value).dtype,
1257
+ )
1258
+ if eval_gradient:
1259
+ if not self.hyperparameter_constant_value.fixed:
1260
+ return (
1261
+ K,
1262
+ np.full(
1263
+ (_num_samples(X), _num_samples(X), 1),
1264
+ self.constant_value,
1265
+ dtype=np.array(self.constant_value).dtype,
1266
+ ),
1267
+ )
1268
+ else:
1269
+ return K, np.empty((_num_samples(X), _num_samples(X), 0))
1270
+ else:
1271
+ return K
1272
+
1273
+ def diag(self, X):
1274
+ """Returns the diagonal of the kernel k(X, X).
1275
+
1276
+ The result of this method is identical to np.diag(self(X)); however,
1277
+ it can be evaluated more efficiently since only the diagonal is
1278
+ evaluated.
1279
+
1280
+ Parameters
1281
+ ----------
1282
+ X : array-like of shape (n_samples_X, n_features) or list of object
1283
+ Argument to the kernel.
1284
+
1285
+ Returns
1286
+ -------
1287
+ K_diag : ndarray of shape (n_samples_X,)
1288
+ Diagonal of kernel k(X, X)
1289
+ """
1290
+ return np.full(
1291
+ _num_samples(X),
1292
+ self.constant_value,
1293
+ dtype=np.array(self.constant_value).dtype,
1294
+ )
1295
+
1296
+ def __repr__(self):
1297
+ return "{0:.3g}**2".format(np.sqrt(self.constant_value))
1298
+
1299
+
1300
class WhiteKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
    """White kernel.

    Models independent, identically normally-distributed noise. Its main
    use-case is as a component of a sum-kernel, where it accounts for the
    noise part of the signal. The parameter noise_level equals the variance
    of this noise.

    .. math::
        k(x_1, x_2) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0


    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    noise_level : float, default=1.0
        Parameter controlling the noise level (variance)

    noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'noise_level'.
        If set to "fixed", 'noise_level' cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1... ]), array([316.6..., 316.6...]))
    """

    def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
        self.noise_level = noise_level
        self.noise_level_bounds = noise_level_bounds

    @property
    def hyperparameter_noise_level(self):
        # Exposes noise_level as the kernel's single tunable hyperparameter.
        return Hyperparameter("noise_level", "numeric", self.noise_level_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object,\
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if Y is not None and eval_gradient:
            raise ValueError("Gradient can only be evaluated when Y is None.")

        if Y is not None:
            # Samples from X and Y are treated as distinct points, so their
            # cross-covariance under white noise is identically zero.
            return np.zeros((_num_samples(X), _num_samples(Y)))

        n = _num_samples(X)
        K = self.noise_level * np.eye(n)
        if not eval_gradient:
            return K
        if self.hyperparameter_noise_level.fixed:
            # Fixed hyperparameter: gradient has zero trailing dimensions.
            return K, np.empty((n, n, 0))
        # dK/d(log noise_level) = noise_level * I.
        return K, self.noise_level * np.eye(n)[:, :, np.newaxis]

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        # Diagonal entries are constant; keep the dtype of noise_level.
        noise_dtype = np.array(self.noise_level).dtype
        return np.full(_num_samples(X), self.noise_level, dtype=noise_dtype)

    def __repr__(self):
        return f"{self.__class__.__name__}(noise_level={self.noise_level:.3g})"
1421
+
1422
+
1423
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    """Radial basis function kernel (aka squared-exponential kernel).

    The RBF kernel is a stationary kernel. It is also known as the
    "squared exponential" kernel. It is parameterized by a length scale
    parameter :math:`l>0`, which can either be a scalar (isotropic variant
    of the kernel) or a vector with the same number of dimensions as the inputs
    X (anisotropic variant of the kernel). The kernel is given by:

    .. math::
        k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right)

    where :math:`l` is the length scale of the kernel and
    :math:`d(\\cdot,\\cdot)` is the Euclidean distance.
    For advice on how to set the length scale parameter, see e.g. [1]_.

    This kernel is infinitely differentiable, which implies that GPs with this
    kernel as covariance function have mean square derivatives of all orders,
    and are thus very smooth.
    See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float or ndarray of shape (n_features,), default=1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
        Advice on Covariance functions".
        <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

    .. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * RBF(1.0)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8354..., 0.03228..., 0.1322...],
           [0.7906..., 0.0652..., 0.1441...]])
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
        self.length_scale = length_scale
        self.length_scale_bounds = length_scale_bounds

    @property
    def anisotropic(self):
        # Anisotropic iff length_scale is a sequence with more than one entry
        # (one length scale per feature dimension).
        return np.iterable(self.length_scale) and len(self.length_scale) > 1

    @property
    def hyperparameter_length_scale(self):
        if self.anisotropic:
            return Hyperparameter(
                "length_scale",
                "numeric",
                self.length_scale_bounds,
                len(self.length_scale),
            )
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            # Scaling X by length_scale first makes dists = d(x_i, x_j)^2 / l^2,
            # so K = exp(-0.5 * d^2 / l^2) below.
            dists = pdist(X / length_scale, metric="sqeuclidean")
            K = np.exp(-0.5 * dists)
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
            K = np.exp(-0.5 * dists)

        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                return K, np.empty((X.shape[0], X.shape[0], 0))
            elif not self.anisotropic or length_scale.shape[0] == 1:
                # Isotropic: dK/d(log l) = K * d^2 / l^2, and `dists` already
                # holds d^2 / l^2 (condensed form, hence squareform).
                K_gradient = (K * squareform(dists))[:, :, np.newaxis]
                return K, K_gradient
            elif self.anisotropic:
                # We need to recompute the pairwise dimension-wise distances
                K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
                    length_scale**2
                )
                K_gradient *= K[..., np.newaxis]
                return K, K_gradient
        else:
            return K

    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}])".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
            )
        else:  # isotropic
            return "{0}(length_scale={1:.3g})".format(
                self.__class__.__name__, np.ravel(self.length_scale)[0]
            )
1574
+
1575
+
1576
class Matern(RBF):
    """Matern kernel.

    The class of Matern kernels is a generalization of the :class:`RBF`.
    It has an additional parameter :math:`\\nu` which controls the
    smoothness of the resulting function. The smaller :math:`\\nu`,
    the less smooth the approximated function is.
    As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to
    the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel
    becomes identical to the absolute exponential kernel.
    Important intermediate values are
    :math:`\\nu=1.5` (once differentiable functions)
    and :math:`\\nu=2.5` (twice differentiable functions).

    The kernel is given by:

    .. math::
         k(x_i, x_j) =  \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg(
         \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )
         \\Bigg)^\\nu K_\\nu\\Bigg(
         \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )\\Bigg)



    where :math:`d(\\cdot,\\cdot)` is the Euclidean distance,
    :math:`K_{\\nu}(\\cdot)` is a modified Bessel function and
    :math:`\\Gamma(\\cdot)` is the gamma function.
    See [1]_, Chapter 4, Section 4.2, for details regarding the different
    variants of the Matern kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float or ndarray of shape (n_features,), default=1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    nu : float, default=1.5
        The parameter nu controlling the smoothness of the learned function.
        The smaller nu, the less smooth the approximated function is.
        For nu=inf, the kernel becomes equivalent to the RBF kernel and for
        nu=0.5 to the absolute exponential kernel. Important intermediate
        values are nu=1.5 (once differentiable functions) and nu=2.5
        (twice differentiable functions). Note that values of nu not in
        [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (appr. 10 times higher) since they require to evaluate the modified
        Bessel function. Furthermore, in contrast to l, nu is kept fixed to
        its initial value and not optimized.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import Matern
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8513..., 0.0368..., 0.1117...],
            [0.8086..., 0.0693..., 0.1220...]])
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5):
        super().__init__(length_scale, length_scale_bounds)
        self.nu = nu

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            # Condensed (upper-triangular) distances; squareform'd after the
            # nu-specific transformation below.
            dists = pdist(X / length_scale, metric="euclidean")
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale, metric="euclidean")

        # Closed-form expressions exist for nu in {0.5, 1.5, 2.5, inf}; any
        # other nu falls through to the (expensive) modified Bessel function.
        if self.nu == 0.5:
            K = np.exp(-dists)
        elif self.nu == 1.5:
            K = dists * math.sqrt(3)
            K = (1.0 + K) * np.exp(-K)
        elif self.nu == 2.5:
            K = dists * math.sqrt(5)
            K = (1.0 + K + K**2 / 3.0) * np.exp(-K)
        elif self.nu == np.inf:
            # Limit nu -> inf recovers the RBF (squared-exponential) kernel.
            K = np.exp(-(dists**2) / 2.0)
        else:  # general case; expensive to evaluate
            K = dists
            K[K == 0.0] += np.finfo(float).eps  # strict zeros result in nan
            tmp = math.sqrt(2 * self.nu) * K
            # K is transformed in place: constant prefactor, then the two
            # nu-dependent factors of the Matern formula.
            K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu))
            K *= tmp**self.nu
            K *= kv(self.nu, tmp)

        if Y is None:
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)

        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                K_gradient = np.empty((X.shape[0], X.shape[0], 0))
                return K, K_gradient

            # We need to recompute the pairwise dimension-wise distances
            if self.anisotropic:
                D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
                    length_scale**2
                )
            else:
                D = squareform(dists**2)[:, :, np.newaxis]

            if self.nu == 0.5:
                # `where=` guards the 0/0 case on the diagonal (d == 0).
                denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
                divide_result = np.zeros_like(D)
                np.divide(
                    D,
                    denominator,
                    out=divide_result,
                    where=denominator != 0,
                )
                K_gradient = K[..., np.newaxis] * divide_result
            elif self.nu == 1.5:
                K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
            elif self.nu == 2.5:
                tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
                K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
            elif self.nu == np.inf:
                K_gradient = D * K[..., np.newaxis]
            else:
                # approximate gradient numerically
                def f(theta):  # helper function
                    return self.clone_with_theta(theta)(X, Y)

                return K, _approx_fprime(self.theta, f, 1e-10)

            if not self.anisotropic:
                # Collapse per-dimension gradients into the single isotropic
                # length-scale dimension.
                return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
            else:
                return K, K_gradient
        else:
            return K

    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}], nu={2:.3g})".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
                self.nu,
            )
        else:
            return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
                self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu
            )
1776
+
1777
+
1778
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    """Rational Quadratic kernel.

    The RationalQuadratic kernel can be seen as a scale mixture (an infinite
    sum) of RBF kernels with different characteristic length scales. It is
    parameterized by a length scale parameter :math:`l>0` and a scale
    mixture parameter :math:`\\alpha>0`. Only the isotropic variant
    where length_scale :math:`l` is a scalar is supported at the moment.
    The kernel is given by:

    .. math::
        k(x_i, x_j) = \\left(
        1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha  l^2}\\right)^{-\\alpha}

    where :math:`\\alpha` is the scale mixture parameter, :math:`l` is
    the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the
    Euclidean distance.
    For advice on how to set the parameters, see e.g. [1]_.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float > 0, default=1.0
        The length scale of the kernel.

    alpha : float > 0, default=1.0
        Scale mixture parameter

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'alpha'.
        If set to "fixed", 'alpha' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
        Advice on Covariance functions".
        <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RationalQuadratic
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8881..., 0.0566..., 0.05518...],
            [0.8678..., 0.0707... , 0.0614...]])
    """

    def __init__(
        self,
        length_scale=1.0,
        alpha=1.0,
        length_scale_bounds=(1e-5, 1e5),
        alpha_bounds=(1e-5, 1e5),
    ):
        self.length_scale = length_scale
        self.alpha = alpha
        self.length_scale_bounds = length_scale_bounds
        self.alpha_bounds = alpha_bounds

    @property
    def hyperparameter_length_scale(self):
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    @property
    def hyperparameter_alpha(self):
        return Hyperparameter("alpha", "numeric", self.alpha_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if len(np.atleast_1d(self.length_scale)) > 1:
            raise AttributeError(
                "RationalQuadratic kernel only supports isotropic version, "
                "please use a single scalar for length_scale"
            )
        X = np.atleast_2d(X)
        if Y is None:
            dists = squareform(pdist(X, metric="sqeuclidean"))
            tmp = dists / (2 * self.alpha * self.length_scale**2)
            base = 1 + tmp
            K = base**-self.alpha
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric="sqeuclidean")
            K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha

        if eval_gradient:
            # NOTE: `base` and `dists` from the Y-is-None branch are reused
            # here. This is safe because eval_gradient with Y != None raises
            # above, so this point is only reachable when Y is None.
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = dists * K / (self.length_scale**2 * base)
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # l is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))

            # gradient with respect to alpha
            if not self.hyperparameter_alpha.fixed:
                alpha_gradient = K * (
                    -self.alpha * np.log(base)
                    + dists / (2 * self.length_scale**2 * base)
                )
                alpha_gradient = alpha_gradient[:, :, np.newaxis]
            else:  # alpha is kept fixed
                alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))

            return K, np.dstack((alpha_gradient, length_scale_gradient))
        else:
            return K

    def __repr__(self):
        return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
            self.__class__.__name__, self.alpha, self.length_scale
        )
1932
+
1933
+
1934
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    r"""Exp-Sine-Squared kernel (aka periodic kernel).

    The ExpSineSquared kernel allows one to model functions which repeat
    themselves exactly. It is parameterized by a length scale
    parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
    Only the isotropic variant where :math:`l` is a scalar is
    supported at the moment. The kernel is given by:

    .. math::
        k(x_i, x_j) = \text{exp}\left(-
        \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^ 2} \right)

    where :math:`l` is the length scale of the kernel, :math:`p` the
    periodicity of the kernel and :math:`d(\\cdot,\\cdot)` is the
    Euclidean distance.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------

    length_scale : float > 0, default=1.0
        The length scale of the kernel.

    periodicity : float > 0, default=1.0
        The periodicity of the kernel.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'periodicity'.
        If set to "fixed", 'periodicity' cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import ExpSineSquared
    >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
    >>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
    >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.0144...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([425.6..., 457.5...]), array([0.3894..., 0.3467...]))
    """

    def __init__(
        self,
        length_scale=1.0,
        periodicity=1.0,
        length_scale_bounds=(1e-5, 1e5),
        periodicity_bounds=(1e-5, 1e5),
    ):
        self.length_scale = length_scale
        self.periodicity = periodicity
        self.length_scale_bounds = length_scale_bounds
        self.periodicity_bounds = periodicity_bounds

    @property
    def hyperparameter_length_scale(self):
        """Returns the length scale"""
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    @property
    def hyperparameter_periodicity(self):
        return Hyperparameter("periodicity", "numeric", self.periodicity_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        if Y is None:
            dists = squareform(pdist(X, metric="euclidean"))
            arg = np.pi * dists / self.periodicity
            sin_of_arg = np.sin(arg)
            K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric="euclidean")
            K = np.exp(
                -2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2
            )

        if eval_gradient:
            # NOTE: `arg` and `sin_of_arg` from the Y-is-None branch are reused
            # here. This is safe because eval_gradient with Y != None raises
            # above, so this point is only reachable when Y is None.
            cos_of_arg = np.cos(arg)
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # length_scale is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
            # gradient with respect to p
            if not self.hyperparameter_periodicity.fixed:
                periodicity_gradient = (
                    4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K
                )
                periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
            else:  # p is kept fixed
                periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))

            return K, np.dstack((length_scale_gradient, periodicity_gradient))
        else:
            return K

    def __repr__(self):
        return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
            self.__class__.__name__, self.length_scale, self.periodicity
        )
2077
+
2078
+
2079
class DotProduct(Kernel):
    r"""Dot-Product kernel.

    The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting :math:`N(0, 1)` priors on the coefficients
    of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
    on the bias. The DotProduct kernel is invariant to a rotation of
    the coordinates about the origin, but not translations.
    It is parameterized by a parameter sigma_0 :math:`\sigma`
    which controls the inhomogeneity of the kernel. For :math:`\sigma_0^2 = 0`,
    the kernel is called the homogeneous linear kernel, otherwise
    it is inhomogeneous. The kernel is given by

    .. math::
        k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j

    The DotProduct kernel is commonly combined with exponentiation.

    See [1]_, Chapter 4, Section 4.2, for further details regarding the
    DotProduct kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    sigma_0 : float >= 0, default=1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.

    sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'sigma_0'.
        If set to "fixed", 'sigma_0' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel()
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
    """

    def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
        self.sigma_0 = sigma_0
        self.sigma_0_bounds = sigma_0_bounds

    @property
    def hyperparameter_sigma_0(self):
        # Expose sigma_0 as a tunable hyperparameter (optimized in log-space
        # by the GP optimizer unless its bounds are "fixed").
        return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.

        Raises
        ------
        ValueError
            If `eval_gradient=True` while `Y` is not None.
        """
        X = np.atleast_2d(X)
        if Y is None:
            K = np.inner(X, X) + self.sigma_0**2
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            K = np.inner(X, Y) + self.sigma_0**2

        if eval_gradient:
            if not self.hyperparameter_sigma_0.fixed:
                K_gradient = np.empty((K.shape[0], K.shape[1], 1))
                # dK/d(log sigma_0) = 2 * sigma_0**2, constant over all pairs.
                K_gradient[..., 0] = 2 * self.sigma_0**2
                return K, K_gradient
            else:
                # Fixed hyperparameter: gradient has zero trailing dimensions.
                return K, np.empty((X.shape[0], X.shape[0], 0))
        else:
            return K

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y).

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X).
        """
        # Row-wise squared norms via einsum avoid forming the full Gram matrix.
        return np.einsum("ij,ij->i", X, X) + self.sigma_0**2

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return False

    def __repr__(self):
        return "{0}(sigma_0={1:.3g})".format(self.__class__.__name__, self.sigma_0)
2215
+
2216
+
2217
+ # adapted from scipy/optimize/optimize.py for functions with 2d output
2218
+ def _approx_fprime(xk, f, epsilon, args=()):
2219
+ f0 = f(*((xk,) + args))
2220
+ grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
2221
+ ei = np.zeros((len(xk),), float)
2222
+ for k in range(len(xk)):
2223
+ ei[k] = 1.0
2224
+ d = epsilon * ei
2225
+ grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
2226
+ ei[k] = 0.0
2227
+ return grad
2228
+
2229
+
2230
class PairwiseKernel(Kernel):
    """Wrapper for kernels in sklearn.metrics.pairwise.

    A thin wrapper around the functionality of the kernels in
    sklearn.metrics.pairwise.

    Note: Evaluation of eval_gradient is not analytic but numeric and all
    kernels support only isotropic distances. The parameter gamma is
    considered to be a hyperparameter and may be optimized. The other
    kernel parameters are set directly at initialization and are kept
    fixed.

    .. versionadded:: 0.18

    Parameters
    ----------
    gamma : float, default=1.0
        Parameter gamma of the pairwise kernel specified by metric. It should
        be positive.

    gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'gamma'.
        If set to "fixed", 'gamma' cannot be changed during
        hyperparameter tuning.

    metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \
            "rbf", "laplacian", "sigmoid", "cosine"} or callable, \
            default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    pairwise_kernels_kwargs : dict, default=None
        All entries of this dict (if any) are passed as keyword arguments to
        the pairwise kernel function.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import PairwiseKernel
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = PairwiseKernel(metric='rbf')
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8880..., 0.05663..., 0.05532...],
           [0.8676..., 0.07073..., 0.06165...]])
    """

    def __init__(
        self,
        gamma=1.0,
        gamma_bounds=(1e-5, 1e5),
        metric="linear",
        pairwise_kernels_kwargs=None,
    ):
        self.gamma = gamma
        self.gamma_bounds = gamma_bounds
        self.metric = metric
        self.pairwise_kernels_kwargs = pairwise_kernels_kwargs

    @property
    def hyperparameter_gamma(self):
        # gamma is the single tunable hyperparameter of this wrapper.
        return Hyperparameter("gamma", "numeric", self.gamma_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
        if self.pairwise_kernels_kwargs is None:
            pairwise_kernels_kwargs = {}

        X = np.atleast_2d(X)
        K = pairwise_kernels(
            X,
            Y,
            metric=self.metric,
            gamma=self.gamma,
            filter_params=True,
            **pairwise_kernels_kwargs,
        )
        if eval_gradient:
            if self.hyperparameter_gamma.fixed:
                return K, np.empty((X.shape[0], X.shape[0], 0))
            else:
                # approximate gradient numerically; theta is log(gamma), so the
                # helper re-exponentiates before calling pairwise_kernels.
                def f(gamma):  # helper function
                    return pairwise_kernels(
                        X,
                        Y,
                        metric=self.metric,
                        gamma=np.exp(gamma),
                        filter_params=True,
                        **pairwise_kernels_kwargs,
                    )

                return K, _approx_fprime(self.theta, f, 1e-10)
        else:
            return K

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        # We have to fall back to slow way of computing diagonal: evaluate the
        # kernel row by row, since arbitrary pairwise metrics offer no shortcut.
        return np.apply_along_axis(self, 1, X).ravel()

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return self.metric in ["rbf"]

    def __repr__(self):
        return "{0}(gamma={1}, metric={2})".format(
            self.__class__.__name__, self.gamma, self.metric
        )
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py ADDED
File without changes
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc ADDED
Binary file (3.18 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc ADDED
Binary file (8.22 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc ADDED
Binary file (21.3 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc ADDED
Binary file (9.4 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sklearn.gaussian_process.kernels import Kernel, Hyperparameter
2
+ from sklearn.gaussian_process.kernels import GenericKernelMixin
3
+ from sklearn.gaussian_process.kernels import StationaryKernelMixin
4
+ import numpy as np
5
+ from sklearn.base import clone
6
+
7
+
8
class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel):
    """
    A minimal (but valid) convolutional kernel for sequences of variable
    length.

    Similarity between two sequences is the sum over all character pairs of
    1.0 for matching characters and `baseline_similarity` otherwise.
    """

    def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
        self.baseline_similarity = baseline_similarity
        self.baseline_similarity_bounds = baseline_similarity_bounds

    @property
    def hyperparameter_baseline_similarity(self):
        # The single tunable hyperparameter of this kernel.
        return Hyperparameter(
            "baseline_similarity", "numeric", self.baseline_similarity_bounds
        )

    def _f(self, s1, s2):
        # Kernel value: sum of per-character-pair similarities.
        return sum(
            [1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2]
        )

    def _g(self, s1, s2):
        # Gradient w.r.t. baseline_similarity: count of mismatching pairs.
        return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2])

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return k(X, Y) and, optionally, its gradient w.r.t. the
        hyperparameter (one trailing gradient dimension)."""
        if Y is None:
            Y = X

        if eval_gradient:
            return (
                np.array([[self._f(x, y) for y in Y] for x in X]),
                np.array([[[self._g(x, y)] for y in Y] for x in X]),
            )
        else:
            return np.array([[self._f(x, y) for y in Y] for x in X])

    def diag(self, X):
        """Return the diagonal of k(X, X) without building the full matrix."""
        return np.array([self._f(x, x) for x in X])

    def clone_with_theta(self, theta):
        """Return a clone of this kernel with hyperparameters set to `theta`."""
        cloned = clone(self)
        cloned.theta = theta
        return cloned
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Testing for Gaussian process classification """

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause

import warnings
import numpy as np

from scipy.optimize import approx_fprime

import pytest

from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import (
    RBF,
    CompoundKernel,
    ConstantKernel as C,
    WhiteKernel,
)
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.exceptions import ConvergenceWarning

from sklearn.utils._testing import assert_almost_equal, assert_array_equal


def f(x):
    return np.sin(x)


# Shared fixtures: a 1-d regression grid, binary labels from sign(sin(x)),
# and a 3-class labeling by thresholding sin(x).
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int)  # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2


fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [
    RBF(length_scale=0.1),
    fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
]
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]


@pytest.mark.parametrize("kernel", kernels)
def test_predict_consistent(kernel):
    # Check binary predict decision has also predicted probability above 0.5.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)


def test_predict_consistent_structured():
    # Check binary predict decision has also predicted probability above 0.5.
    X = ["A", "AB", "B"]
    y = np.array([True, False, True])
    kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_lml_improving(kernel):
    # Test that hyperparameter-tuning improves log-marginal likelihood.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood(
        kernel.theta
    )


@pytest.mark.parametrize("kernel", kernels)
def test_lml_precomputed(kernel):
    # Test that lml of optimized kernel is stored correctly.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_almost_equal(
        gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7
    )


@pytest.mark.parametrize("kernel", kernels)
def test_lml_without_cloning_kernel(kernel):
    # Test that clone_kernel=False has side-effects of kernel.theta.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64)

    gpc.log_marginal_likelihood(input_theta, clone_kernel=False)
    assert_almost_equal(gpc.kernel_.theta, input_theta, 7)


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
    # Test that we are in local maximum after hyperparameter-optimization.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpc.log_marginal_likelihood(gpc.kernel_.theta, True)

    # Either the gradient is ~0 or the optimum sits on a bound.
    assert np.all(
        (np.abs(lml_gradient) < 1e-4)
        | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0])
        | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])
    )


@pytest.mark.parametrize("kernel", kernels)
def test_lml_gradient(kernel):
    # Compare analytic and numeric gradient of log marginal likelihood.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
    lml_gradient_approx = approx_fprime(
        kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10
    )

    assert_almost_equal(lml_gradient, lml_gradient_approx, 3)


def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the log marginal likelihood of the chosen theta.
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0

    kernel = C(1.0, (1e-2, 1e2)) * RBF(
        length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
    )
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessClassifier(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, random_state=0
        ).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        assert lml > last_lml - np.finfo(np.float32).eps
        last_lml = lml


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_custom_optimizer(kernel):
    # Test that GPC can use externally defined optimizers.
    # Define a dummy optimizer that simply tests 10 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = initial_theta, obj_func(
            initial_theta, eval_gradient=False
        )
        for _ in range(10):
            theta = np.atleast_1d(
                rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
            )
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
    gpc.fit(X, y_mc)
    # Checks that optimizer improved marginal likelihood
    assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood(
        kernel.theta
    )


@pytest.mark.parametrize("kernel", kernels)
def test_multi_class(kernel):
    # Test GPC for multi-class classification problems.
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    assert_almost_equal(y_prob.sum(1), 1)

    y_pred = gpc.predict(X2)
    assert_array_equal(np.argmax(y_prob, 1), y_pred)


@pytest.mark.parametrize("kernel", kernels)
def test_multi_class_n_jobs(kernel):
    # Test that multi-class GPC produces identical results with n_jobs>1.
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
    gpc_2.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    y_prob_2 = gpc_2.predict_proba(X2)
    assert_almost_equal(y_prob, y_prob_2)


def test_warning_bounds():
    kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
    gpc = GaussianProcessClassifier(kernel=kernel)
    warning_message = (
        "The optimal value found for dimension 0 of parameter "
        "length_scale is close to the specified upper bound "
        "0.001. Increasing the bound and calling fit again may "
        "find a better value."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        gpc.fit(X, y)

    kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
        length_scale_bounds=[1e3, 1e5]
    )
    gpc_sum = GaussianProcessClassifier(kernel=kernel_sum)
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpc_sum.fit(X, y)

        assert len(record) == 2

        assert issubclass(record[0].category, ConvergenceWarning)
        assert (
            record[0].message.args[0]
            == "The optimal value found for "
            "dimension 0 of parameter "
            "k1__noise_level is close to the "
            "specified upper bound 0.001. "
            "Increasing the bound and calling "
            "fit again may find a better value."
        )

        assert issubclass(record[1].category, ConvergenceWarning)
        assert (
            record[1].message.args[0]
            == "The optimal value found for "
            "dimension 0 of parameter "
            "k2__length_scale is close to the "
            "specified lower bound 1000.0. "
            "Decreasing the bound and calling "
            "fit again may find a better value."
        )

    X_tile = np.tile(X, 2)
    kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
    gpc_dims = GaussianProcessClassifier(kernel=kernel_dims)

    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpc_dims.fit(X_tile, y)

        assert len(record) == 2

        assert issubclass(record[0].category, ConvergenceWarning)
        assert (
            record[0].message.args[0]
            == "The optimal value found for "
            "dimension 0 of parameter "
            "length_scale is close to the "
            "specified upper bound 100.0. "
            "Increasing the bound and calling "
            "fit again may find a better value."
        )

        assert issubclass(record[1].category, ConvergenceWarning)
        assert (
            record[1].message.args[0]
            == "The optimal value found for "
            "dimension 1 of parameter "
            "length_scale is close to the "
            "specified upper bound 100.0. "
            "Increasing the bound and calling "
            "fit again may find a better value."
        )


@pytest.mark.parametrize(
    "params, error_type, err_msg",
    [
        (
            {"kernel": CompoundKernel(0)},
            ValueError,
            "kernel cannot be a CompoundKernel",
        )
    ],
)
def test_gpc_fit_error(params, error_type, err_msg):
    """Check that expected error are raised during fit."""
    gpc = GaussianProcessClassifier(**params)
    with pytest.raises(error_type, match=err_msg):
        gpc.fit(X, y)
+ gpc.fit(X, y)
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py ADDED
@@ -0,0 +1,800 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Testing for Gaussian process regression """

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Modified by: Pete Green <p.l.green@liverpool.ac.uk>
# License: BSD 3 clause

import warnings
import sys
import re
import numpy as np

from scipy.optimize import approx_fprime

import pytest

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (
    RBF,
    ConstantKernel as C,
    WhiteKernel,
)
from sklearn.gaussian_process.kernels import DotProduct, ExpSineSquared
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import (
    assert_array_less,
    assert_almost_equal,
    assert_array_almost_equal,
    assert_allclose,
)


def f(x):
    return x * np.sin(x)


# Shared 1-d regression fixtures used by the tests below.
X = np.atleast_2d([1.0, 3.0, 5.0, 6.0, 7.0, 8.0]).T
X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
y = f(X).ravel()

fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [
    RBF(length_scale=1.0),
    fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
    C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
]
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]


@pytest.mark.parametrize("kernel", kernels)
def test_gpr_interpolation(kernel):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")

    # Test the interpolating property for different kernels.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.0)


def test_gpr_interpolation_structured():
    # Test the interpolating property for different kernels.
    kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
    X = ["A", "B", "C"]
    y = np.array([1, 2, 3])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(
        kernel(X, eval_gradient=True)[1].ravel(), (1 - np.eye(len(X))).ravel()
    )
    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.0)


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_lml_improving(kernel):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")

    # Test that hyperparameter-tuning improves log-marginal likelihood.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
        kernel.theta
    )


@pytest.mark.parametrize("kernel", kernels)
def test_lml_precomputed(kernel):
    # Test that lml of optimized kernel is stored correctly.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert gpr.log_marginal_likelihood(gpr.kernel_.theta) == pytest.approx(
        gpr.log_marginal_likelihood()
    )


@pytest.mark.parametrize("kernel", kernels)
def test_lml_without_cloning_kernel(kernel):
    # Test that lml of optimized kernel is stored correctly.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64)

    gpr.log_marginal_likelihood(input_theta, clone_kernel=False)
    assert_almost_equal(gpr.kernel_.theta, input_theta, 7)


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
    # Test that we are in local maximum after hyperparameter-optimization.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpr.log_marginal_likelihood(gpr.kernel_.theta, True)

    # Either the gradient is ~0 or the optimum sits on a bound.
    assert np.all(
        (np.abs(lml_gradient) < 1e-4)
        | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0])
        | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])
    )


@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_solution_inside_bounds(kernel):
    # Test that hyperparameter-optimization remains in bounds#
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    bounds = gpr.kernel_.bounds
    max_ = np.finfo(gpr.kernel_.theta.dtype).max
    tiny = 1e-10
    bounds[~np.isfinite(bounds[:, 1]), 1] = max_

    assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
    assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)


@pytest.mark.parametrize("kernel", kernels)
def test_lml_gradient(kernel):
    # Compare analytic and numeric gradient of log marginal likelihood.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
    lml_gradient_approx = approx_fprime(
        kernel.theta, lambda theta: gpr.log_marginal_likelihood(theta, False), 1e-10
    )

    assert_almost_equal(lml_gradient, lml_gradient_approx, 3)


@pytest.mark.parametrize("kernel", kernels)
def test_prior(kernel):
    # Test that GP prior has mean 0 and identical variances.
    gpr = GaussianProcessRegressor(kernel=kernel)

    y_mean, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_mean, 0, 5)
    if len(gpr.kernel.theta) > 1:
        # XXX: quite hacky, works only for current kernels
        assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
    else:
        assert_almost_equal(np.diag(y_cov), 1, 5)


@pytest.mark.parametrize("kernel", kernels)
def test_sample_statistics(kernel):
    # Test that statistics of samples drawn from GP are correct.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    y_mean, y_cov = gpr.predict(X2, return_cov=True)

    samples = gpr.sample_y(X2, 300000)

    # More digits accuracy would require many more samples
    assert_almost_equal(y_mean, np.mean(samples, 1), 1)
    assert_almost_equal(
        np.diag(y_cov) / np.diag(y_cov).max(),
        np.var(samples, 1) / np.diag(y_cov).max(),
        1,
    )


def test_no_optimizer():
    # Test that kernel parameters are unmodified when optimizer is None.
    kernel = RBF(1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
    assert np.exp(gpr.kernel_.theta) == 1.0


@pytest.mark.parametrize("kernel", kernels)
@pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)])
def test_predict_cov_vs_std(kernel, target):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")

    # Test that predicted std.-dev. is consistent with cov's diagonal.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_mean, y_cov = gpr.predict(X2, return_cov=True)
    y_mean, y_std = gpr.predict(X2, return_std=True)
    assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)


def test_anisotropic_kernel():
    # Test that GPR can identify meaningful anisotropic length-scales.
    # We learn a function which varies in one dimension ten-times slower
    # than in the other. The corresponding length-scales should differ by at
    # least a factor 5
    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, (50, 2))
    y = X[:, 0] + 0.1 * X[:, 1]

    kernel = RBF([1.0, 1.0])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert np.exp(gpr.kernel_.theta[1]) > np.exp(gpr.kernel_.theta[0]) * 5


def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the log marginal likelihood of the chosen theta.
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = (
        np.sin(X).sum(axis=1)
        + np.sin(3 * X).sum(axis=1)
        + rng.normal(scale=0.1, size=n_samples)
    )

    kernel = C(1.0, (1e-2, 1e2)) * RBF(
        length_scale=[1.0] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
    ) + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessRegressor(
            kernel=kernel,
            n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,
        ).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        assert lml > last_lml - np.finfo(np.float32).eps
        last_lml = lml
247
+
248
+
249
+ @pytest.mark.parametrize("kernel", kernels)
250
+ def test_y_normalization(kernel):
251
+ """
252
+ Test normalization of the target values in GP
253
+
254
+ Fitting non-normalizing GP on normalized y and fitting normalizing GP
255
+ on unnormalized y should yield identical results. Note that, here,
256
+ 'normalized y' refers to y that has been made zero mean and unit
257
+ variance.
258
+
259
+ """
260
+
261
+ y_mean = np.mean(y)
262
+ y_std = np.std(y)
263
+ y_norm = (y - y_mean) / y_std
264
+
265
+ # Fit non-normalizing GP on normalized y
266
+ gpr = GaussianProcessRegressor(kernel=kernel)
267
+ gpr.fit(X, y_norm)
268
+
269
+ # Fit normalizing GP on unnormalized y
270
+ gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
271
+ gpr_norm.fit(X, y)
272
+
273
+ # Compare predicted mean, std-devs and covariances
274
+ y_pred, y_pred_std = gpr.predict(X2, return_std=True)
275
+ y_pred = y_pred * y_std + y_mean
276
+ y_pred_std = y_pred_std * y_std
277
+ y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
278
+
279
+ assert_almost_equal(y_pred, y_pred_norm)
280
+ assert_almost_equal(y_pred_std, y_pred_std_norm)
281
+
282
+ _, y_cov = gpr.predict(X2, return_cov=True)
283
+ y_cov = y_cov * y_std**2
284
+ _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
285
+
286
+ assert_almost_equal(y_cov, y_cov_norm)
287
+
288
+
289
+ def test_large_variance_y():
290
+ """
291
+ Here we test that, when noramlize_y=True, our GP can produce a
292
+ sensible fit to training data whose variance is significantly
293
+ larger than unity. This test was made in response to issue #15612.
294
+
295
+ GP predictions are verified against predictions that were made
296
+ using GPy which, here, is treated as the 'gold standard'. Note that we
297
+ only investigate the RBF kernel here, as that is what was used in the
298
+ GPy implementation.
299
+
300
+ The following code can be used to recreate the GPy data:
301
+
302
+ --------------------------------------------------------------------------
303
+ import GPy
304
+
305
+ kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
306
+ gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
307
+ gpy.optimize()
308
+ y_pred_gpy, y_var_gpy = gpy.predict(X2)
309
+ y_pred_std_gpy = np.sqrt(y_var_gpy)
310
+ --------------------------------------------------------------------------
311
+ """
312
+
313
+ # Here we utilise a larger variance version of the training data
314
+ y_large = 10 * y
315
+
316
+ # Standard GP with normalize_y=True
317
+ RBF_params = {"length_scale": 1.0}
318
+ kernel = RBF(**RBF_params)
319
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
320
+ gpr.fit(X, y_large)
321
+ y_pred, y_pred_std = gpr.predict(X2, return_std=True)
322
+
323
+ # 'Gold standard' mean predictions from GPy
324
+ y_pred_gpy = np.array(
325
+ [15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589]
326
+ )
327
+
328
+ # 'Gold standard' std predictions from GPy
329
+ y_pred_std_gpy = np.array(
330
+ [7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042]
331
+ )
332
+
333
+ # Based on numerical experiments, it's reasonable to expect our
334
+ # GP's mean predictions to get within 7% of predictions of those
335
+ # made by GPy.
336
+ assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)
337
+
338
+ # Based on numerical experiments, it's reasonable to expect our
339
+ # GP's std predictions to get within 15% of predictions of those
340
+ # made by GPy.
341
+ assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
342
+
343
+
344
+ def test_y_multioutput():
345
+ # Test that GPR can deal with multi-dimensional target values
346
+ y_2d = np.vstack((y, y * 2)).T
347
+
348
+ # Test for fixed kernel that first dimension of 2d GP equals the output
349
+ # of 1d GP and that second dimension is twice as large
350
+ kernel = RBF(length_scale=1.0)
351
+
352
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
353
+ gpr.fit(X, y)
354
+
355
+ gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
356
+ gpr_2d.fit(X, y_2d)
357
+
358
+ y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
359
+ y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
360
+ _, y_cov_1d = gpr.predict(X2, return_cov=True)
361
+ _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
362
+
363
+ assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
364
+ assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
365
+
366
+ # Standard deviation and covariance do not depend on output
367
+ for target in range(y_2d.shape[1]):
368
+ assert_almost_equal(y_std_1d, y_std_2d[..., target])
369
+ assert_almost_equal(y_cov_1d, y_cov_2d[..., target])
370
+
371
+ y_sample_1d = gpr.sample_y(X2, n_samples=10)
372
+ y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
373
+
374
+ assert y_sample_1d.shape == (5, 10)
375
+ assert y_sample_2d.shape == (5, 2, 10)
376
+ # Only the first target will be equal
377
+ assert_almost_equal(y_sample_1d, y_sample_2d[:, 0, :])
378
+
379
+ # Test hyperparameter optimization
380
+ for kernel in kernels:
381
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
382
+ gpr.fit(X, y)
383
+
384
+ gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
385
+ gpr_2d.fit(X, np.vstack((y, y)).T)
386
+
387
+ assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
388
+
389
+
390
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
391
+ def test_custom_optimizer(kernel):
392
+ # Test that GPR can use externally defined optimizers.
393
+ # Define a dummy optimizer that simply tests 50 random hyperparameters
394
+ def optimizer(obj_func, initial_theta, bounds):
395
+ rng = np.random.RandomState(0)
396
+ theta_opt, func_min = initial_theta, obj_func(
397
+ initial_theta, eval_gradient=False
398
+ )
399
+ for _ in range(50):
400
+ theta = np.atleast_1d(
401
+ rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
402
+ )
403
+ f = obj_func(theta, eval_gradient=False)
404
+ if f < func_min:
405
+ theta_opt, func_min = theta, f
406
+ return theta_opt, func_min
407
+
408
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
409
+ gpr.fit(X, y)
410
+ # Checks that optimizer improved marginal likelihood
411
+ assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
412
+ gpr.kernel.theta
413
+ )
414
+
415
+
416
+ def test_gpr_correct_error_message():
417
+ X = np.arange(12).reshape(6, -1)
418
+ y = np.ones(6)
419
+ kernel = DotProduct()
420
+ gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
421
+ message = (
422
+ "The kernel, %s, is not returning a "
423
+ "positive definite matrix. Try gradually increasing "
424
+ "the 'alpha' parameter of your "
425
+ "GaussianProcessRegressor estimator." % kernel
426
+ )
427
+ with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)):
428
+ gpr.fit(X, y)
429
+
430
+
431
+ @pytest.mark.parametrize("kernel", kernels)
432
+ def test_duplicate_input(kernel):
433
+ # Test GPR can handle two different output-values for the same input.
434
+ gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
435
+ gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
436
+
437
+ X_ = np.vstack((X, X[0]))
438
+ y_ = np.hstack((y, y[0] + 1))
439
+ gpr_equal_inputs.fit(X_, y_)
440
+
441
+ X_ = np.vstack((X, X[0] + 1e-15))
442
+ y_ = np.hstack((y, y[0] + 1))
443
+ gpr_similar_inputs.fit(X_, y_)
444
+
445
+ X_test = np.linspace(0, 10, 100)[:, None]
446
+ y_pred_equal, y_std_equal = gpr_equal_inputs.predict(X_test, return_std=True)
447
+ y_pred_similar, y_std_similar = gpr_similar_inputs.predict(X_test, return_std=True)
448
+
449
+ assert_almost_equal(y_pred_equal, y_pred_similar)
450
+ assert_almost_equal(y_std_equal, y_std_similar)
451
+
452
+
453
+ def test_no_fit_default_predict():
454
+ # Test that GPR predictions without fit does not break by default.
455
+ default_kernel = C(1.0, constant_value_bounds="fixed") * RBF(
456
+ 1.0, length_scale_bounds="fixed"
457
+ )
458
+ gpr1 = GaussianProcessRegressor()
459
+ _, y_std1 = gpr1.predict(X, return_std=True)
460
+ _, y_cov1 = gpr1.predict(X, return_cov=True)
461
+
462
+ gpr2 = GaussianProcessRegressor(kernel=default_kernel)
463
+ _, y_std2 = gpr2.predict(X, return_std=True)
464
+ _, y_cov2 = gpr2.predict(X, return_cov=True)
465
+
466
+ assert_array_almost_equal(y_std1, y_std2)
467
+ assert_array_almost_equal(y_cov1, y_cov2)
468
+
469
+
470
+ def test_warning_bounds():
471
+ kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
472
+ gpr = GaussianProcessRegressor(kernel=kernel)
473
+ warning_message = (
474
+ "The optimal value found for dimension 0 of parameter "
475
+ "length_scale is close to the specified upper bound "
476
+ "0.001. Increasing the bound and calling fit again may "
477
+ "find a better value."
478
+ )
479
+ with pytest.warns(ConvergenceWarning, match=warning_message):
480
+ gpr.fit(X, y)
481
+
482
+ kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
483
+ length_scale_bounds=[1e3, 1e5]
484
+ )
485
+ gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
486
+ with warnings.catch_warnings(record=True) as record:
487
+ warnings.simplefilter("always")
488
+ gpr_sum.fit(X, y)
489
+
490
+ assert len(record) == 2
491
+
492
+ assert issubclass(record[0].category, ConvergenceWarning)
493
+ assert (
494
+ record[0].message.args[0]
495
+ == "The optimal value found for "
496
+ "dimension 0 of parameter "
497
+ "k1__noise_level is close to the "
498
+ "specified upper bound 0.001. "
499
+ "Increasing the bound and calling "
500
+ "fit again may find a better value."
501
+ )
502
+
503
+ assert issubclass(record[1].category, ConvergenceWarning)
504
+ assert (
505
+ record[1].message.args[0]
506
+ == "The optimal value found for "
507
+ "dimension 0 of parameter "
508
+ "k2__length_scale is close to the "
509
+ "specified lower bound 1000.0. "
510
+ "Decreasing the bound and calling "
511
+ "fit again may find a better value."
512
+ )
513
+
514
+ X_tile = np.tile(X, 2)
515
+ kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
516
+ gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)
517
+
518
+ with warnings.catch_warnings(record=True) as record:
519
+ warnings.simplefilter("always")
520
+ gpr_dims.fit(X_tile, y)
521
+
522
+ assert len(record) == 2
523
+
524
+ assert issubclass(record[0].category, ConvergenceWarning)
525
+ assert (
526
+ record[0].message.args[0]
527
+ == "The optimal value found for "
528
+ "dimension 0 of parameter "
529
+ "length_scale is close to the "
530
+ "specified lower bound 10.0. "
531
+ "Decreasing the bound and calling "
532
+ "fit again may find a better value."
533
+ )
534
+
535
+ assert issubclass(record[1].category, ConvergenceWarning)
536
+ assert (
537
+ record[1].message.args[0]
538
+ == "The optimal value found for "
539
+ "dimension 1 of parameter "
540
+ "length_scale is close to the "
541
+ "specified lower bound 10.0. "
542
+ "Decreasing the bound and calling "
543
+ "fit again may find a better value."
544
+ )
545
+
546
+
547
+ def test_bound_check_fixed_hyperparameter():
548
+ # Regression test for issue #17943
549
+ # Check that having a hyperparameter with fixed bounds doesn't cause an
550
+ # error
551
+ k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
552
+ k2 = ExpSineSquared(
553
+ length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed"
554
+ ) # seasonal component
555
+ kernel = k1 + k2
556
+ GaussianProcessRegressor(kernel=kernel).fit(X, y)
557
+
558
+
559
+ @pytest.mark.parametrize("kernel", kernels)
560
+ def test_constant_target(kernel):
561
+ """Check that the std. dev. is affected to 1 when normalizing a constant
562
+ feature.
563
+ Non-regression test for:
564
+ https://github.com/scikit-learn/scikit-learn/issues/18318
565
+ NaN where affected to the target when scaling due to null std. dev. with
566
+ constant target.
567
+ """
568
+ y_constant = np.ones(X.shape[0], dtype=np.float64)
569
+
570
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
571
+ gpr.fit(X, y_constant)
572
+ assert gpr._y_train_std == pytest.approx(1.0)
573
+
574
+ y_pred, y_cov = gpr.predict(X, return_cov=True)
575
+ assert_allclose(y_pred, y_constant)
576
+ # set atol because we compare to zero
577
+ assert_allclose(np.diag(y_cov), 0.0, atol=1e-9)
578
+
579
+ # Test multi-target data
580
+ n_samples, n_targets = X.shape[0], 2
581
+ rng = np.random.RandomState(0)
582
+ y = np.concatenate(
583
+ [
584
+ rng.normal(size=(n_samples, 1)), # non-constant target
585
+ np.full(shape=(n_samples, 1), fill_value=2), # constant target
586
+ ],
587
+ axis=1,
588
+ )
589
+
590
+ gpr.fit(X, y)
591
+ Y_pred, Y_cov = gpr.predict(X, return_cov=True)
592
+
593
+ assert_allclose(Y_pred[:, 1], 2)
594
+ assert_allclose(np.diag(Y_cov[..., 1]), 0.0, atol=1e-9)
595
+
596
+ assert Y_pred.shape == (n_samples, n_targets)
597
+ assert Y_cov.shape == (n_samples, n_samples, n_targets)
598
+
599
+
600
+ def test_gpr_consistency_std_cov_non_invertible_kernel():
601
+ """Check the consistency between the returned std. dev. and the covariance.
602
+ Non-regression test for:
603
+ https://github.com/scikit-learn/scikit-learn/issues/19936
604
+ Inconsistencies were observed when the kernel cannot be inverted (or
605
+ numerically stable).
606
+ """
607
+ kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF(
608
+ [5.91326520e02, 1.32584051e03], (1e-12, 1e12)
609
+ ) + WhiteKernel(noise_level=1e-5)
610
+ gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None)
611
+ X_train = np.array(
612
+ [
613
+ [0.0, 0.0],
614
+ [1.54919334, -0.77459667],
615
+ [-1.54919334, 0.0],
616
+ [0.0, -1.54919334],
617
+ [0.77459667, 0.77459667],
618
+ [-0.77459667, 1.54919334],
619
+ ]
620
+ )
621
+ y_train = np.array(
622
+ [
623
+ [-2.14882017e-10],
624
+ [-4.66975823e00],
625
+ [4.01823986e00],
626
+ [-1.30303674e00],
627
+ [-1.35760156e00],
628
+ [3.31215668e00],
629
+ ]
630
+ )
631
+ gpr.fit(X_train, y_train)
632
+ X_test = np.array(
633
+ [
634
+ [-1.93649167, -1.93649167],
635
+ [1.93649167, -1.93649167],
636
+ [-1.93649167, 1.93649167],
637
+ [1.93649167, 1.93649167],
638
+ ]
639
+ )
640
+ pred1, std = gpr.predict(X_test, return_std=True)
641
+ pred2, cov = gpr.predict(X_test, return_cov=True)
642
+ assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5)
643
+
644
+
645
+ @pytest.mark.parametrize(
646
+ "params, TypeError, err_msg",
647
+ [
648
+ (
649
+ {"alpha": np.zeros(100)},
650
+ ValueError,
651
+ "alpha must be a scalar or an array with same number of entries as y",
652
+ ),
653
+ (
654
+ {
655
+ "kernel": WhiteKernel(noise_level_bounds=(-np.inf, np.inf)),
656
+ "n_restarts_optimizer": 2,
657
+ },
658
+ ValueError,
659
+ "requires that all bounds are finite",
660
+ ),
661
+ ],
662
+ )
663
+ def test_gpr_fit_error(params, TypeError, err_msg):
664
+ """Check that expected error are raised during fit."""
665
+ gpr = GaussianProcessRegressor(**params)
666
+ with pytest.raises(TypeError, match=err_msg):
667
+ gpr.fit(X, y)
668
+
669
+
670
+ def test_gpr_lml_error():
671
+ """Check that we raise the proper error in the LML method."""
672
+ gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
673
+
674
+ err_msg = "Gradient can only be evaluated for theta!=None"
675
+ with pytest.raises(ValueError, match=err_msg):
676
+ gpr.log_marginal_likelihood(eval_gradient=True)
677
+
678
+
679
+ def test_gpr_predict_error():
680
+ """Check that we raise the proper error during predict."""
681
+ gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
682
+
683
+ err_msg = "At most one of return_std or return_cov can be requested."
684
+ with pytest.raises(RuntimeError, match=err_msg):
685
+ gpr.predict(X, return_cov=True, return_std=True)
686
+
687
+
688
+ @pytest.mark.parametrize("normalize_y", [True, False])
689
+ @pytest.mark.parametrize("n_targets", [None, 1, 10])
690
+ def test_predict_shapes(normalize_y, n_targets):
691
+ """Check the shapes of y_mean, y_std, and y_cov in single-output
692
+ (n_targets=None) and multi-output settings, including the edge case when
693
+ n_targets=1, where the sklearn convention is to squeeze the predictions.
694
+
695
+ Non-regression test for:
696
+ https://github.com/scikit-learn/scikit-learn/issues/17394
697
+ https://github.com/scikit-learn/scikit-learn/issues/18065
698
+ https://github.com/scikit-learn/scikit-learn/issues/22174
699
+ """
700
+ rng = np.random.RandomState(1234)
701
+
702
+ n_features, n_samples_train, n_samples_test = 6, 9, 7
703
+
704
+ y_train_shape = (n_samples_train,)
705
+ if n_targets is not None:
706
+ y_train_shape = y_train_shape + (n_targets,)
707
+
708
+ # By convention single-output data is squeezed upon prediction
709
+ y_test_shape = (n_samples_test,)
710
+ if n_targets is not None and n_targets > 1:
711
+ y_test_shape = y_test_shape + (n_targets,)
712
+
713
+ X_train = rng.randn(n_samples_train, n_features)
714
+ X_test = rng.randn(n_samples_test, n_features)
715
+ y_train = rng.randn(*y_train_shape)
716
+
717
+ model = GaussianProcessRegressor(normalize_y=normalize_y)
718
+ model.fit(X_train, y_train)
719
+
720
+ y_pred, y_std = model.predict(X_test, return_std=True)
721
+ _, y_cov = model.predict(X_test, return_cov=True)
722
+
723
+ assert y_pred.shape == y_test_shape
724
+ assert y_std.shape == y_test_shape
725
+ assert y_cov.shape == (n_samples_test,) + y_test_shape
726
+
727
+
728
+ @pytest.mark.parametrize("normalize_y", [True, False])
729
+ @pytest.mark.parametrize("n_targets", [None, 1, 10])
730
+ def test_sample_y_shapes(normalize_y, n_targets):
731
+ """Check the shapes of y_samples in single-output (n_targets=0) and
732
+ multi-output settings, including the edge case when n_targets=1, where the
733
+ sklearn convention is to squeeze the predictions.
734
+
735
+ Non-regression test for:
736
+ https://github.com/scikit-learn/scikit-learn/issues/22175
737
+ """
738
+ rng = np.random.RandomState(1234)
739
+
740
+ n_features, n_samples_train = 6, 9
741
+ # Number of spatial locations to predict at
742
+ n_samples_X_test = 7
743
+ # Number of sample predictions per test point
744
+ n_samples_y_test = 5
745
+
746
+ y_train_shape = (n_samples_train,)
747
+ if n_targets is not None:
748
+ y_train_shape = y_train_shape + (n_targets,)
749
+
750
+ # By convention single-output data is squeezed upon prediction
751
+ if n_targets is not None and n_targets > 1:
752
+ y_test_shape = (n_samples_X_test, n_targets, n_samples_y_test)
753
+ else:
754
+ y_test_shape = (n_samples_X_test, n_samples_y_test)
755
+
756
+ X_train = rng.randn(n_samples_train, n_features)
757
+ X_test = rng.randn(n_samples_X_test, n_features)
758
+ y_train = rng.randn(*y_train_shape)
759
+
760
+ model = GaussianProcessRegressor(normalize_y=normalize_y)
761
+
762
+ # FIXME: before fitting, the estimator does not have information regarding
763
+ # the number of targets and default to 1. This is inconsistent with the shape
764
+ # provided after `fit`. This assert should be made once the following issue
765
+ # is fixed:
766
+ # https://github.com/scikit-learn/scikit-learn/issues/22430
767
+ # y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
768
+ # assert y_samples.shape == y_test_shape
769
+
770
+ model.fit(X_train, y_train)
771
+
772
+ y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
773
+ assert y_samples.shape == y_test_shape
774
+
775
+
776
+ class CustomKernel(C):
777
+ """
778
+ A custom kernel that has a diag method that returns the first column of the
779
+ input matrix X. This is a helper for the test to check that the input
780
+ matrix X is not mutated.
781
+ """
782
+
783
+ def diag(self, X):
784
+ return X[:, 0]
785
+
786
+
787
+ def test_gpr_predict_input_not_modified():
788
+ """
789
+ Check that the input X is not modified by the predict method of the
790
+ GaussianProcessRegressor when setting return_std=True.
791
+
792
+ Non-regression test for:
793
+ https://github.com/scikit-learn/scikit-learn/issues/24340
794
+ """
795
+ gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y)
796
+
797
+ X2_copy = np.copy(X2)
798
+ _, _ = gpr.predict(X2, return_std=True)
799
+
800
+ assert_allclose(X2, X2_copy)
mplug_owl2/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Testing for kernels for Gaussian processes."""
2
+
3
+ # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
4
+ # License: BSD 3 clause
5
+
6
+ import pytest
7
+ import numpy as np
8
+ from inspect import signature
9
+
10
+ from sklearn.gaussian_process.kernels import _approx_fprime
11
+
12
+ from sklearn.metrics.pairwise import (
13
+ PAIRWISE_KERNEL_FUNCTIONS,
14
+ euclidean_distances,
15
+ pairwise_kernels,
16
+ )
17
+ from sklearn.gaussian_process.kernels import (
18
+ RBF,
19
+ Matern,
20
+ RationalQuadratic,
21
+ ExpSineSquared,
22
+ DotProduct,
23
+ ConstantKernel,
24
+ WhiteKernel,
25
+ PairwiseKernel,
26
+ KernelOperator,
27
+ Exponentiation,
28
+ CompoundKernel,
29
+ )
30
+ from sklearn.base import clone
31
+
32
+ from sklearn.utils._testing import (
33
+ assert_almost_equal,
34
+ assert_array_equal,
35
+ assert_array_almost_equal,
36
+ assert_allclose,
37
+ )
38
+
39
+
40
+ X = np.random.RandomState(0).normal(0, 1, (5, 2))
41
+ Y = np.random.RandomState(0).normal(0, 1, (6, 2))
42
+
43
+ kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
44
+ kernels = [
45
+ RBF(length_scale=2.0),
46
+ RBF(length_scale_bounds=(0.5, 2.0)),
47
+ ConstantKernel(constant_value=10.0),
48
+ 2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
49
+ 2.0 * RBF(length_scale=0.5),
50
+ kernel_rbf_plus_white,
51
+ 2.0 * RBF(length_scale=[0.5, 2.0]),
52
+ 2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
53
+ 2.0 * Matern(length_scale=0.5, nu=0.5),
54
+ 2.0 * Matern(length_scale=1.5, nu=1.5),
55
+ 2.0 * Matern(length_scale=2.5, nu=2.5),
56
+ 2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
57
+ 3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
58
+ 4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
59
+ RationalQuadratic(length_scale=0.5, alpha=1.5),
60
+ ExpSineSquared(length_scale=0.5, periodicity=1.5),
61
+ DotProduct(sigma_0=2.0),
62
+ DotProduct(sigma_0=2.0) ** 2,
63
+ RBF(length_scale=[2.0]),
64
+ Matern(length_scale=[2.0]),
65
+ ]
66
+ for metric in PAIRWISE_KERNEL_FUNCTIONS:
67
+ if metric in ["additive_chi2", "chi2"]:
68
+ continue
69
+ kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
70
+
71
+
72
+ @pytest.mark.parametrize("kernel", kernels)
73
+ def test_kernel_gradient(kernel):
74
+ # Compare analytic and numeric gradient of kernels.
75
+ K, K_gradient = kernel(X, eval_gradient=True)
76
+
77
+ assert K_gradient.shape[0] == X.shape[0]
78
+ assert K_gradient.shape[1] == X.shape[0]
79
+ assert K_gradient.shape[2] == kernel.theta.shape[0]
80
+
81
+ def eval_kernel_for_theta(theta):
82
+ kernel_clone = kernel.clone_with_theta(theta)
83
+ K = kernel_clone(X, eval_gradient=False)
84
+ return K
85
+
86
+ K_gradient_approx = _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
87
+
88
+ assert_almost_equal(K_gradient, K_gradient_approx, 4)
89
+
90
+
91
+ @pytest.mark.parametrize(
92
+ "kernel",
93
+ [
94
+ kernel
95
+ for kernel in kernels
96
+ # skip non-basic kernels
97
+ if not (isinstance(kernel, (KernelOperator, Exponentiation)))
98
+ ],
99
+ )
100
+ def test_kernel_theta(kernel):
101
+ # Check that parameter vector theta of kernel is set correctly.
102
+ theta = kernel.theta
103
+ _, K_gradient = kernel(X, eval_gradient=True)
104
+
105
+ # Determine kernel parameters that contribute to theta
106
+ init_sign = signature(kernel.__class__.__init__).parameters.values()
107
+ args = [p.name for p in init_sign if p.name != "self"]
108
+ theta_vars = map(
109
+ lambda s: s[0 : -len("_bounds")], filter(lambda s: s.endswith("_bounds"), args)
110
+ )
111
+ assert set(hyperparameter.name for hyperparameter in kernel.hyperparameters) == set(
112
+ theta_vars
113
+ )
114
+
115
+ # Check that values returned in theta are consistent with
116
+ # hyperparameter values (being their logarithms)
117
+ for i, hyperparameter in enumerate(kernel.hyperparameters):
118
+ assert theta[i] == np.log(getattr(kernel, hyperparameter.name))
119
+
120
+ # Fixed kernel parameters must be excluded from theta and gradient.
121
+ for i, hyperparameter in enumerate(kernel.hyperparameters):
122
+ # create copy with certain hyperparameter fixed
123
+ params = kernel.get_params()
124
+ params[hyperparameter.name + "_bounds"] = "fixed"
125
+ kernel_class = kernel.__class__
126
+ new_kernel = kernel_class(**params)
127
+ # Check that theta and K_gradient are identical with the fixed
128
+ # dimension left out
129
+ _, K_gradient_new = new_kernel(X, eval_gradient=True)
130
+ assert theta.shape[0] == new_kernel.theta.shape[0] + 1
131
+ assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1
132
+ if i > 0:
133
+ assert theta[:i] == new_kernel.theta[:i]
134
+ assert_array_equal(K_gradient[..., :i], K_gradient_new[..., :i])
135
+ if i + 1 < len(kernel.hyperparameters):
136
+ assert theta[i + 1 :] == new_kernel.theta[i:]
137
+ assert_array_equal(K_gradient[..., i + 1 :], K_gradient_new[..., i:])
138
+
139
+ # Check that values of theta are modified correctly
140
+ for i, hyperparameter in enumerate(kernel.hyperparameters):
141
+ theta[i] = np.log(42)
142
+ kernel.theta = theta
143
+ assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
144
+
145
+ setattr(kernel, hyperparameter.name, 43)
146
+ assert_almost_equal(kernel.theta[i], np.log(43))
147
+
148
+
149
+ @pytest.mark.parametrize(
150
+ "kernel",
151
+ [
152
+ kernel
153
+ for kernel in kernels
154
+ # Identity is not satisfied on diagonal
155
+ if kernel != kernel_rbf_plus_white
156
+ ],
157
+ )
158
+ def test_auto_vs_cross(kernel):
159
+ # Auto-correlation and cross-correlation should be consistent.
160
+ K_auto = kernel(X)
161
+ K_cross = kernel(X, X)
162
+ assert_almost_equal(K_auto, K_cross, 5)
163
+
164
+
165
+ @pytest.mark.parametrize("kernel", kernels)
166
+ def test_kernel_diag(kernel):
167
+ # Test that diag method of kernel returns consistent results.
168
+ K_call_diag = np.diag(kernel(X))
169
+ K_diag = kernel.diag(X)
170
+ assert_almost_equal(K_call_diag, K_diag, 5)
171
+
172
+
173
+ def test_kernel_operator_commutative():
174
+ # Adding kernels and multiplying kernels should be commutative.
175
+ # Check addition
176
+ assert_almost_equal((RBF(2.0) + 1.0)(X), (1.0 + RBF(2.0))(X))
177
+
178
+ # Check multiplication
179
+ assert_almost_equal((3.0 * RBF(2.0))(X), (RBF(2.0) * 3.0)(X))
180
+
181
+
182
+ def test_kernel_anisotropic():
183
+ # Anisotropic kernel should be consistent with isotropic kernels.
184
+ kernel = 3.0 * RBF([0.5, 2.0])
185
+
186
+ K = kernel(X)
187
+ X1 = np.array(X)
188
+ X1[:, 0] *= 4
189
+ K1 = 3.0 * RBF(2.0)(X1)
190
+ assert_almost_equal(K, K1)
191
+
192
+ X2 = np.array(X)
193
+ X2[:, 1] /= 4
194
+ K2 = 3.0 * RBF(0.5)(X2)
195
+ assert_almost_equal(K, K2)
196
+
197
+ # Check getting and setting via theta
198
+ kernel.theta = kernel.theta + np.log(2)
199
+ assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
200
+ assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
201
+
202
+
203
+ @pytest.mark.parametrize(
204
+ "kernel", [kernel for kernel in kernels if kernel.is_stationary()]
205
+ )
206
+ def test_kernel_stationary(kernel):
207
+ # Test stationarity of kernels.
208
+ K = kernel(X, X + 1)
209
+ assert_almost_equal(K[0, 0], np.diag(K))
210
+
211
+
212
+ @pytest.mark.parametrize("kernel", kernels)
213
+ def test_kernel_input_type(kernel):
214
+ # Test whether kernels is for vectors or structured data
215
+ if isinstance(kernel, Exponentiation):
216
+ assert kernel.requires_vector_input == kernel.kernel.requires_vector_input
217
+ if isinstance(kernel, KernelOperator):
218
+ assert kernel.requires_vector_input == (
219
+ kernel.k1.requires_vector_input or kernel.k2.requires_vector_input
220
+ )
221
+
222
+
223
+ def test_compound_kernel_input_type():
224
+ kernel = CompoundKernel([WhiteKernel(noise_level=3.0)])
225
+ assert not kernel.requires_vector_input
226
+
227
+ kernel = CompoundKernel([WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
228
+ assert kernel.requires_vector_input
229
+
230
+
231
+ def check_hyperparameters_equal(kernel1, kernel2):
232
+ # Check that hyperparameters of two kernels are equal
233
+ for attr in set(dir(kernel1) + dir(kernel2)):
234
+ if attr.startswith("hyperparameter_"):
235
+ attr_value1 = getattr(kernel1, attr)
236
+ attr_value2 = getattr(kernel2, attr)
237
+ assert attr_value1 == attr_value2
238
+
239
+
240
+ @pytest.mark.parametrize("kernel", kernels)
241
+ def test_kernel_clone(kernel):
242
+ # Test that sklearn's clone works correctly on kernels.
243
+ kernel_cloned = clone(kernel)
244
+
245
+ # XXX: Should this be fixed?
246
+ # This differs from the sklearn's estimators equality check.
247
+ assert kernel == kernel_cloned
248
+ assert id(kernel) != id(kernel_cloned)
249
+
250
+ # Check that all constructor parameters are equal.
251
+ assert kernel.get_params() == kernel_cloned.get_params()
252
+
253
+ # Check that all hyperparameters are equal.
254
+ check_hyperparameters_equal(kernel, kernel_cloned)
255
+
256
+
257
+ @pytest.mark.parametrize("kernel", kernels)
258
+ def test_kernel_clone_after_set_params(kernel):
259
+ # This test is to verify that using set_params does not
260
+ # break clone on kernels.
261
+ # This used to break because in kernels such as the RBF, non-trivial
262
+ # logic that modified the length scale used to be in the constructor
263
+ # See https://github.com/scikit-learn/scikit-learn/issues/6961
264
+ # for more details.
265
+ bounds = (1e-5, 1e5)
266
+ kernel_cloned = clone(kernel)
267
+ params = kernel.get_params()
268
+ # RationalQuadratic kernel is isotropic.
269
+ isotropic_kernels = (ExpSineSquared, RationalQuadratic)
270
+ if "length_scale" in params and not isinstance(kernel, isotropic_kernels):
271
+ length_scale = params["length_scale"]
272
+ if np.iterable(length_scale):
273
+ # XXX unreached code as of v0.22
274
+ params["length_scale"] = length_scale[0]
275
+ params["length_scale_bounds"] = bounds
276
+ else:
277
+ params["length_scale"] = [length_scale] * 2
278
+ params["length_scale_bounds"] = bounds * 2
279
+ kernel_cloned.set_params(**params)
280
+ kernel_cloned_clone = clone(kernel_cloned)
281
+ assert kernel_cloned_clone.get_params() == kernel_cloned.get_params()
282
+ assert id(kernel_cloned_clone) != id(kernel_cloned)
283
+ check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone)
284
+
285
+
286
+ def test_matern_kernel():
287
+ # Test consistency of Matern kernel for special values of nu.
288
+ K = Matern(nu=1.5, length_scale=1.0)(X)
289
+ # the diagonal elements of a matern kernel are 1
290
+ assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
291
+ # matern kernel for coef0==0.5 is equal to absolute exponential kernel
292
+ K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
293
+ K = Matern(nu=0.5, length_scale=1.0)(X)
294
+ assert_array_almost_equal(K, K_absexp)
295
+ # matern kernel with coef0==inf is equal to RBF kernel
296
+ K_rbf = RBF(length_scale=1.0)(X)
297
+ K = Matern(nu=np.inf, length_scale=1.0)(X)
298
+ assert_array_almost_equal(K, K_rbf)
299
+ assert_allclose(K, K_rbf)
300
+ # test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
301
+ # result in nearly identical results as the general case for coef0 in
302
+ # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
303
+ tiny = 1e-10
304
+ for nu in [0.5, 1.5, 2.5]:
305
+ K1 = Matern(nu=nu, length_scale=1.0)(X)
306
+ K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
307
+ assert_array_almost_equal(K1, K2)
308
+ # test that coef0==large is close to RBF
309
+ large = 100
310
+ K1 = Matern(nu=large, length_scale=1.0)(X)
311
+ K2 = RBF(length_scale=1.0)(X)
312
+ assert_array_almost_equal(K1, K2, decimal=2)
313
+
314
+
315
+ @pytest.mark.parametrize("kernel", kernels)
316
+ def test_kernel_versus_pairwise(kernel):
317
+ # Check that GP kernels can also be used as pairwise kernels.
318
+
319
+ # Test auto-kernel
320
+ if kernel != kernel_rbf_plus_white:
321
+ # For WhiteKernel: k(X) != k(X,X). This is assumed by
322
+ # pairwise_kernels
323
+ K1 = kernel(X)
324
+ K2 = pairwise_kernels(X, metric=kernel)
325
+ assert_array_almost_equal(K1, K2)
326
+
327
+ # Test cross-kernel
328
+ K1 = kernel(X, Y)
329
+ K2 = pairwise_kernels(X, Y, metric=kernel)
330
+ assert_array_almost_equal(K1, K2)
331
+
332
+
333
+ @pytest.mark.parametrize("kernel", kernels)
334
+ def test_set_get_params(kernel):
335
+ # Check that set_params()/get_params() is consistent with kernel.theta.
336
+
337
+ # Test get_params()
338
+ index = 0
339
+ params = kernel.get_params()
340
+ for hyperparameter in kernel.hyperparameters:
341
+ if isinstance("string", type(hyperparameter.bounds)):
342
+ if hyperparameter.bounds == "fixed":
343
+ continue
344
+ size = hyperparameter.n_elements
345
+ if size > 1: # anisotropic kernels
346
+ assert_almost_equal(
347
+ np.exp(kernel.theta[index : index + size]), params[hyperparameter.name]
348
+ )
349
+ index += size
350
+ else:
351
+ assert_almost_equal(
352
+ np.exp(kernel.theta[index]), params[hyperparameter.name]
353
+ )
354
+ index += 1
355
+ # Test set_params()
356
+ index = 0
357
+ value = 10 # arbitrary value
358
+ for hyperparameter in kernel.hyperparameters:
359
+ if isinstance("string", type(hyperparameter.bounds)):
360
+ if hyperparameter.bounds == "fixed":
361
+ continue
362
+ size = hyperparameter.n_elements
363
+ if size > 1: # anisotropic kernels
364
+ kernel.set_params(**{hyperparameter.name: [value] * size})
365
+ assert_almost_equal(
366
+ np.exp(kernel.theta[index : index + size]), [value] * size
367
+ )
368
+ index += size
369
+ else:
370
+ kernel.set_params(**{hyperparameter.name: value})
371
+ assert_almost_equal(np.exp(kernel.theta[index]), value)
372
+ index += 1
373
+
374
+
375
+ @pytest.mark.parametrize("kernel", kernels)
376
+ def test_repr_kernels(kernel):
377
+ # Smoke-test for repr in kernels.
378
+
379
+ repr(kernel)
380
+
381
+
382
+ def test_rational_quadratic_kernel():
383
+ kernel = RationalQuadratic(length_scale=[1.0, 1.0])
384
+ message = (
385
+ "RationalQuadratic kernel only supports isotropic "
386
+ "version, please use a single "
387
+ "scalar for length_scale"
388
+ )
389
+ with pytest.raises(AttributeError, match=message):
390
+ kernel(X)
mplug_owl2/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (414 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__init__.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import typing
2
+
3
+ from ._split import BaseCrossValidator
4
+ from ._split import BaseShuffleSplit
5
+ from ._split import KFold
6
+ from ._split import GroupKFold
7
+ from ._split import StratifiedKFold
8
+ from ._split import TimeSeriesSplit
9
+ from ._split import LeaveOneGroupOut
10
+ from ._split import LeaveOneOut
11
+ from ._split import LeavePGroupsOut
12
+ from ._split import LeavePOut
13
+ from ._split import RepeatedKFold
14
+ from ._split import RepeatedStratifiedKFold
15
+ from ._split import ShuffleSplit
16
+ from ._split import GroupShuffleSplit
17
+ from ._split import StratifiedShuffleSplit
18
+ from ._split import StratifiedGroupKFold
19
+ from ._split import PredefinedSplit
20
+ from ._split import train_test_split
21
+ from ._split import check_cv
22
+
23
+ from ._validation import cross_val_score
24
+ from ._validation import cross_val_predict
25
+ from ._validation import cross_validate
26
+ from ._validation import learning_curve
27
+ from ._validation import permutation_test_score
28
+ from ._validation import validation_curve
29
+
30
+ from ._search import GridSearchCV
31
+ from ._search import RandomizedSearchCV
32
+ from ._search import ParameterGrid
33
+ from ._search import ParameterSampler
34
+
35
+ from ._plot import LearningCurveDisplay
36
+
37
+ if typing.TYPE_CHECKING:
38
+ # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
39
+ # TODO: remove this check once the estimator is no longer experimental.
40
+ from ._search_successive_halving import ( # noqa
41
+ HalvingGridSearchCV,
42
+ HalvingRandomSearchCV,
43
+ )
44
+
45
+
46
+ __all__ = [
47
+ "BaseCrossValidator",
48
+ "BaseShuffleSplit",
49
+ "GridSearchCV",
50
+ "TimeSeriesSplit",
51
+ "KFold",
52
+ "GroupKFold",
53
+ "GroupShuffleSplit",
54
+ "LeaveOneGroupOut",
55
+ "LeaveOneOut",
56
+ "LeavePGroupsOut",
57
+ "LeavePOut",
58
+ "RepeatedKFold",
59
+ "RepeatedStratifiedKFold",
60
+ "ParameterGrid",
61
+ "ParameterSampler",
62
+ "PredefinedSplit",
63
+ "RandomizedSearchCV",
64
+ "ShuffleSplit",
65
+ "StratifiedKFold",
66
+ "StratifiedGroupKFold",
67
+ "StratifiedShuffleSplit",
68
+ "check_cv",
69
+ "cross_val_predict",
70
+ "cross_val_score",
71
+ "cross_validate",
72
+ "learning_curve",
73
+ "LearningCurveDisplay",
74
+ "permutation_test_score",
75
+ "train_test_split",
76
+ "validation_curve",
77
+ ]
78
+
79
+
80
+ # TODO: remove this check once the estimator is no longer experimental.
81
+ def __getattr__(name):
82
+ if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}:
83
+ raise ImportError(
84
+ f"{name} is experimental and the API might change without any "
85
+ "deprecation cycle. To use it, you need to explicitly import "
86
+ "enable_halving_search_cv:\n"
87
+ "from sklearn.experimental import enable_halving_search_cv"
88
+ )
89
+ raise AttributeError(f"module {__name__} has no attribute {name}")
mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc ADDED
Binary file (61.4 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc ADDED
Binary file (37.7 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc ADDED
Binary file (81.4 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc ADDED
Binary file (60.1 kB). View file