ZTWHHH committed on
Commit
24aae88
·
verified ·
1 Parent(s): dc0c613

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. mantis_evalkit/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc +0 -0
  2. mantis_evalkit/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc +0 -0
  3. mantis_evalkit/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc +0 -0
  4. mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/__init__.py +13 -0
  5. mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/config/update.py +63 -0
  6. mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/env.py +113 -0
  7. mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/test.py +65 -0
  8. mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/tpu.py +157 -0
  9. mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/utils.py +120 -0
  10. mantis_evalkit/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py +0 -0
  11. mantis_evalkit/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc +0 -0
  12. mantis_evalkit/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py +60 -0
  13. mantis_evalkit/lib/python3.10/site-packages/sklearn/_build_utils/version.py +16 -0
  14. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__init__.py +46 -0
  15. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc +0 -0
  16. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc +0 -0
  17. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc +0 -0
  18. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc +0 -0
  19. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc +0 -0
  20. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc +0 -0
  21. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py +266 -0
  22. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py +367 -0
  23. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py +1140 -0
  24. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py +870 -0
  25. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py +820 -0
  26. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py +0 -0
  27. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  28. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc +0 -0
  29. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc +0 -0
  30. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc +0 -0
  31. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc +0 -0
  32. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py +374 -0
  33. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py +52 -0
  34. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py +318 -0
  35. mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py +168 -0
  36. mantis_evalkit/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc +0 -0
  37. mantis_evalkit/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc +0 -0
  38. mantis_evalkit/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc +0 -0
  39. mantis_evalkit/lib/python3.10/site-packages/sklearn/frozen/_frozen.py +166 -0
  40. mantis_evalkit/lib/python3.10/site-packages/sklearn/frozen/tests/test_frozen.py +223 -0
  41. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py +10 -0
  42. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py +904 -0
  43. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py +2410 -0
  44. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py +0 -0
  45. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc +0 -0
  46. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc +0 -0
  47. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py +54 -0
  48. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py +284 -0
  49. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py +848 -0
  50. mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py +403 -0
mantis_evalkit/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.22 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc ADDED
Binary file (41.7 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc ADDED
Binary file (3.64 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/config/update.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ from .config_args import default_config_file, load_config_from_file
20
+ from .config_utils import SubcommandHelpFormatter
21
+
22
+
23
+ description = "Update an existing config file with the latest defaults while maintaining the old configuration."
24
+
25
+
26
def update_config(args):
    """
    Update an existing config file with the latest defaults while maintaining the old configuration.

    Args:
        args: Parsed CLI namespace; only ``args.config_file`` is read (path to
            the config file, or ``None`` to use the default location).

    Returns:
        str: Path of the config file that was rewritten.

    Raises:
        ValueError: If no config file could be located.
    """
    config_file = args.config_file
    if config_file is None:
        # Fall back to the default location, but fail with a clear message when
        # nothing is there (previously `Path(None)` raised an opaque TypeError).
        if not Path(default_config_file).exists():
            raise ValueError(
                f"No config file passed and the default config file located at {default_config_file} doesn't exist."
            )
        config_file = default_config_file
    elif not Path(config_file).exists():
        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
    config = load_config_from_file(config_file)

    # Re-serialize in the format the file already uses so any new default
    # fields are written back to disk.
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    return config_file
42
+
43
+
44
def update_command_parser(parser, parents):
    """Register the `update` subcommand on *parser* and return the created sub-parser."""
    subparser = parser.add_parser(
        "update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter
    )
    subparser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    # Dispatch to the handler when this subcommand is selected.
    subparser.set_defaults(func=update_config_command)
    return subparser
59
+
60
+
61
def update_config_command(args):
    """CLI handler for `accelerate config update`: refresh the config file and report the result."""
    config_file = update_config(args)
    # Fixed typo in the user-facing message ("Sucessfully" -> "Successfully").
    print(f"Successfully updated the configuration file at {config_file}.")
mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/env.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+ import platform
20
+ import subprocess
21
+
22
+ import numpy as np
23
+ import psutil
24
+ import torch
25
+
26
+ from accelerate import __version__ as version
27
+ from accelerate.commands.config import default_config_file, load_config_from_file
28
+
29
+ from ..utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available
30
+
31
+
32
def env_command_parser(subparsers=None):
    """Build the argument parser for `accelerate env`.

    When *subparsers* is given the command is attached as a sub-parser and its
    default handler is wired up; otherwise a standalone parser is returned.
    """
    standalone = subparsers is None
    if standalone:
        parser = argparse.ArgumentParser("Accelerate env command")
    else:
        parser = subparsers.add_parser("env")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if not standalone:
        parser.set_defaults(func=env_command)
    return parser
45
+
46
+
47
def env_command(args):
    """Collect environment and version information, print it in a GitHub-issue-friendly
    bullet-list format, and return it.

    Args:
        args: Parsed CLI namespace; only ``args.config_file`` is read (may be ``None``,
            in which case the default config file is used if present).

    Returns:
        dict: The collected info, with the accelerate config attached under
        the "`Accelerate` configs" key.
    """
    # Probe PyTorch and the optional device backends up front.
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_mlu_available = is_mlu_available()
    pt_musa_available = is_musa_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    # if we can run which, get it
    command = None
    bash_location = "Not found"
    if os.name == "nt":
        command = ["where", "accelerate"]
    elif os.name == "posix":
        command = ["which", "accelerate"]
    if command is not None:
        bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
    # Insertion order here determines the printed report order.
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "`accelerate` bash location": bash_location,
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "PyTorch MLU available": str(pt_mlu_available),
        "PyTorch MUSA available": str(pt_musa_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    # Device-specific details only when the corresponding backend is available.
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    if pt_mlu_available:
        info["MLU type"] = torch.mlu.get_device_name()
    if pt_npu_available:
        info["CANN version"] = torch.version.cann

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    # accelerate_config is either a dict (config found) or the "Not found" string.
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
103
+
104
+
105
def main() -> int:
    """Standalone entry point: parse CLI arguments and run the env report."""
    env_command(env_command_parser().parse_args())
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/test.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
20
+
21
+
22
def test_command_parser(subparsers=None):
    """Build the argument parser for `accelerate test`.

    Attached as a sub-parser (with its handler wired up) when *subparsers* is
    given; otherwise returned as a standalone parser.
    """
    if subparsers is None:
        parser = argparse.ArgumentParser("Accelerate test command")
    else:
        parser = subparsers.add_parser("test")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
42
+
43
+
44
def test_command(args):
    """Run accelerate's bundled sanity-check script through `accelerate-launch`."""
    script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")

    # Forward the config file to the launcher when one was given.
    if args.config_file is None:
        test_args = [script_name]
    else:
        test_args = f"--config_file={args.config_file} {script_name}".split()

    result = execute_subprocess_async(["accelerate-launch"] + test_args)
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
56
+
57
+
58
def main():
    """Standalone entry point for `accelerate test`."""
    test_command(test_command_parser().parse_args())


if __name__ == "__main__":
    main()
mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/tpu.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+ import subprocess
20
+
21
+ from packaging.version import Version, parse
22
+
23
+ from accelerate.commands.config.config_args import default_config_file, load_config_from_file
24
+
25
+
26
+ _description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
27
+
28
+
29
def tpu_command_parser(subparsers=None):
    """Build the argument parser for `accelerate tpu-config`.

    Attached as a sub-parser (with its handler wired up) when *subparsers* is
    given; otherwise returned as a standalone parser.
    """
    if subparsers is None:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    else:
        parser = subparsers.add_parser("tpu-config", description=_description)

    # Core arguments: values that `accelerate config` can also provide.
    config_group = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_group.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_group.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_group.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )

    # Options controlling what actually runs inside the TPU pod.
    tpu_group = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    tpu_group.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    tpu_group.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    tpu_group.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    tpu_group.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    tpu_group.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    tpu_group.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
88
+
89
+
90
def tpu_command_launcher(args):
    """Assemble and run a `gcloud compute tpus tpu-vm ssh` command from the CLI args.

    Fills unset CLI options from the accelerate config file, normalizes the
    commands to run into one `;`-joined shell string, then shells out to
    `gcloud` (or just prints the command when ``--debug`` is set).

    Raises:
        ValueError: If neither a command file nor a command was provided.
    """
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        # CLI options take precedence; config values only fill the gaps.
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    # Translate the version keyword into a concrete pip install target.
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    # A command file replaces any commands given so far; one entry per line.
    if args.command_file:
        with open(args.command_file) as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    # Single shell string executed remotely on every worker.
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
151
+
152
+
153
def main():
    """Standalone entry point for `accelerate tpu-config`."""
    args = tpu_command_parser().parse_args()
    tpu_command_launcher(args)
mantis_evalkit/lib/python3.10/site-packages/accelerate/commands/utils.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+
17
+
18
+ class _StoreAction(argparse.Action):
19
+ """
20
+ Custom action that allows for `-` or `_` to be passed in for an argument.
21
+ """
22
+
23
+ def __init__(self, *args, **kwargs):
24
+ super().__init__(*args, **kwargs)
25
+ new_option_strings = []
26
+ for option_string in self.option_strings:
27
+ new_option_strings.append(option_string)
28
+ if "_" in option_string[2:]:
29
+ # Add `-` version to the option string
30
+ new_option_strings.append(option_string.replace("_", "-"))
31
+ self.option_strings = new_option_strings
32
+
33
+ def __call__(self, parser, namespace, values, option_string=None):
34
+ setattr(namespace, self.dest, values)
35
+
36
+
37
class _StoreConstAction(_StoreAction):
    """
    Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
    """

    def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
        # nargs=0: this option consumes no values on the command line.
        super().__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            const=const,
            default=default,
            required=required,
            help=help,
        )

    def __call__(self, parser, namespace, values, option_string=None):
        # Ignore `values` entirely and store the preconfigured constant.
        setattr(namespace, self.dest, self.const)
55
+
56
+
57
class _StoreTrueAction(_StoreConstAction):
    """
    Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
    """

    def __init__(self, option_strings, dest, default=None, required=False, help=None):
        # A store-true flag is just a store-const with const=True.
        super().__init__(
            option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
        )
+
74
+
75
class CustomArgumentGroup(argparse._ArgumentGroup):
    """
    Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
    when applicable.
    """

    def _add_action(self, action):
        attrs = vars(action)
        # Rebuild stock argparse actions as their underscore/dash-aware
        # counterparts. Order matters: subclasses are checked before bases.
        if isinstance(action, argparse._StoreTrueAction):
            action = _StoreTrueAction(
                attrs["option_strings"], attrs["dest"], attrs["default"], attrs["required"], attrs["help"]
            )
        elif isinstance(action, argparse._StoreConstAction):
            action = _StoreConstAction(
                attrs["option_strings"],
                attrs["dest"],
                attrs["const"],
                attrs["default"],
                attrs["required"],
                attrs["help"],
            )
        elif isinstance(action, argparse._StoreAction):
            action = _StoreAction(**attrs)
        return super()._add_action(action)
+
101
+
102
class CustomArgumentParser(argparse.ArgumentParser):
    """
    Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
    when applicable.
    """

    def add_argument(self, *args, **kwargs):
        if "action" in kwargs:
            # Translate action -> class
            kwargs["action"] = _StoreTrueAction if kwargs["action"] == "store_true" else _StoreAction
        super().add_argument(*args, **kwargs)

    def add_argument_group(self, *args, **kwargs):
        # Route all groups through the underscore/dash-aware group class.
        group = CustomArgumentGroup(self, *args, **kwargs)
        self._action_groups.append(group)
        return group
File without changes
mantis_evalkit/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc ADDED
Binary file (1.62 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: The scikit-learn developers
2
+ # SPDX-License-Identifier: BSD-3-Clause
3
+
4
+ import argparse
5
+ import os
6
+
7
+ from Cython import Tempita as tempita
8
+
9
+ # XXX: If this import ever fails (does it really?), vendor either
10
+ # cython.tempita or numpy/npy_tempita.
11
+
12
+
13
def process_tempita(fromfile, outfile=None):
    """Process tempita templated file and write out the result.

    The template file is expected to end in `.c.tp` or `.pyx.tp`:
    E.g. processing `template.c.tp` generates `template.c`.

    """
    with open(fromfile, "r", encoding="utf-8") as reader:
        raw_template = reader.read()

    rendered = tempita.Template(raw_template).substitute()

    with open(outfile, "w", encoding="utf-8") as writer:
        writer.write(rendered)
+ f.write(content)
28
+
29
+
30
def main():
    """CLI wrapper: validate arguments and render one template into --outdir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", type=str, help="Path to the input file")
    parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
    parser.add_argument(
        "-i",
        "--ignore",
        type=str,
        help=(
            "An ignored input - may be useful to add a "
            "dependency between custom targets"
        ),
    )
    args = parser.parse_args()

    if not args.infile.endswith(".tp"):
        raise ValueError(f"Unexpected extension: {args.infile}")

    if not args.outdir:
        raise ValueError("Missing `--outdir` argument to tempita.py")

    # Output name: the input basename with its trailing `.tp` stripped.
    outdir_abs = os.path.join(os.getcwd(), args.outdir)
    outfile = os.path.join(outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0])

    process_tempita(args.infile, outfile)


if __name__ == "__main__":
    main()
mantis_evalkit/lib/python3.10/site-packages/sklearn/_build_utils/version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""Extract version number from __init__.py"""

# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

import os

# Path to sklearn's package __init__.py, relative to this helper script.
sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py")

# Use a context manager so the file handle is closed deterministically
# (the original bare `open(...).readlines()` leaked the handle).
with open(sklearn_init, encoding="utf-8") as f:
    data = f.readlines()

version_line = next(line for line in data if line.startswith("__version__"))

# Strip the assignment and any surrounding quote characters.
version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")

print(version)
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__init__.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Methods and algorithms to robustly estimate covariance.
2
+
3
+ They estimate the covariance of features at given sets of points, as well as the
4
+ precision matrix defined as the inverse of the covariance. Covariance estimation is
5
+ closely related to the theory of Gaussian graphical models.
6
+ """
7
+
8
+ # Authors: The scikit-learn developers
9
+ # SPDX-License-Identifier: BSD-3-Clause
10
+
11
+ from ._elliptic_envelope import EllipticEnvelope
12
+ from ._empirical_covariance import (
13
+ EmpiricalCovariance,
14
+ empirical_covariance,
15
+ log_likelihood,
16
+ )
17
+ from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso
18
+ from ._robust_covariance import MinCovDet, fast_mcd
19
+ from ._shrunk_covariance import (
20
+ OAS,
21
+ LedoitWolf,
22
+ ShrunkCovariance,
23
+ ledoit_wolf,
24
+ ledoit_wolf_shrinkage,
25
+ oas,
26
+ shrunk_covariance,
27
+ )
28
+
29
+ __all__ = [
30
+ "EllipticEnvelope",
31
+ "EmpiricalCovariance",
32
+ "GraphicalLasso",
33
+ "GraphicalLassoCV",
34
+ "LedoitWolf",
35
+ "MinCovDet",
36
+ "OAS",
37
+ "ShrunkCovariance",
38
+ "empirical_covariance",
39
+ "fast_mcd",
40
+ "graphical_lasso",
41
+ "ledoit_wolf",
42
+ "ledoit_wolf_shrinkage",
43
+ "log_likelihood",
44
+ "oas",
45
+ "shrunk_covariance",
46
+ ]
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc ADDED
Binary file (9.55 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc ADDED
Binary file (31.5 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc ADDED
Binary file (24.3 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc ADDED
Binary file (24.4 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: The scikit-learn developers
2
+ # SPDX-License-Identifier: BSD-3-Clause
3
+
4
+ from numbers import Real
5
+
6
+ import numpy as np
7
+
8
+ from ..base import OutlierMixin, _fit_context
9
+ from ..metrics import accuracy_score
10
+ from ..utils._param_validation import Interval
11
+ from ..utils.validation import check_is_fitted
12
+ from ._robust_covariance import MinCovDet
13
+
14
+
15
class EllipticEnvelope(OutlierMixin, MinCovDet):
    """Outlier detector for Gaussian distributed datasets.

    Fits a robust covariance estimate (FastMCD) to the data and flags as
    outliers the samples whose Mahalanobis distance exceeds a threshold
    chosen from the ``contamination`` parameter.

    Read more in the :ref:`User Guide <outlier_detection>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Whether the estimated precision matrix is stored.

    assume_centered : bool, default=False
        If True, the support of robust location and covariance estimates
        is computed, and a covariance estimate is recomputed from it,
        without centering the data. Useful for data whose mean is close to
        but not exactly zero. If False, location and covariance are
        estimated directly by FastMCD.

    support_fraction : float, default=None
        Proportion of points included in the support of the raw MCD
        estimate. If None the algorithm's minimum value is used:
        `(n_samples + n_features + 1) / 2 * n_samples`. Range is (0, 1).

    contamination : float, default=0.1
        Expected proportion of outliers in the data set. Range is (0, 0.5].

    random_state : int, RandomState instance or None, default=None
        Controls the pseudo random number generator used to shuffle the
        data. See :term:`Glossary <random_state>`.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated robust location.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated robust covariance matrix.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix (stored only if
        ``store_precision`` is True).

    support_ : ndarray of shape (n_samples,)
        Mask of the observations used to compute the robust estimates of
        location and shape.

    offset_ : float
        Offset defining the decision function from the raw scores via
        ``decision_function = score_samples - offset_``. It is set so that
        the expected number of outliers (decision function < 0) is
        observed on the training set.

        .. versionadded:: 0.20

    raw_location_ : ndarray of shape (n_features,)
        Raw robust estimated location before correction and re-weighting.

    raw_covariance_ : ndarray of shape (n_features, n_features)
        Raw robust estimated covariance before correction and re-weighting.

    raw_support_ : ndarray of shape (n_samples,)
        Mask of the observations used to compute the raw robust estimates,
        before correction and re-weighting.

    dist_ : ndarray of shape (n_samples,)
        Mahalanobis distances of the training observations.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.

    Notes
    -----
    Outlier detection from covariance estimation may break or not perform
    well in high-dimensional settings; keep ``n_samples > n_features ** 2``.

    References
    ----------
    .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
       minimum covariance determinant estimator" Technometrics 41(3), 212
       (1999)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import EllipticEnvelope
    >>> true_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
    ...                                                  cov=true_cov,
    ...                                                  size=500)
    >>> cov = EllipticEnvelope(random_state=0).fit(X)
    >>> # predict returns 1 for an inlier and -1 for an outlier
    >>> cov.predict([[0, 0],
    ...              [3, 3]])
    array([ 1, -1])
    """

    _parameter_constraints: dict = {
        **MinCovDet._parameter_constraints,
        "contamination": [Interval(Real, 0, 0.5, closed="right")],
    }

    def __init__(
        self,
        *,
        store_precision=True,
        assume_centered=False,
        support_fraction=None,
        contamination=0.1,
        random_state=None,
    ):
        super().__init__(
            store_precision=store_precision,
            assume_centered=assume_centered,
            support_fraction=support_fraction,
            random_state=random_state,
        )
        self.contamination = contamination

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the EllipticEnvelope model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        super().fit(X)
        # Place the threshold so that a `contamination` fraction of the
        # training samples gets a negative decision value.
        self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)
        return self

    def decision_function(self, X):
        """Compute the decision function of the given observations.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.

        Returns
        -------
        decision : ndarray of shape (n_samples,)
            Decision function of the samples: shifted (negative)
            Mahalanobis distances. The outlier threshold is 0, for
            compatibility with other outlier detection algorithms.
        """
        check_is_fitted(self)
        return self.score_samples(X) - self.offset_

    def score_samples(self, X):
        """Compute the negative Mahalanobis distances.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.

        Returns
        -------
        negative_mahal_distances : array-like of shape (n_samples,)
            Opposite of the Mahalanobis distances.
        """
        check_is_fitted(self)
        return -self.mahalanobis(X)

    def predict(self, X):
        """
        Predict labels (1 inlier, -1 outlier) of X according to fitted model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.

        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        # Inliers are exactly the samples with a non-negative decision value.
        return np.where(self.decision_function(X) >= 0, 1, -1)

    def score(self, X, y, sample_weight=None):
        """Return the mean accuracy on the given test data and labels.

        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            True labels for X.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) w.r.t. y.
        """
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Maximum likelihood covariance estimator.
3
+
4
+ """
5
+
6
+ # Authors: The scikit-learn developers
7
+ # SPDX-License-Identifier: BSD-3-Clause
8
+
9
+ # avoid division truncation
10
+ import warnings
11
+
12
+ import numpy as np
13
+ from scipy import linalg
14
+
15
+ from sklearn.utils import metadata_routing
16
+
17
+ from .. import config_context
18
+ from ..base import BaseEstimator, _fit_context
19
+ from ..metrics.pairwise import pairwise_distances
20
+ from ..utils import check_array
21
+ from ..utils._param_validation import validate_params
22
+ from ..utils.extmath import fast_logdet
23
+ from ..utils.validation import validate_data
24
+
25
+
26
@validate_params(
    {
        "emp_cov": [np.ndarray],
        "precision": [np.ndarray],
    },
    prefer_skip_nested_validation=True,
)
def log_likelihood(emp_cov, precision):
    """Compute the sample mean of the log_likelihood under a covariance model.

    Returns the empirical expected log-likelihood of a Gaussian model with
    the given precision matrix, including the normalization terms, so the
    value is comparable across software packages.

    Parameters
    ----------
    emp_cov : ndarray of shape (n_features, n_features)
        Maximum Likelihood Estimator of covariance.

    precision : ndarray of shape (n_features, n_features)
        The precision matrix of the covariance model to be tested.

    Returns
    -------
    log_likelihood_ : float
        Sample mean of the log-likelihood.
    """
    n_features = precision.shape[0]
    # Gaussian log-likelihood: log det(precision) - tr(emp_cov @ precision)
    # minus the p*log(2*pi) normalization constant, all halved.
    log_likelihood_ = fast_logdet(precision) - np.sum(emp_cov * precision)
    log_likelihood_ -= n_features * np.log(2 * np.pi)
    return log_likelihood_ / 2.0
58
+
59
+
60
@validate_params(
    {
        "X": ["array-like"],
        "assume_centered": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def empirical_covariance(X, *, assume_centered=False):
    """Compute the Maximum likelihood covariance estimator.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If `True`, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If `False`, data will be centered before computation.

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        Empirical covariance (Maximum Likelihood Estimator).

    Examples
    --------
    >>> from sklearn.covariance import empirical_covariance
    >>> X = [[1,1,1],[1,1,1],[1,1,1],
    ...      [0,0,0],[0,0,0],[0,0,0]]
    >>> empirical_covariance(X)
    array([[0.25, 0.25, 0.25],
           [0.25, 0.25, 0.25],
           [0.25, 0.25, 0.25]])
    """
    X = check_array(X, ensure_2d=False, ensure_all_finite=False)

    # A 1-d input is interpreted as a single sample.
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))

    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )

    # When the data is assumed centered, the MLE is simply X'X / n;
    # otherwise delegate centering to np.cov (bias=1 gives the MLE).
    covariance = (
        np.dot(X.T, X) / X.shape[0]
        if assume_centered
        else np.cov(X.T, bias=1)
    )

    # np.cov collapses a single feature to a 0-d array; restore 2-d shape.
    if covariance.ndim == 0:
        covariance = np.array([[covariance]])
    return covariance
114
+
115
+
116
class EmpiricalCovariance(BaseEstimator):
    """Maximum likelihood covariance estimator.

    Read more in the :ref:`User Guide <covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specifies if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data are centered before computation.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo-inverse matrix.
        (stored only if store_precision is True)

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import EmpiricalCovariance
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> cov = EmpiricalCovariance().fit(X)
    >>> cov.covariance_
    array([[0.7569..., 0.2818...],
           [0.2818..., 0.3928...]])
    >>> cov.location_
    array([0.0622..., 0.0193...])
    """

    # X_test should have been called X
    __metadata_request__score = {"X_test": metadata_routing.UNUSED}

    _parameter_constraints: dict = {
        "store_precision": ["boolean"],
        "assume_centered": ["boolean"],
    }

    def __init__(self, *, store_precision=True, assume_centered=False):
        self.store_precision = store_precision
        self.assume_centered = assume_centered

    def _set_covariance(self, covariance):
        """Saves the covariance and precision estimates

        Storage is done accordingly to `self.store_precision`.
        Precision stored only if invertible.

        Parameters
        ----------
        covariance : array-like of shape (n_features, n_features)
            Estimated covariance matrix to be stored, and from which precision
            is computed.
        """
        covariance = check_array(covariance)
        # set covariance
        self.covariance_ = covariance
        # set precision
        if self.store_precision:
            # pinvh computes the (pseudo-)inverse of the symmetric matrix;
            # check_finite=False skips a redundant validation pass.
            self.precision_ = linalg.pinvh(covariance, check_finite=False)
        else:
            self.precision_ = None

    def get_precision(self):
        """Getter for the precision matrix.

        Returns
        -------
        precision_ : array-like of shape (n_features, n_features)
            The precision matrix associated to the current covariance object.
        """
        # If the precision was not stored at fit time, recompute it on demand.
        if self.store_precision:
            precision = self.precision_
        else:
            precision = linalg.pinvh(self.covariance_, check_finite=False)
        return precision

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the maximum likelihood covariance estimator to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        covariance = empirical_covariance(X, assume_centered=self.assume_centered)
        self._set_covariance(covariance)

        return self

    def score(self, X_test, y=None):
        """Compute the log-likelihood of `X_test` under the estimated Gaussian model.

        The Gaussian model is defined by its mean and covariance matrix which are
        represented respectively by `self.location_` and `self.covariance_`.

        Parameters
        ----------
        X_test : array-like of shape (n_samples, n_features)
            Test data of which we compute the likelihood, where `n_samples` is
            the number of samples and `n_features` is the number of features.
            `X_test` is assumed to be drawn from the same distribution than
            the data used in fit (including centering).

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        res : float
            The log-likelihood of `X_test` with `self.location_` and `self.covariance_`
            as estimators of the Gaussian model mean and covariance matrix respectively.
        """
        X_test = validate_data(self, X_test, reset=False)
        # compute empirical covariance of the test set
        # (centered with the *training* location, hence assume_centered=True)
        test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
        # compute log likelihood
        res = log_likelihood(test_cov, self.get_precision())

        return res

    def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
        """Compute the Mean Squared Error between two covariance estimators.

        Parameters
        ----------
        comp_cov : array-like of shape (n_features, n_features)
            The covariance to compare with.

        norm : {"frobenius", "spectral"}, default="frobenius"
            The type of norm used to compute the error. Available error types:
            - 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A))
            where A is the error ``(comp_cov - self.covariance_)``.

        scaling : bool, default=True
            If True (default), the squared error norm is divided by n_features.
            If False, the squared error norm is not rescaled.

        squared : bool, default=True
            Whether to compute the squared error norm or the error norm.
            If True (default), the squared error norm is returned.
            If False, the error norm is returned.

        Returns
        -------
        result : float
            The Mean Squared Error (in the sense of the Frobenius norm) between
            `self` and `comp_cov` covariance estimators.
        """
        # compute the error
        error = comp_cov - self.covariance_
        # compute the error norm
        if norm == "frobenius":
            squared_norm = np.sum(error**2)
        elif norm == "spectral":
            # largest singular value of error.T @ error
            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
        else:
            raise NotImplementedError(
                "Only spectral and frobenius norms are implemented"
            )
        # optionally scale the error norm
        # (error is (n_features, n_features), so shape[0] == n_features)
        if scaling:
            squared_norm = squared_norm / error.shape[0]
        # finally get either the squared norm or the norm
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)

        return result

    def mahalanobis(self, X):
        """Compute the squared Mahalanobis distances of given observations.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The observations, the Mahalanobis distances of the which we
            compute. Observations are assumed to be drawn from the same
            distribution than the data used in fit.

        Returns
        -------
        dist : ndarray of shape (n_samples,)
            Squared Mahalanobis distances of the observations.
        """
        X = validate_data(self, X, reset=False)

        precision = self.get_precision()
        # X was already validated above, so skip finiteness re-checks in
        # pairwise_distances.
        with config_context(assume_finite=True):
            # compute mahalanobis distances
            dist = pairwise_distances(
                X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
            )

        # pairwise_distances returns the (unsquared) distance; square it.
        return np.reshape(dist, (len(X),)) ** 2
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py ADDED
@@ -0,0 +1,1140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """GraphicalLasso: sparse inverse covariance estimation with an l1-penalized
2
+ estimator.
3
+ """
4
+
5
+ # Authors: The scikit-learn developers
6
+ # SPDX-License-Identifier: BSD-3-Clause
7
+
8
+ import operator
9
+ import sys
10
+ import time
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import linalg
16
+
17
+ from ..base import _fit_context
18
+ from ..exceptions import ConvergenceWarning
19
+
20
+ # mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'
21
+ from ..linear_model import _cd_fast as cd_fast # type: ignore
22
+ from ..linear_model import lars_path_gram
23
+ from ..model_selection import check_cv, cross_val_score
24
+ from ..utils import Bunch
25
+ from ..utils._param_validation import Interval, StrOptions, validate_params
26
+ from ..utils.metadata_routing import (
27
+ MetadataRouter,
28
+ MethodMapping,
29
+ _raise_for_params,
30
+ _routing_enabled,
31
+ process_routing,
32
+ )
33
+ from ..utils.parallel import Parallel, delayed
34
+ from ..utils.validation import (
35
+ _is_arraylike_not_scalar,
36
+ check_random_state,
37
+ check_scalar,
38
+ validate_data,
39
+ )
40
+ from . import EmpiricalCovariance, empirical_covariance, log_likelihood
41
+
42
+
43
+ # Helper functions to compute the objective and dual objective functions
44
+ # of the l1-penalized estimator
45
def _objective(mle, precision_, alpha):
    """Evaluate the graphical-lasso objective function.

    Combines a shifted, scaled version of the normalized negative
    log-likelihood (its empirical mean over the samples) with an l1
    penalty on the off-diagonal precision entries to promote sparsity.
    """
    p = precision_.shape[0]
    # Negative log-likelihood part, scaled to match the dual-gap convention.
    cost = p * np.log(2 * np.pi) - 2.0 * log_likelihood(mle, precision_)
    # The l1 penalty only applies off the diagonal.
    off_diag_l1 = np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()
    return cost + alpha * off_diag_l1
56
+
57
+
58
+ def _dual_gap(emp_cov, precision_, alpha):
59
+ """Expression of the dual gap convergence criterion
60
+
61
+ The specific definition is given in Duchi "Projected Subgradient Methods
62
+ for Learning Sparse Gaussians".
63
+ """
64
+ gap = np.sum(emp_cov * precision_)
65
+ gap -= precision_.shape[0]
66
+ gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
67
+ return gap
68
+
69
+
70
+ # The g-lasso algorithm
71
def _graphical_lasso(
    emp_cov,
    alpha,
    *,
    cov_init=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    """Inner solver for the graphical lasso.

    Estimates a sparse precision matrix by block coordinate descent: each
    outer sweep solves one l1-penalized regression per feature (either by
    coordinate descent or LARS) and updates the corresponding row/column
    of the covariance and precision estimates, until the dual gap falls
    below `tol` or `max_iter` sweeps are reached.

    Returns ``(covariance_, precision_, costs, n_iter)`` where `costs` is
    the list of (objective, dual gap) pairs per sweep.
    """
    _, n_features = emp_cov.shape
    if alpha == 0:
        # Early return without regularization
        precision_ = linalg.inv(emp_cov)
        cost = -2.0 * log_likelihood(emp_cov, precision_)
        cost += n_features * np.log(2 * np.pi)
        d_gap = np.sum(emp_cov * precision_) - n_features
        return emp_cov, precision_, (cost, d_gap), 0

    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    # Restore the exact diagonal (only off-diagonal entries are shrunk).
    diagonal = emp_cov.flat[:: n_features + 1]
    covariance_.flat[:: n_features + 1] = diagonal
    precision_ = linalg.pinvh(covariance_)

    indices = np.arange(n_features)
    i = 0  # initialize the counter to be robust to `max_iter=0`
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == "cd":
        errors = dict(over="raise", invalid="ignore")
    else:
        errors = dict(invalid="raise")
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf
        # set a sub_covariance buffer
        sub_covariance = np.copy(covariance_[1:, 1:], order="C")
        for i in range(max_iter):
            for idx in range(n_features):
                # To keep the contiguous matrix `sub_covariance` equal to
                # covariance_[indices != idx].T[indices != idx]
                # we only need to update 1 column and 1 line when idx changes
                if idx > 0:
                    di = idx - 1
                    sub_covariance[di] = covariance_[di][indices != idx]
                    sub_covariance[:, di] = covariance_[:, di][indices != idx]
                else:
                    sub_covariance[:] = covariance_[1:, 1:]
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == "cd":
                        # Use coordinate descent
                        # Warm-start from the current precision estimate;
                        # the 1000 * eps term guards against a zero pivot.
                        coefs = -(
                            precision_[indices != idx, idx]
                            / (precision_[idx, idx] + 1000 * eps)
                        )
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs,
                            alpha,
                            0,
                            sub_covariance,
                            row,
                            row,
                            max_iter,
                            enet_tol,
                            check_random_state(None),
                            False,
                        )
                    else:  # mode == "lars"
                        _, _, coefs = lars_path_gram(
                            Xy=row,
                            Gram=sub_covariance,
                            n_samples=row.size,
                            alpha_min=alpha / (n_features - 1),
                            copy_Gram=True,
                            eps=eps,
                            method="lars",
                            return_path=False,
                        )
                # Update the precision matrix
                precision_[idx, idx] = 1.0 / (
                    covariance_[idx, idx]
                    - np.dot(covariance_[indices != idx, idx], coefs)
                )
                precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
                precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
                # Keep covariance_ consistent with the new regression coefs.
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            if not np.isfinite(precision_.sum()):
                raise FloatingPointError(
                    "The system is too ill-conditioned for this solver"
                )
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    "[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e"
                    % (i, cost, d_gap)
                )
            costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError(
                    "Non SPD result: the system is too ill-conditioned for this solver"
                )
        else:
            # for/else: only reached when the loop exhausts without `break`.
            warnings.warn(
                "graphical_lasso: did not converge after %i iteration: dual gap: %.3e"
                % (max_iter, d_gap),
                ConvergenceWarning,
            )
    except FloatingPointError as e:
        e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",)
        raise e

    return covariance_, precision_, costs, i + 1
202
+
203
+
204
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : ndarray of shape (n_features, n_features)
        The sample covariance matrix.

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphicalLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    # Zero out the diagonal on a copy so only off-diagonal magnitudes count.
    off_diag = np.array(emp_cov, copy=True)
    np.fill_diagonal(off_diag, 0)
    return np.max(np.abs(off_diag))
221
+
222
+
223
@validate_params(
    {
        "emp_cov": ["array-like"],
        "return_costs": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=False,
)
def graphical_lasso(
    emp_cov,
    alpha,
    *,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    return_costs=False,
    eps=np.finfo(np.float64).eps,
    return_n_iter=False,
):
    """L1-penalized covariance estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        graph_lasso has been renamed to graphical_lasso

    Parameters
    ----------
    emp_cov : array-like of shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.

    alpha : float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        The maximum number of iterations.

    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    return_costs : bool, default=False
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        The estimated covariance matrix.

    precision : ndarray of shape (n_features, n_features)
        The estimated (sparse) precision matrix.

    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.

    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> from sklearn.covariance import empirical_covariance, graphical_lasso
    >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42)
    >>> rng = np.random.RandomState(42)
    >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3)
    >>> emp_cov = empirical_covariance(X, assume_centered=True)
    >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05)
    >>> emp_cov
    array([[ 1.68..., 0.21..., -0.20...],
           [ 0.21..., 0.22..., -0.08...],
           [-0.20..., -0.08..., 0.23...]])
    """
    # Delegate the actual optimization to the estimator class, telling it
    # that the input is already a covariance matrix.
    estimator = GraphicalLasso(
        alpha=alpha,
        mode=mode,
        covariance="precomputed",
        tol=tol,
        enet_tol=enet_tol,
        max_iter=max_iter,
        verbose=verbose,
        eps=eps,
        assume_centered=True,
    ).fit(emp_cov)

    # Assemble the return tuple; optional pieces are appended in the
    # documented order (costs before n_iter).
    results = (estimator.covariance_, estimator.precision_)
    if return_costs:
        results += (estimator.costs_,)
    if return_n_iter:
        results += (estimator.n_iter_,)
    return results
359
+
360
+
361
class BaseGraphicalLasso(EmpiricalCovariance):
    """Base class holding the hyper-parameters shared by the
    graphical-lasso estimators (``GraphicalLasso`` and ``GraphicalLassoCV``).

    This class only stores configuration; the actual optimization is
    performed by the subclasses' ``fit`` methods.
    """

    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "tol": [Interval(Real, 0, None, closed="right")],
        "enet_tol": [Interval(Real, 0, None, closed="right")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "mode": [StrOptions({"cd", "lars"})],
        "verbose": ["verbose"],
        "eps": [Interval(Real, 0, None, closed="both")],
    }
    # `store_precision` is inherited from EmpiricalCovariance but is not a
    # user-settable option for graphical-lasso estimators, so it is removed
    # from the constraints.
    _parameter_constraints.pop("store_precision")

    def __init__(
        self,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        mode="cd",
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        # Per scikit-learn convention, __init__ only stores the
        # hyper-parameters unmodified; validation happens at fit time.
        super().__init__(assume_centered=assume_centered)
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.mode = mode
        self.verbose = verbose
        self.eps = eps
390
+
391
+
392
class GraphicalLasso(BaseGraphicalLasso):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    For a usage example see
    :ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        GraphLasso has been renamed to GraphicalLasso

    Parameters
    ----------
    alpha : float, default=0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    covariance : "precomputed", default=None
        If covariance is "precomputed", the input data in `fit` is assumed
        to be the covariance matrix. If `None`, the empirical covariance
        is estimated from the data `X`.

        .. versionadded:: 1.3

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        The maximum number of iterations.

    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        plotted at each iteration.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.

    n_iter_ : int
        Number of iterations run.

    costs_ : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

        .. versionadded:: 1.3

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    graphical_lasso : L1-penalized covariance estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import GraphicalLasso
    >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
    ...                      [0.0, 0.4, 0.0, 0.0],
    ...                      [0.2, 0.0, 0.3, 0.1],
    ...                      [0.0, 0.0, 0.1, 0.7]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
    ...                                   cov=true_cov,
    ...                                   size=200)
    >>> cov = GraphicalLasso().fit(X)
    >>> np.around(cov.covariance_, decimals=3)
    array([[0.816, 0.049, 0.218, 0.019],
           [0.049, 0.364, 0.017, 0.034],
           [0.218, 0.017, 0.322, 0.093],
           [0.019, 0.034, 0.093, 0.69 ]])
    >>> np.around(cov.location_, decimals=3)
    array([0.073, 0.04 , 0.038, 0.143])
    """

    _parameter_constraints: dict = {
        **BaseGraphicalLasso._parameter_constraints,
        "alpha": [Interval(Real, 0, None, closed="both")],
        "covariance": [StrOptions({"precomputed"}), None],
    }

    def __init__(
        self,
        alpha=0.01,
        *,
        mode="cd",
        covariance=None,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        # Shared hyper-parameters are stored by the base class; only the
        # estimator-specific ones are set here.
        super().__init__(
            tol=tol,
            enet_tol=enet_tol,
            max_iter=max_iter,
            mode=mode,
            verbose=verbose,
            eps=eps,
            assume_centered=assume_centered,
        )
        self.alpha = alpha
        self.covariance = covariance

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the GraphicalLasso model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Covariance does not make sense for a single feature
        X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2)

        if self.covariance == "precomputed":
            # X is already a covariance matrix; copy it so the solver
            # never mutates the caller's array, and set the (unknown)
            # location to zero by convention.
            emp_cov = X.copy()
            self.location_ = np.zeros(X.shape[1])
        else:
            emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
            if self.assume_centered:
                self.location_ = np.zeros(X.shape[1])
            else:
                self.location_ = X.mean(0)

        # Delegate the optimization to the module-level solver and store
        # its outputs as fitted attributes.
        self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
            emp_cov,
            alpha=self.alpha,
            cov_init=None,
            mode=self.mode,
            tol=self.tol,
            enet_tol=self.enet_tol,
            max_iter=self.max_iter,
            verbose=self.verbose,
            eps=self.eps,
        )
        return self
584
+
585
+
586
+ # Cross-validation with GraphicalLasso
587
# Cross-validation with GraphicalLasso
def graphical_lasso_path(
    X,
    alphas,
    cov_init=None,
    X_test=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    alphas : array-like of shape (n_alphas,)
        The list of regularization parameters, decreasing order.

    cov_init : array of shape (n_features, n_features), default=None
        The initial guess for the covariance.

    X_test : array of shape (n_test_samples, n_features), default=None
        Optional test matrix to measure generalisation error.

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. The tolerance must be a positive
        number.

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. The tolerance must be a positive number.

    max_iter : int, default=100
        The maximum number of iterations. This parameter should be a strictly
        positive integer.

    verbose : int or bool, default=False
        The higher the verbosity flag, the more information is printed
        during the fitting.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    Returns
    -------
    covariances_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated covariance matrices.

    precisions_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated (sparse) precision matrices.

    scores_ : list of shape (n_alphas,), dtype=float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    # The nested solver is one verbosity level quieter than this function.
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)

    for alpha in alphas:
        try:
            # Capture the errors, and move on
            # NOTE: covariance_ from the previous alpha is passed as
            # cov_init, warm-starting the solver along the path.
            covariance_, precision_, _, _ = _graphical_lasso(
                emp_cov,
                alpha=alpha,
                cov_init=covariance_,
                mode=mode,
                tol=tol,
                enet_tol=enet_tol,
                max_iter=max_iter,
                verbose=inner_verbose,
                eps=eps,
            )
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            # The solver failed for this alpha: record placeholders and
            # give it the worst possible score so it is never selected.
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write(".")
        elif verbose > 1:
            if X_test is not None:
                print(
                    "[graphical_lasso_path] alpha: %.2e, score: %.2e"
                    % (alpha, this_score)
                )
            else:
                print("[graphical_lasso_path] alpha: %.2e" % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
713
+
714
+
715
class GraphicalLassoCV(BaseGraphicalLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty.

    See glossary entry for :term:`cross-validation estimator`.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        GraphLassoCV has been renamed to GraphicalLassoCV

    Parameters
    ----------
    alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details. Range is [1, inf) for an integer.
        Range is (0, inf] for an array-like of floats.

    n_refinements : int, default=4
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed. Range is [1, inf).

    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.20
            ``cv`` default value if None changed from 3-fold to 5-fold.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        Maximum number of iterations.

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.

    n_jobs : int, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

        .. versionchanged:: v0.20
            `n_jobs` default changed from 1 to None

    verbose : bool, default=False
        If verbose is True, the objective function and duality gap are
        printed at each iteration.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).

    costs_ : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

        .. versionadded:: 1.3

    alpha_ : float
        Penalization parameter selected.

    cv_results_ : dict of ndarrays
        A dict with keys:

        alphas : ndarray of shape (n_alphas,)
            All penalization parameters explored.

        split(k)_test_score : ndarray of shape (n_alphas,)
            Log-likelihood score on left-out data across (k)th fold.

            .. versionadded:: 1.0

        mean_test_score : ndarray of shape (n_alphas,)
            Mean of scores over the folds.

            .. versionadded:: 1.0

        std_test_score : ndarray of shape (n_alphas,)
            Standard deviation of scores over the folds.

            .. versionadded:: 1.0

    n_iter_ : int
        Number of iterations run for the optimal alpha.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    graphical_lasso : L1-penalized covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.

    Notes
    -----
    The search for the optimal penalization parameter (`alpha`) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.

    One of the challenges which is faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of `alpha` then come out as missing values, but the optimum may
    be close to these missing values.

    In `fit`, once the best parameter `alpha` is found through
    cross-validation, the model is fit again using the entire training set.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import GraphicalLassoCV
    >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
    ...                      [0.0, 0.4, 0.0, 0.0],
    ...                      [0.2, 0.0, 0.3, 0.1],
    ...                      [0.0, 0.0, 0.1, 0.7]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
    ...                                   cov=true_cov,
    ...                                   size=200)
    >>> cov = GraphicalLassoCV().fit(X)
    >>> np.around(cov.covariance_, decimals=3)
    array([[0.816, 0.051, 0.22 , 0.017],
           [0.051, 0.364, 0.018, 0.036],
           [0.22 , 0.018, 0.322, 0.094],
           [0.017, 0.036, 0.094, 0.69 ]])
    >>> np.around(cov.location_, decimals=3)
    array([0.073, 0.04 , 0.038, 0.143])
    """

    _parameter_constraints: dict = {
        **BaseGraphicalLasso._parameter_constraints,
        "alphas": [Interval(Integral, 0, None, closed="left"), "array-like"],
        "n_refinements": [Interval(Integral, 1, None, closed="left")],
        "cv": ["cv_object"],
        "n_jobs": [Integral, None],
    }

    def __init__(
        self,
        *,
        alphas=4,
        n_refinements=4,
        cv=None,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        mode="cd",
        n_jobs=None,
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        # Shared hyper-parameters go to the base class; CV-specific ones
        # are stored here, unmodified, per scikit-learn convention.
        super().__init__(
            tol=tol,
            enet_tol=enet_tol,
            max_iter=max_iter,
            mode=mode,
            verbose=verbose,
            eps=eps,
            assume_centered=assume_centered,
        )
        self.alphas = alphas
        self.n_refinements = n_refinements
        self.cv = cv
        self.n_jobs = n_jobs

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, **params):
        """Fit the GraphicalLasso covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : Ignored
            Not used, present for API consistency by convention.

        **params : dict, default=None
            Parameters to be passed to the CV splitter and the
            cross_val_score function.

            .. versionadded:: 1.5
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Covariance does not make sense for a single feature
        _raise_for_params(params, self, "fit")

        X = validate_data(self, X, ensure_min_features=2)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)

        cv = check_cv(self.cv, y, classifier=False)

        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)

        if _is_arraylike_not_scalar(n_alphas):
            # Explicit grid: validate each alpha and disable refinement.
            for alpha in self.alphas:
                check_scalar(
                    alpha,
                    "alpha",
                    Real,
                    min_val=0,
                    max_val=np.inf,
                    include_boundaries="right",
                )
            alphas = self.alphas
            n_refinements = 1
        else:
            # Integer grid size: build a decreasing log-spaced grid from
            # alpha_max (fully sparse) down to 1% of it.
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]

        if _routing_enabled():
            routed_params = process_routing(self, "fit", **params)
        else:
            routed_params = Bunch(splitter=Bunch(split={}))

        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter("ignore", ConvergenceWarning)
                # Compute the cross-validated loss on the current grid

                # NOTE: Warm-restarting graphical_lasso_path has been tried,
                # and this did not allow to gain anything
                # (same execution time with or without).
                this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(graphical_lasso_path)(
                        X[train],
                        alphas=alphas,
                        X_test=X[test],
                        mode=self.mode,
                        tol=self.tol,
                        enet_tol=self.enet_tol,
                        # A reduced iteration budget is used during the
                        # search; the final refit below uses the full one.
                        max_iter=int(0.1 * self.max_iter),
                        verbose=inner_verbose,
                        eps=self.eps,
                    )
                    for train, test in cv.split(X, y, **routed_params.splitter.split)
                )

            # Little dance to transform the list in what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)

            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the smallest alpha
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                # Implausibly large scores indicate a degenerate solution;
                # treat them as missing.
                if this_score >= 0.1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index

            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif best_index == last_finite_idx and not best_index == len(path) - 1:
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]

            if not _is_arraylike_not_scalar(n_alphas):
                # Endpoints were already evaluated; keep only the interior.
                alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2)
                alphas = alphas[1:-1]

            if self.verbose and n_refinements > 1:
                print(
                    "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is"
                    % (i + 1, n_refinements, time.time() - t0)
                )

        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(
            cross_val_score(
                EmpiricalCovariance(),
                X,
                cv=cv,
                n_jobs=self.n_jobs,
                verbose=inner_verbose,
                params=params,
            )
        )
        grid_scores = np.array(grid_scores)

        self.cv_results_ = {"alphas": np.array(alphas)}

        for i in range(grid_scores.shape[1]):
            self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i]

        self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1)
        self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1)

        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha

        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
            emp_cov,
            alpha=best_alpha,
            mode=self.mode,
            tol=self.tol,
            enet_tol=self.enet_tol,
            max_iter=self.max_iter,
            verbose=inner_verbose,
            eps=self.eps,
        )
        return self

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.5

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        # Only the CV splitter consumes routed metadata: fit-time params
        # are forwarded to its `split` method.
        router = MetadataRouter(owner=self.__class__.__name__).add(
            splitter=check_cv(self.cv),
            method_mapping=MethodMapping().add(callee="split", caller="fit"),
        )
        return router
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py ADDED
@@ -0,0 +1,870 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Robust location and covariance estimators.
3
+
4
+ Here are implemented estimators that are resistant to outliers.
5
+
6
+ """
7
+
8
+ # Authors: The scikit-learn developers
9
+ # SPDX-License-Identifier: BSD-3-Clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import linalg
16
+ from scipy.stats import chi2
17
+
18
+ from ..base import _fit_context
19
+ from ..utils import check_array, check_random_state
20
+ from ..utils._param_validation import Interval
21
+ from ..utils.extmath import fast_logdet
22
+ from ..utils.validation import validate_data
23
+ from ._empirical_covariance import EmpiricalCovariance, empirical_covariance
24
+
25
+
26
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(
    X,
    n_support,
    remaining_iterations=30,
    initial_estimates=None,
    verbose=False,
    cov_computation_method=empirical_covariance,
    random_state=None,
):
    """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data set in which we look for the n_support observations whose
        scatter matrix has minimum determinant.

    n_support : int
        Number of observations to compute the robust estimates of location
        and covariance from. This parameter must be greater than
        `n_samples / 2`.

    remaining_iterations : int, default=30
        Number of iterations to perform.
        According to [Rouseeuw1999]_, two iterations are sufficient to get
        close to the minimum, and we never need more than 30 to reach
        convergence.

    initial_estimates : tuple of shape (2,), default=None
        Initial estimates of location and shape from which to run the c_step
        procedure:
        - initial_estimates[0]: an initial location estimate
        - initial_estimates[1]: an initial covariance estimate

    verbose : bool, default=False
        Verbose mode.

    cov_computation_method : callable, \
            default=:func:`sklearn.covariance.empirical_covariance`
        The function which will be used to compute the covariance.
        Must return array of shape (n_features, n_features).

    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    location : ndarray of shape (n_features,)
        Robust location estimates.

    covariance : ndarray of shape (n_features, n_features)
        Robust covariance estimates.

    support : ndarray of shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    # Public entry point: normalize the inputs, then delegate the whole
    # iterative procedure to the private `_c_step` implementation.
    return _c_step(
        np.asarray(X),
        n_support,
        remaining_iterations=remaining_iterations,
        initial_estimates=initial_estimates,
        verbose=verbose,
        cov_computation_method=cov_computation_method,
        random_state=check_random_state(random_state),
    )
109
+
110
+
111
def _c_step(
    X,
    n_support,
    random_state,
    remaining_iterations=30,
    initial_estimates=None,
    verbose=False,
    cov_computation_method=empirical_covariance,
):
    """Run one sequence of concentration ("C") steps on `X`.

    Starting either from a random subset of size `n_support` or from the
    provided `initial_estimates`, repeatedly (1) compute Mahalanobis
    distances of all samples to the current (location, covariance), and
    (2) re-fit the estimates on the `n_support` closest samples. The log
    determinant of the covariance is guaranteed not to increase at each
    step, so iteration stops on convergence, on a singular covariance
    (log-det = -inf), or after `remaining_iterations` steps.

    Returns the tuple `(location, covariance, log_det, support_mask, dist)`
    where `support_mask` is a boolean mask of length `n_samples` and `dist`
    holds the squared Mahalanobis distances of all samples.
    """
    n_samples, n_features = X.shape
    dist = np.inf

    # Initialisation
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support_indices = random_state.permutation(n_samples)[:n_support]
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support_indices)
        precision = linalg.pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates: keep the n_support samples closest to the
        # provided initial (location, covariance)
        support_indices = np.argpartition(dist, n_support - 1)[:n_support]

    X_support = X[support_indices]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)

    # Iterative procedure for Minimum Covariance Determinant computation
    det = fast_logdet(covariance)
    # If the data already has singular covariance, calculate the precision,
    # as the loop below will not be entered.
    if np.isinf(det):
        precision = linalg.pinvh(covariance)

    previous_det = np.inf
    while det < previous_det and remaining_iterations > 0 and not np.isinf(det):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support_indices = support_indices
        # compute a new support_indices from the full data set mahalanobis distances
        precision = linalg.pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support_indices = np.argpartition(dist, n_support - 1)[:n_support]
        X_support = X[support_indices]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1

    # Distances w.r.t. the *final* estimates (the loop's `dist` refers to the
    # previous iteration's estimates).
    previous_dist = dist
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Check if best fit already found (det => 0, logdet => -inf)
    if np.isinf(det):
        results = location, covariance, det, support_indices, dist
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print(
                "Optimal couple (location, covariance) found before"
                " ending iterations (%d left)" % (remaining_iterations)
            )
        results = location, covariance, det, support_indices, dist
    elif det > previous_det:
        # determinant has increased (should not happen)
        warnings.warn(
            "Determinant has increased; this should not happen: "
            "log(det) > log(previous_det) (%.15f > %.15f). "
            "You may want to try with a higher value of "
            "support_fraction (current value: %.3f)."
            % (det, previous_det, n_support / n_samples),
            RuntimeWarning,
        )
        # Fall back on the estimates from the previous (better) iteration.
        results = (
            previous_location,
            previous_covariance,
            previous_det,
            previous_support_indices,
            previous_dist,
        )

    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print("Maximum number of iterations reached")
        results = location, covariance, det, support_indices, dist

    location, covariance, det, support_indices, dist = results
    # Convert from list of indices to boolean mask.
    support = np.bincount(support_indices, minlength=n_samples).astype(bool)
    return location, covariance, det, support, dist
211
+
212
+
213
def select_candidates(
    X,
    n_support,
    n_trials,
    select=1,
    n_iter=30,
    verbose=False,
    cov_computation_method=empirical_covariance,
    random_state=None,
):
    """Finds the best pure subset of observations to compute MCD from it.

    The purpose of this function is to find the best sets of n_support
    observations with respect to a minimization of their covariance
    matrix determinant. Equivalently, it removes n_samples-n_support
    observations to construct what we call a pure data set (i.e. not
    containing outliers). The list of the observations of the pure
    data set is referred to as the `support`.

    Starting from a random support, the pure data set is found by the
    c_step procedure introduced by Rousseeuw and Van Driessen in
    [RV]_.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.

    n_support : int
        The number of samples the pure data set must contain.
        This parameter must be in the range `[(n + p + 1)/2] < n_support < n`.

    n_trials : int or tuple of shape (2,)
        Number of different initial sets of observations from which to
        run the algorithm. This parameter should be a strictly positive
        integer.
        Instead of giving a number of trials to perform, one can provide a
        list of initial estimates that will be used to iteratively run
        c_step procedures. In this case:
        - n_trials[0]: array-like, shape (n_trials, n_features)
          is the list of `n_trials` initial location estimates
        - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
          is the list of `n_trials` initial covariances estimates

    select : int, default=1
        Number of best candidates results to return. This parameter must be
        a strictly positive integer.

    n_iter : int, default=30
        Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution. "Never" exceeds 20).
        This parameter must be a strictly positive integer.

    verbose : bool, default=False
        Control the output verbosity.

    cov_computation_method : callable, \
            default=:func:`sklearn.covariance.empirical_covariance`
        The function which will be used to compute the covariance.
        Must return an array of shape (n_features, n_features).

    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    See Also
    ---------
    c_step

    Returns
    -------
    best_locations : ndarray of shape (select, n_features)
        The `select` location estimates computed from the `select` best
        supports found in the data set (`X`).

    best_covariances : ndarray of shape (select, n_features, n_features)
        The `select` covariance estimates computed from the `select`
        best supports found in the data set (`X`).

    best_supports : ndarray of shape (select, n_samples)
        The `select` best supports found in the data set (`X`).

    References
    ----------
    .. [RV] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)

    # `n_trials` is either a trial count or a pair of (locations, covariances)
    # arrays providing explicit starting points for the c_step runs.
    if isinstance(n_trials, Integral):
        run_from_estimates = False
    elif isinstance(n_trials, tuple):
        run_from_estimates = True
        estimates_list = n_trials
        n_trials = estimates_list[0].shape[0]
    else:
        raise TypeError(
            "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)"
            % (n_trials, type(n_trials))
        )

    # compute `n_trials` location and shape estimates candidates in the subset
    if run_from_estimates:
        # perform computations from every given initial estimates
        all_estimates = [
            _c_step(
                X,
                n_support,
                remaining_iterations=n_iter,
                initial_estimates=(
                    estimates_list[0][trial],
                    estimates_list[1][trial],
                ),
                verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state,
            )
            for trial in range(n_trials)
        ]
    else:
        # perform `n_trials` computations from random initial supports
        all_estimates = [
            _c_step(
                X,
                n_support,
                remaining_iterations=n_iter,
                verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state,
            )
            for _ in range(n_trials)
        ]

    all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip(
        *all_estimates
    )

    # find the `n_best` best results among the `n_trials` ones: smallest
    # log-determinants first
    index_best = np.argsort(all_dets_sub)[:select]

    def _pick(values):
        # Gather the `select` entries of `values` at the best indices.
        return np.asarray(values)[index_best]

    return (
        _pick(all_locs_sub),
        _pick(all_covs_sub),
        _pick(all_supports_sub),
        _pick(all_ds_sub),
    )
357
+
358
+
359
def fast_mcd(
    X,
    support_fraction=None,
    cov_computation_method=empirical_covariance,
    random_state=None,
):
    """Estimate the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, default=None
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is `None`, which implies that the minimum
        value of `support_fraction` will be used within the algorithm:
        `(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must
        be in the range (0, 1).

    cov_computation_method : callable, \
            default=:func:`sklearn.covariance.empirical_covariance`
        The function which will be used to compute the covariance.
        Must return an array of shape (n_features, n_features).

    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    location : ndarray of shape (n_features,)
        Robust location of the data.

    covariance : ndarray of shape (n_features, n_features)
        Robust covariance of the features.

    support : ndarray of shape (n_samples,), dtype=bool
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates and random subsets before
    pooling them into larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [RouseeuwVan]_,
    see the MinCovDet object.

    References
    ----------

    .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
    """
    random_state = check_random_state(random_state)

    X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
    n_samples, n_features = X.shape

    # minimum breakdown value
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)

    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    # Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = (
                0.5
                * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
            )
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = linalg.pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # support_fraction covers the whole sample: plain mean/variance
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = linalg.pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal)
            # NOTE(review): `all_best_locations` keeps its original, larger
            # first dimension here while the covariance buffer shrinks to 10;
            # with more than 5 subsets the write below could index the shrunk
            # buffer out of range — confirm intended behavior upstream.
            n_best_tot = 10
            all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
            n_best_sub = 2
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset,
                h_subset,
                n_trials,
                select=n_best_sub,
                n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state,
            )
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        # (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = select_candidates(
            X[selection],
            h_merged,
            n_trials=(all_best_locations, all_best_covariances),
            select=n_best_merged,
            cov_computation_method=cov_computation_method,
            random_state=random_state,
        )
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance)
            location = locations_merged[0]
            covariance = covariances_merged[0]
            # map the merged-set support/distances back to full-sample arrays
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = select_candidates(
                X,
                n_support,
                n_trials=(locations_merged, covariances_merged),
                select=1,
                cov_computation_method=cov_computation_method,
                random_state=random_state,
            )
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # small sample (<= 500): no subset splitting needed
        # 1. Find the 10 best couples (location, covariance)
        # considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X,
            n_support,
            n_trials=n_trials,
            select=n_best,
            n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state,
        )
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X,
            n_support,
            n_trials=(locations_best, covariances_best),
            select=1,
            cov_computation_method=cov_computation_method,
            random_state=random_state,
        )
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]

    return location, covariance, support, dist
579
+
580
+
581
class MinCovDet(EmpiricalCovariance):
    """Minimum Covariance Determinant (MCD): robust estimator of covariance.

    The Minimum Covariance Determinant covariance estimator is to be applied
    on Gaussian-distributed data, but could still be relevant on data
    drawn from a unimodal, symmetric distribution. It is not meant to be used
    with multi-modal data (the algorithm used to fit a MinCovDet object is
    likely to fail in such a case).
    One should consider projection pursuit methods to deal with multi-modal
    datasets.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.

    support_fraction : float, default=None
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `(n_samples + n_features + 1) / (2 * n_samples)`. The parameter must
        be in the range (0, 1].

    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    raw_location_ : ndarray of shape (n_features,)
        The raw robust estimated location before correction and re-weighting.

    raw_covariance_ : ndarray of shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.

    raw_support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.

    location_ : ndarray of shape (n_features,)
        Estimated robust location.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated robust covariance matrix.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust estimates of location and shape.

    dist_ : ndarray of shape (n_samples,)
        Mahalanobis distances of the training set (on which :meth:`fit` is
        called) observations.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.

    References
    ----------

    .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.
    .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import MinCovDet
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> cov = MinCovDet(random_state=0).fit(X)
    >>> cov.covariance_
    array([[0.7411..., 0.2535...],
           [0.2535..., 0.3053...]])
    >>> cov.location_
    array([0.0813... , 0.0427...])
    """

    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "support_fraction": [Interval(Real, 0, 1, closed="right"), None],
        "random_state": ["random_state"],
    }
    # Covariance estimator applied to the selected (pure) subsets; subclasses
    # may override it to change how the non-robust covariance is computed.
    _nonrobust_covariance = staticmethod(empirical_covariance)

    def __init__(
        self,
        *,
        store_precision=True,
        assume_centered=False,
        support_fraction=None,
        random_state=None,
    ):
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X, ensure_min_samples=2, estimator="MinCovDet")
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn(
                "The covariance matrix associated to your dataset is not full rank"
            )
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X,
            support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state,
        )
        if self.assume_centered:
            # discard the estimated location and recompute covariance and
            # distances about the origin, restricted to the raw support
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(
                X[raw_support], assume_centered=True
            )
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self

    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [RVD]_.

        Parameters
        ----------
        data : array-like of shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        covariance_corrected : ndarray of shape (n_features, n_features)
            Corrected robust covariance estimate.

        References
        ----------

        .. [RVD] A Fast Algorithm for the Minimum Covariance
            Determinant Estimator, 1999, American Statistical Association
            and the American Society for Quality, TECHNOMETRICS
        """

        # Check that the covariance of the support data is not equal to 0.
        # Otherwise self.dist_ = 0 and thus correction = 0.
        n_samples = len(self.dist_)
        n_support = np.sum(self.support_)
        if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
            raise ValueError(
                "The covariance matrix of the support data "
                "is equal to 0, try to increase support_fraction"
            )
        # scale so the median squared distance matches the chi2 median
        correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        # keep dist_ consistent with the corrected covariance
        self.dist_ /= correction
        return covariance_corrected

    def reweight_covariance(self, data):
        """Re-weight raw Minimum Covariance Determinant estimates.

        Re-weight observations using Rousseeuw's method (equivalent to
        deleting outlying observations from the data set before
        computing location and covariance estimates) described
        in [RVDriessen]_.

        Parameters
        ----------
        data : array-like of shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        location_reweighted : ndarray of shape (n_features,)
            Re-weighted robust location estimate.

        covariance_reweighted : ndarray of shape (n_features, n_features)
            Re-weighted robust covariance estimate.

        support_reweighted : ndarray of shape (n_samples,), dtype=bool
            A mask of the observations that have been used to compute
            the re-weighted robust location and covariance estimates.

        References
        ----------

        .. [RVDriessen] A Fast Algorithm for the Minimum Covariance
            Determinant Estimator, 1999, American Statistical Association
            and the American Society for Quality, TECHNOMETRICS
        """
        n_samples, n_features = data.shape
        # keep only the observations within the chi2 97.5% quantile
        mask = self.dist_ < chi2(n_features).isf(0.025)
        if self.assume_centered:
            location_reweighted = np.zeros(n_features)
        else:
            location_reweighted = data[mask].mean(0)
        covariance_reweighted = self._nonrobust_covariance(
            data[mask], assume_centered=self.assume_centered
        )
        support_reweighted = np.zeros(n_samples, dtype=bool)
        support_reweighted[mask] = True
        self._set_covariance(covariance_reweighted)
        self.location_ = location_reweighted
        self.support_ = support_reweighted
        # recompute Mahalanobis distances with the re-weighted estimates
        X_centered = data - self.location_
        self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1)
        return location_reweighted, covariance_reweighted, support_reweighted
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py ADDED
@@ -0,0 +1,820 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Covariance estimators using shrinkage.
3
+
4
+ Shrinkage corresponds to regularising `cov` using a convex combination:
5
+ shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
6
+
7
+ """
8
+
9
+ # Authors: The scikit-learn developers
10
+ # SPDX-License-Identifier: BSD-3-Clause
11
+
12
+ # avoid division truncation
13
+ import warnings
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+
18
+ from ..base import _fit_context
19
+ from ..utils import check_array
20
+ from ..utils._param_validation import Interval, validate_params
21
+ from ..utils.validation import validate_data
22
+ from . import EmpiricalCovariance, empirical_covariance
23
+
24
+
25
def _ledoit_wolf(X, *, assume_centered, block_size):
    """Compute the Ledoit-Wolf shrunk covariance of ``X``.

    Returns the shrunk covariance matrix together with the shrinkage
    coefficient that was used to build it.
    """
    # Degenerate case: with a single feature the shrinkage target equals
    # the empirical variance itself, so the shrinkage amount is irrelevant.
    if X.ndim == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X**2).mean()), 0.0

    n_features = X.shape[1]

    # Shrinkage intensity from the Ledoit-Wolf formula.
    shrinkage = ledoit_wolf_shrinkage(
        X, assume_centered=assume_centered, block_size=block_size
    )
    emp_cov = empirical_covariance(X, assume_centered=assume_centered)
    # mu = trace(S) / p: the scale of the identity shrinkage target.
    mu = np.sum(np.trace(emp_cov)) / n_features
    shrunk = (1.0 - shrinkage) * emp_cov
    # Add shrinkage * mu on the diagonal only (strided flat view).
    shrunk.flat[:: n_features + 1] += shrinkage * mu

    return shrunk, shrinkage
44
+
45
+
46
def _oas(X, *, assume_centered=False):
    """Estimate covariance with Oracle Approximating Shrinkage (OAS).

    Implements the estimator of [1]_.

    [1] "Shrinkage algorithms for MMSE covariance estimation.",
        Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
        IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
        https://arxiv.org/pdf/0907.4698.pdf
    """
    # With a single feature any convex combination with the target equals
    # the empirical variance, so no shrinkage computation is needed.
    if X.ndim == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X**2).mean()), 0.0

    n_samples, n_features = X.shape
    emp_cov = empirical_covariance(X, assume_centered=assume_centered)

    # Shrinkage (cf. Eq. 23 in [1]):
    #   min((tr(S S.T) + tr(S)**2) / ((n + 1) * (tr(S S.T) - tr(S)**2 / p)), 1)
    # with n = n_samples and p = n_features.  The 2 / p factor of the
    # original formula is dropped: for large p it does not change the value
    # of the estimator.
    #
    # alpha is the mean of the squared entries of S; by the definition of
    # the Frobenius norm this equals tr(S S.T) / p**2.  The 1 / p**2
    # factors cancel between numerator and denominator.
    alpha = np.mean(emp_cov**2)
    mu = np.trace(emp_cov) / n_features
    mu_sq = mu**2

    numerator = alpha + mu_sq
    denominator = (n_samples + 1) * (alpha - mu_sq / n_features)
    shrinkage = 1.0 if denominator == 0 else min(numerator / denominator, 1.0)

    # Shrunk covariance: (1 - rho) * S + rho * F (cf. Eq. 4 in [1]) with
    # target F = (tr(S) / p) * I (cf. Eq. 3 in [1]); only the diagonal of
    # S is modified.
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu

    return shrunk_cov, shrinkage
95
+
96
+
97
+ ###############################################################################
98
+ # Public API
99
+ # ShrunkCovariance estimator
100
+
101
+
102
@validate_params(
    {
        "emp_cov": ["array-like"],
        "shrinkage": [Interval(Real, 0, 1, closed="both")],
    },
    prefer_skip_nested_validation=True,
)
def shrunk_covariance(emp_cov, shrinkage=0.1):
    """Calculate covariance matrices shrunk on the diagonal.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    emp_cov : array-like of shape (..., n_features, n_features)
        Covariance matrices to be shrunk, at least 2D ndarray.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Returns
    -------
    shrunk_cov : ndarray of shape (..., n_features, n_features)
        Shrunk covariance matrices.

    Notes
    -----
    The regularized (shrunk) covariance is given by::

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where `mu = trace(cov) / n_features`.
    """
    emp_cov = check_array(emp_cov, allow_nd=True)
    n_features = emp_cov.shape[-1]

    # Per-matrix average eigenvalue mu = trace / p; batched over the
    # leading dimensions when emp_cov is more than 2-D.
    mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features
    # Append singleton axes so mu broadcasts against the trailing
    # (n_features, n_features) axes of emp_cov.
    mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim)))

    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov += shrinkage * mu * np.eye(n_features)
    return shrunk_cov
157
+
158
+
159
class ShrunkCovariance(EmpiricalCovariance):
    """Covariance estimator with shrinkage.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero. If False, data will be centered before computation.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.

    Notes
    -----
    The regularized covariance is given by:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    """

    # Same constraints as the parent plus the shrinkage coefficient.
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "shrinkage": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1):
        super().__init__(
            store_precision=store_precision, assume_centered=assume_centered
        )
        self.shrinkage = shrinkage

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X)
        # The parent's fit is bypassed on purpose: going through
        # _set_covariance avoids a potential matrix inversion when
        # setting the precision.
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        shrunk = shrunk_covariance(
            empirical_covariance(X, assume_centered=self.assume_centered),
            self.shrinkage,
        )
        self._set_covariance(shrunk)
        return self
284
+
285
+
286
+ # Ledoit-Wolf estimator
287
+
288
+
289
@validate_params(
    {
        "X": ["array-like"],
        "assume_centered": ["boolean"],
        "block_size": [Interval(Integral, 1, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
    """Estimate the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.

    Returns
    -------
    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    """
    X = check_array(X)
    # A single feature: shrinking towards the (identical) target is moot.
    if len(X.shape) == 2 and X.shape[1] == 1:
        return 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))

    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
    n_samples, n_features = X.shape

    # Optionally center the data.
    if not assume_centered:
        X = X - X.mean(0)

    # Blocked accumulation of the two scalar sums the formula needs; a
    # non-blocked reference version is present in tests/test_covariance.py.
    n_splits = int(n_features / block_size)
    X2 = X**2
    emp_cov_trace = np.sum(X2, axis=0) / n_samples
    mu = np.sum(emp_cov_trace) / n_features
    beta_ = 0.0  # sum of the coefficients of <X2.T, X2>
    delta_ = 0.0  # sum of the *squared* coefficients of <X.T, X>
    tail = slice(block_size * n_splits, None)
    # Full blocks (rows x cols), then the ragged tail columns per row-block.
    for i in range(n_splits):
        rows = slice(block_size * i, block_size * (i + 1))
        for j in range(n_splits):
            cols = slice(block_size * j, block_size * (j + 1))
            beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
            delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
        beta_ += np.sum(np.dot(X2.T[rows], X2[:, tail]))
        delta_ += np.sum(np.dot(X.T[rows], X[:, tail]) ** 2)
    # Tail rows against the full column blocks, then tail x tail.
    for j in range(n_splits):
        cols = slice(block_size * j, block_size * (j + 1))
        beta_ += np.sum(np.dot(X2.T[tail], X2[:, cols]))
        delta_ += np.sum(np.dot(X.T[tail], X[:, cols]) ** 2)
    delta_ += np.sum(np.dot(X.T[tail], X[:, tail]) ** 2)
    delta_ /= n_samples**2
    beta_ += np.sum(np.dot(X2.T[tail], X2[:, tail]))

    # Use delta_ to compute beta.
    beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_)
    # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p.
    delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2
    delta /= n_features
    # Cap beta by delta so the shrinkage never exceeds 1, which would
    # invert the value of the covariances.
    beta = min(beta, delta)
    shrinkage = 0 if beta == 0 else beta / delta
    return shrinkage
401
+
402
+
403
@validate_params(
    {"X": ["array-like"]},
    prefer_skip_nested_validation=False,
)
def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
    """Estimate the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.
        This is purely a memory optimization and does not affect results.

    Returns
    -------
    shrunk_cov : ndarray of shape (n_features, n_features)
        Shrunk covariance.

    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    """
    # Delegate to the estimator object; the precision matrix is not
    # needed for the returned values, so skip storing it.
    lw = LedoitWolf(
        store_precision=False,
        assume_centered=assume_centered,
        block_size=block_size,
    ).fit(X)
    return lw.covariance_, lw.shrinkage_
465
+
466
+
467
class LedoitWolf(EmpiricalCovariance):
    """LedoitWolf Estimator.

    Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
    coefficient is computed using O. Ledoit and M. Wolf's formula as
    described in "A Well-Conditioned Estimator for Large-Dimensional
    Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
    Analysis, Volume 88, Issue 2, February 2004, pages 365-411.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero. If False (default), data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split
        during its Ledoit-Wolf estimation. This is purely a memory
        optimization and does not affect results.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    shrinkage_ : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.

    Notes
    -----
    The regularised covariance is:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    and shrinkage is given by the Ledoit and Wolf formula (see References)

    References
    ----------
    "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
    Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
    February 2004, pages 365-411.
    """

    # Parent constraints plus the block-splitting size.
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "block_size": [Interval(Integral, 1, None, closed="left")],
    }

    def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000):
        super().__init__(
            store_precision=store_precision, assume_centered=assume_centered
        )
        self.block_size = block_size

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the Ledoit-Wolf shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # The parent's fit is not called so the covariance matrix (and
        # potentially the precision) is not computed twice.
        X = validate_data(self, X)
        self.location_ = (
            np.zeros(X.shape[1]) if self.assume_centered else X.mean(0)
        )
        # The data is centered here, so the helper can assume centering.
        covariance, self.shrinkage_ = _ledoit_wolf(
            X - self.location_, assume_centered=True, block_size=self.block_size
        )
        self._set_covariance(covariance)
        return self
613
+
614
+
615
+ # OAS estimator
616
@validate_params(
    {"X": ["array-like"]},
    prefer_skip_nested_validation=False,
)
def oas(X, *, assume_centered=False):
    """Estimate covariance with the Oracle Approximating Shrinkage.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    Returns
    -------
    shrunk_cov : array-like of shape (n_features, n_features)
        Shrunk covariance.

    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularised covariance is:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),

    where mu = trace(cov) / n_features and shrinkage is given by the OAS
    formula (see [1]_).

    The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
    the original article, formula (23) states that 2/p (p being the number of
    features) is multiplied by Trace(cov*cov) in both the numerator and
    denominator, but this operation is omitted because for a large p, the value
    of 2/p is so small that it doesn't affect the value of the estimator.

    References
    ----------
    .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
           Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
           IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
           <0907.4698>`
    """
    # Delegate to the estimator object and expose its fitted results.
    fitted = OAS(assume_centered=assume_centered).fit(X)
    return fitted.covariance_, fitted.shrinkage_
685
+
686
+
687
class OAS(EmpiricalCovariance):
    """Oracle Approximating Shrinkage Estimator.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero. If False (default), data will be centered before computation.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    shrinkage_ : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    ShrunkCovariance : Covariance estimator with shrinkage.

    Notes
    -----
    The regularised covariance is:

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),

    where mu = trace(cov) / n_features and shrinkage is given by the OAS
    formula (see [1]_).

    The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
    the original article, formula (23) states that 2/p (p being the number of
    features) is multiplied by Trace(cov*cov) in both the numerator and
    denominator, but this operation is omitted because for a large p, the value
    of 2/p is so small that it doesn't affect the value of the estimator.

    References
    ----------
    .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
           Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
           IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
           <0907.4698>`
    """

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the Oracle Approximating Shrinkage covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X)
        # The parent's fit is skipped so the covariance matrix (and
        # potentially the precision) is not computed twice.
        self.location_ = (
            np.zeros(X.shape[1]) if self.assume_centered else X.mean(0)
        )
        # The data is centered above, so the helper can assume centering.
        covariance, self.shrinkage_ = _oas(X - self.location_, assume_centered=True)
        self._set_covariance(covariance)
        return self
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py ADDED
File without changes
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc ADDED
Binary file (7.78 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc ADDED
Binary file (1.67 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc ADDED
Binary file (8.74 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc ADDED
Binary file (4.44 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py ADDED
@@ -0,0 +1,374 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

import numpy as np
import pytest

from sklearn import datasets
from sklearn.covariance import (
    OAS,
    EmpiricalCovariance,
    LedoitWolf,
    ShrunkCovariance,
    empirical_covariance,
    ledoit_wolf,
    ledoit_wolf_shrinkage,
    oas,
    shrunk_covariance,
)
from sklearn.covariance._shrunk_covariance import _ledoit_wolf
from sklearn.utils._testing import (
    assert_allclose,
    assert_almost_equal,
    assert_array_almost_equal,
    assert_array_equal,
)

from .._shrunk_covariance import _oas

# Shared fixture data: the diabetes dataset is small and well-conditioned.
X, _ = datasets.load_diabetes(return_X_y=True)
X_1d = X[:, 0]
n_samples, n_features = X.shape


def test_covariance():
    # Tests Covariance module on a simple dataset.
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0)
    assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0)
    assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0)
    with pytest.raises(NotImplementedError):
        cov.error_norm(emp_cov, norm="foo")
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert np.amin(mahal_dist) > 0

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0)

    # test with one sample
    # Create X with 1 sample and 5 features
    X_1sample = np.arange(5).reshape(1, 5)
    cov = EmpiricalCovariance()
    warn_msg = "Only one sample available. You may want to reshape your data array"
    with pytest.warns(UserWarning, match=warn_msg):
        cov.fit(X_1sample)

    assert_array_almost_equal(cov.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)

    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))


@pytest.mark.parametrize("n_matrices", [1, 3])
def test_shrunk_covariance_func(n_matrices):
    """Check `shrunk_covariance` function."""

    n_features = 2
    cov = np.ones((n_features, n_features))
    cov_target = np.array([[1, 0.5], [0.5, 1]])

    # The function must also broadcast over a stack of matrices.
    if n_matrices > 1:
        cov = np.repeat(cov[np.newaxis, ...], n_matrices, axis=0)
        cov_target = np.repeat(cov_target[np.newaxis, ...], n_matrices, axis=0)

    cov_shrunk = shrunk_covariance(cov, 0.5)
    assert_allclose(cov_shrunk, cov_target)


def test_shrunk_covariance():
    """Check consistency between `ShrunkCovariance` and `shrunk_covariance`."""

    # Tests ShrunkCovariance module on a simple dataset.
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5), cov.covariance_, 4
    )

    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4
    )

    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.0)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert cov.precision_ is None


def test_ledoit_wolf():
    # Tests LedoitWolf module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_

    score_ = lw.score(X_centered)
    assert_almost_equal(
        ledoit_wolf_shrinkage(X_centered, assume_centered=True), shrinkage_
    )
    assert_almost_equal(
        ledoit_wolf_shrinkage(X_centered, assume_centered=True, block_size=6),
        shrinkage_,
    )
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(
        X_centered, assume_centered=True
    )
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d, assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d**2).sum() / n_samples, lw.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert lw.precision_ is None

    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(
        lw.shrinkage_, _ledoit_wolf(X=X, assume_centered=False, block_size=10000)[1]
    )
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    assert_allclose(
        X_1d.var(ddof=0),
        _ledoit_wolf(X=X_1d, assume_centered=False, block_size=10000)[0],
    )
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)

    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    lw = LedoitWolf()

    warn_msg = "Only one sample available. You may want to reshape your data array"
    with pytest.warns(UserWarning, match=warn_msg):
        lw.fit(X_1sample)

    assert_array_almost_equal(lw.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert lw.precision_ is None


def _naive_ledoit_wolf_shrinkage(X):
    # A simple implementation of the formulas from Ledoit & Wolf

    # The computation below achieves the following computations of the
    # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
    # Large-Dimensional Covariance Matrices"
    # beta and delta are given in the beginning of section 3.2
    n_samples, n_features = X.shape
    emp_cov = empirical_covariance(X, assume_centered=False)
    mu = np.trace(emp_cov) / n_features
    delta_ = emp_cov.copy()
    delta_.flat[:: n_features + 1] -= mu
    delta = (delta_**2).sum() / n_features
    X2 = X**2
    beta_ = (
        1.0
        / (n_features * n_samples)
        * np.sum(np.dot(X2.T, X2) / n_samples - emp_cov**2)
    )

    beta = min(beta_, delta)
    shrinkage = beta / delta
    return shrinkage


def test_ledoit_wolf_small():
    # Compare our blocked implementation to the naive implementation
    X_small = X[:, :4]
    lw = LedoitWolf()
    lw.fit(X_small)
    shrinkage_ = lw.shrinkage_

    assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))


def test_ledoit_wolf_large():
    # test that ledoit_wolf doesn't error on data that is wider than block_size
    rng = np.random.RandomState(0)
    # use a number of features that is larger than the block-size
    X = rng.normal(size=(10, 20))
    lw = LedoitWolf(block_size=10).fit(X)
    # check that covariance is about diagonal (random normal noise)
    assert_almost_equal(lw.covariance_, np.eye(20), 0)
    cov = lw.covariance_

    # check that the result is consistent with not splitting data into blocks.
    lw = LedoitWolf(block_size=25).fit(X)
    assert_almost_equal(lw.covariance_, cov)


@pytest.mark.parametrize(
    "ledoit_wolf_fitting_function", [LedoitWolf().fit, ledoit_wolf_shrinkage]
)
def test_ledoit_wolf_empty_array(ledoit_wolf_fitting_function):
    """Check that we validate X and raise proper error with 0-sample array."""
    X_empty = np.zeros((0, 2))
    with pytest.raises(ValueError, match="Found array with 0 sample"):
        ledoit_wolf_fitting_function(X_empty)


def test_oas():
    # Tests OAS module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0:1]
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d**2).sum() / n_samples, oa.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert oa.precision_ is None

    # Same tests without assuming centered data--------------------------------
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)

    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    oa = OAS()
    warn_msg = "Only one sample available. You may want to reshape your data array"
    with pytest.warns(UserWarning, match=warn_msg):
        oa.fit(X_1sample)

    assert_array_almost_equal(oa.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert oa.precision_ is None

    # test function _oas without assuming centered data
    X_1f = X[:, 0:1]
    oa = OAS()
    oa.fit(X_1f)
    # compare shrunk covariance obtained from data and from MLE estimate
    _oa_cov_from_mle, _oa_shrinkage_from_mle = _oas(X_1f)
    assert_array_almost_equal(_oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(_oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1f**2).sum() / n_samples, oa.covariance_, 4)


def test_EmpiricalCovariance_validates_mahalanobis():
    """Checks that EmpiricalCovariance validates data with mahalanobis."""
    cov = EmpiricalCovariance().fit(X)

    msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input"
    with pytest.raises(ValueError, match=msg):
        cov.mahalanobis(X[:, :2])
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope).
3
+ """
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from sklearn.covariance import EllipticEnvelope
9
+ from sklearn.exceptions import NotFittedError
10
+ from sklearn.utils._testing import (
11
+ assert_almost_equal,
12
+ assert_array_almost_equal,
13
+ assert_array_equal,
14
+ )
15
+
16
+
17
+ def test_elliptic_envelope(global_random_seed):
18
+ rnd = np.random.RandomState(global_random_seed)
19
+ X = rnd.randn(100, 10)
20
+ clf = EllipticEnvelope(contamination=0.1)
21
+ with pytest.raises(NotFittedError):
22
+ clf.predict(X)
23
+ with pytest.raises(NotFittedError):
24
+ clf.decision_function(X)
25
+ clf.fit(X)
26
+ y_pred = clf.predict(X)
27
+ scores = clf.score_samples(X)
28
+ decisions = clf.decision_function(X)
29
+
30
+ assert_array_almost_equal(scores, -clf.mahalanobis(X))
31
+ assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
32
+ assert_almost_equal(
33
+ clf.score(X, np.ones(100)), (100 - y_pred[y_pred == -1].size) / 100.0
34
+ )
35
+ assert sum(y_pred == -1) == sum(decisions < 0)
36
+
37
+
38
+ def test_score_samples():
39
+ X_train = [[1, 1], [1, 2], [2, 1]]
40
+ clf1 = EllipticEnvelope(contamination=0.2).fit(X_train)
41
+ clf2 = EllipticEnvelope().fit(X_train)
42
+ assert_array_equal(
43
+ clf1.score_samples([[2.0, 2.0]]),
44
+ clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
45
+ )
46
+ assert_array_equal(
47
+ clf2.score_samples([[2.0, 2.0]]),
48
+ clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
49
+ )
50
+ assert_array_equal(
51
+ clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
52
+ )
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test the graphical_lasso module."""
2
+
3
+ import sys
4
+ from io import StringIO
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from numpy.testing import assert_allclose
9
+ from scipy import linalg
10
+
11
+ from sklearn import config_context, datasets
12
+ from sklearn.covariance import (
13
+ GraphicalLasso,
14
+ GraphicalLassoCV,
15
+ empirical_covariance,
16
+ graphical_lasso,
17
+ )
18
+ from sklearn.datasets import make_sparse_spd_matrix
19
+ from sklearn.model_selection import GroupKFold
20
+ from sklearn.utils import check_random_state
21
+ from sklearn.utils._testing import (
22
+ _convert_container,
23
+ assert_array_almost_equal,
24
+ assert_array_less,
25
+ )
26
+
27
+
28
+ def test_graphical_lassos(random_state=1):
29
+ """Test the graphical lasso solvers.
30
+
31
+ This checks is unstable for some random seeds where the covariance found with "cd"
32
+ and "lars" solvers are different (4 cases / 100 tries).
33
+ """
34
+ # Sample data from a sparse multivariate normal
35
+ dim = 20
36
+ n_samples = 100
37
+ random_state = check_random_state(random_state)
38
+ prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
39
+ cov = linalg.inv(prec)
40
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
41
+ emp_cov = empirical_covariance(X)
42
+
43
+ for alpha in (0.0, 0.1, 0.25):
44
+ covs = dict()
45
+ icovs = dict()
46
+ for method in ("cd", "lars"):
47
+ cov_, icov_, costs = graphical_lasso(
48
+ emp_cov, return_costs=True, alpha=alpha, mode=method
49
+ )
50
+ covs[method] = cov_
51
+ icovs[method] = icov_
52
+ costs, dual_gap = np.array(costs).T
53
+ # Check that the costs always decrease (doesn't hold if alpha == 0)
54
+ if not alpha == 0:
55
+ # use 1e-12 since the cost can be exactly 0
56
+ assert_array_less(np.diff(costs), 1e-12)
57
+ # Check that the 2 approaches give similar results
58
+ assert_allclose(covs["cd"], covs["lars"], atol=5e-4)
59
+ assert_allclose(icovs["cd"], icovs["lars"], atol=5e-4)
60
+
61
+ # Smoke test the estimator
62
+ model = GraphicalLasso(alpha=0.25).fit(X)
63
+ model.score(X)
64
+ assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4)
65
+ assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4)
66
+
67
+ # For a centered matrix, assume_centered could be chosen True or False
68
+ # Check that this returns indeed the same result for centered data
69
+ Z = X - X.mean(0)
70
+ precs = list()
71
+ for assume_centered in (False, True):
72
+ prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_
73
+ precs.append(prec_)
74
+ assert_array_almost_equal(precs[0], precs[1])
75
+
76
+
77
+ def test_graphical_lasso_when_alpha_equals_0():
78
+ """Test graphical_lasso's early return condition when alpha=0."""
79
+ X = np.random.randn(100, 10)
80
+ emp_cov = empirical_covariance(X, assume_centered=True)
81
+
82
+ model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov)
83
+ assert_allclose(model.precision_, np.linalg.inv(emp_cov))
84
+
85
+ _, precision = graphical_lasso(emp_cov, alpha=0)
86
+ assert_allclose(precision, np.linalg.inv(emp_cov))
87
+
88
+
89
+ @pytest.mark.parametrize("mode", ["cd", "lars"])
90
+ def test_graphical_lasso_n_iter(mode):
91
+ X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0)
92
+ emp_cov = empirical_covariance(X)
93
+
94
+ _, _, n_iter = graphical_lasso(
95
+ emp_cov, 0.2, mode=mode, max_iter=2, return_n_iter=True
96
+ )
97
+ assert n_iter == 2
98
+
99
+
100
+ def test_graphical_lasso_iris():
101
+ # Hard-coded solution from R glasso package for alpha=1.0
102
+ # (need to set penalize.diagonal to FALSE)
103
+ cov_R = np.array(
104
+ [
105
+ [0.68112222, 0.0000000, 0.265820, 0.02464314],
106
+ [0.00000000, 0.1887129, 0.000000, 0.00000000],
107
+ [0.26582000, 0.0000000, 3.095503, 0.28697200],
108
+ [0.02464314, 0.0000000, 0.286972, 0.57713289],
109
+ ]
110
+ )
111
+ icov_R = np.array(
112
+ [
113
+ [1.5190747, 0.000000, -0.1304475, 0.0000000],
114
+ [0.0000000, 5.299055, 0.0000000, 0.0000000],
115
+ [-0.1304475, 0.000000, 0.3498624, -0.1683946],
116
+ [0.0000000, 0.000000, -0.1683946, 1.8164353],
117
+ ]
118
+ )
119
+ X = datasets.load_iris().data
120
+ emp_cov = empirical_covariance(X)
121
+ for method in ("cd", "lars"):
122
+ cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method)
123
+ assert_array_almost_equal(cov, cov_R)
124
+ assert_array_almost_equal(icov, icov_R)
125
+
126
+
127
+ def test_graph_lasso_2D():
128
+ # Hard-coded solution from Python skggm package
129
+ # obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
130
+ cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]])
131
+
132
+ icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]])
133
+ X = datasets.load_iris().data[:, 2:]
134
+ emp_cov = empirical_covariance(X)
135
+ for method in ("cd", "lars"):
136
+ cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method)
137
+ assert_array_almost_equal(cov, cov_skggm)
138
+ assert_array_almost_equal(icov, icov_skggm)
139
+
140
+
141
+ def test_graphical_lasso_iris_singular():
142
+ # Small subset of rows to test the rank-deficient case
143
+ # Need to choose samples such that none of the variances are zero
144
+ indices = np.arange(10, 13)
145
+
146
+ # Hard-coded solution from R glasso package for alpha=0.01
147
+ cov_R = np.array(
148
+ [
149
+ [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
150
+ [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
151
+ [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
152
+ [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222],
153
+ ]
154
+ )
155
+ icov_R = np.array(
156
+ [
157
+ [24.42244057, -16.831679593, 0.0, 0.0],
158
+ [-16.83168201, 24.351841681, -6.206896552, -12.5],
159
+ [0.0, -6.206896171, 153.103448276, 0.0],
160
+ [0.0, -12.499999143, 0.0, 462.5],
161
+ ]
162
+ )
163
+ X = datasets.load_iris().data[indices, :]
164
+ emp_cov = empirical_covariance(X)
165
+ for method in ("cd", "lars"):
166
+ cov, icov = graphical_lasso(
167
+ emp_cov, alpha=0.01, return_costs=False, mode=method
168
+ )
169
+ assert_array_almost_equal(cov, cov_R, decimal=5)
170
+ assert_array_almost_equal(icov, icov_R, decimal=5)
171
+
172
+
173
+ def test_graphical_lasso_cv(random_state=1):
174
+ # Sample data from a sparse multivariate normal
175
+ dim = 5
176
+ n_samples = 6
177
+ random_state = check_random_state(random_state)
178
+ prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state)
179
+ cov = linalg.inv(prec)
180
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
181
+ # Capture stdout, to smoke test the verbose mode
182
+ orig_stdout = sys.stdout
183
+ try:
184
+ sys.stdout = StringIO()
185
+ # We need verbose very high so that Parallel prints on stdout
186
+ GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
187
+ finally:
188
+ sys.stdout = orig_stdout
189
+
190
+
191
+ @pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"])
192
+ def test_graphical_lasso_cv_alphas_iterable(alphas_container_type):
193
+ """Check that we can pass an array-like to `alphas`.
194
+
195
+ Non-regression test for:
196
+ https://github.com/scikit-learn/scikit-learn/issues/22489
197
+ """
198
+ true_cov = np.array(
199
+ [
200
+ [0.8, 0.0, 0.2, 0.0],
201
+ [0.0, 0.4, 0.0, 0.0],
202
+ [0.2, 0.0, 0.3, 0.1],
203
+ [0.0, 0.0, 0.1, 0.7],
204
+ ]
205
+ )
206
+ rng = np.random.RandomState(0)
207
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
208
+ alphas = _convert_container([0.02, 0.03], alphas_container_type)
209
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
210
+
211
+
212
+ @pytest.mark.parametrize(
213
+ "alphas,err_type,err_msg",
214
+ [
215
+ ([-0.02, 0.03], ValueError, "must be > 0"),
216
+ ([0, 0.03], ValueError, "must be > 0"),
217
+ (["not_number", 0.03], TypeError, "must be an instance of float"),
218
+ ],
219
+ )
220
+ def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg):
221
+ """Check that if an array-like containing a value
222
+ outside of (0, inf] is passed to `alphas`, a ValueError is raised.
223
+ Check if a string is passed, a TypeError is raised.
224
+ """
225
+ true_cov = np.array(
226
+ [
227
+ [0.8, 0.0, 0.2, 0.0],
228
+ [0.0, 0.4, 0.0, 0.0],
229
+ [0.2, 0.0, 0.3, 0.1],
230
+ [0.0, 0.0, 0.1, 0.7],
231
+ ]
232
+ )
233
+ rng = np.random.RandomState(0)
234
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
235
+
236
+ with pytest.raises(err_type, match=err_msg):
237
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
238
+
239
+
240
+ def test_graphical_lasso_cv_scores():
241
+ splits = 4
242
+ n_alphas = 5
243
+ n_refinements = 3
244
+ true_cov = np.array(
245
+ [
246
+ [0.8, 0.0, 0.2, 0.0],
247
+ [0.0, 0.4, 0.0, 0.0],
248
+ [0.2, 0.0, 0.3, 0.1],
249
+ [0.0, 0.0, 0.1, 0.7],
250
+ ]
251
+ )
252
+ rng = np.random.RandomState(0)
253
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
254
+ cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit(
255
+ X
256
+ )
257
+
258
+ _assert_graphical_lasso_cv_scores(
259
+ cov=cov,
260
+ n_splits=splits,
261
+ n_refinements=n_refinements,
262
+ n_alphas=n_alphas,
263
+ )
264
+
265
+
266
+ @config_context(enable_metadata_routing=True)
267
+ def test_graphical_lasso_cv_scores_with_routing(global_random_seed):
268
+ """Check that `GraphicalLassoCV` internally dispatches metadata to
269
+ the splitter.
270
+ """
271
+ splits = 5
272
+ n_alphas = 5
273
+ n_refinements = 3
274
+ true_cov = np.array(
275
+ [
276
+ [0.8, 0.0, 0.2, 0.0],
277
+ [0.0, 0.4, 0.0, 0.0],
278
+ [0.2, 0.0, 0.3, 0.1],
279
+ [0.0, 0.0, 0.1, 0.7],
280
+ ]
281
+ )
282
+ rng = np.random.RandomState(global_random_seed)
283
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=300)
284
+ n_samples = X.shape[0]
285
+ groups = rng.randint(0, 5, n_samples)
286
+ params = {"groups": groups}
287
+ cv = GroupKFold(n_splits=splits)
288
+ cv.set_split_request(groups=True)
289
+
290
+ cov = GraphicalLassoCV(cv=cv, alphas=n_alphas, n_refinements=n_refinements).fit(
291
+ X, **params
292
+ )
293
+
294
+ _assert_graphical_lasso_cv_scores(
295
+ cov=cov,
296
+ n_splits=splits,
297
+ n_refinements=n_refinements,
298
+ n_alphas=n_alphas,
299
+ )
300
+
301
+
302
+ def _assert_graphical_lasso_cv_scores(cov, n_splits, n_refinements, n_alphas):
303
+ cv_results = cov.cv_results_
304
+ # alpha and one for each split
305
+
306
+ total_alphas = n_refinements * n_alphas + 1
307
+ keys = ["alphas"]
308
+ split_keys = [f"split{i}_test_score" for i in range(n_splits)]
309
+ for key in keys + split_keys:
310
+ assert key in cv_results
311
+ assert len(cv_results[key]) == total_alphas
312
+
313
+ cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
314
+ expected_mean = cv_scores.mean(axis=0)
315
+ expected_std = cv_scores.std(axis=0)
316
+
317
+ assert_allclose(cov.cv_results_["mean_test_score"], expected_mean)
318
+ assert_allclose(cov.cv_results_["std_test_score"], expected_std)
mantis_evalkit/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: The scikit-learn developers
2
+ # SPDX-License-Identifier: BSD-3-Clause
3
+
4
+ import itertools
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ from sklearn import datasets
10
+ from sklearn.covariance import MinCovDet, empirical_covariance, fast_mcd
11
+ from sklearn.utils._testing import assert_array_almost_equal
12
+
13
+ X = datasets.load_iris().data
14
+ X_1d = X[:, 0]
15
+ n_samples, n_features = X.shape
16
+
17
+
18
+ def test_mcd(global_random_seed):
19
+ # Tests the FastMCD algorithm implementation
20
+ # Small data set
21
+ # test without outliers (random independent normal data)
22
+ launch_mcd_on_dataset(100, 5, 0, 0.02, 0.1, 75, global_random_seed)
23
+ # test with a contaminated data set (medium contamination)
24
+ launch_mcd_on_dataset(100, 5, 20, 0.3, 0.3, 65, global_random_seed)
25
+ # test with a contaminated data set (strong contamination)
26
+ launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50, global_random_seed)
27
+
28
+ # Medium data set
29
+ launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540, global_random_seed)
30
+
31
+ # Large data set
32
+ launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870, global_random_seed)
33
+
34
+ # 1D data set
35
+ launch_mcd_on_dataset(500, 1, 100, 0.02, 0.02, 350, global_random_seed)
36
+
37
+
38
+ def test_fast_mcd_on_invalid_input():
39
+ X = np.arange(100)
40
+ msg = "Expected 2D array, got 1D array instead"
41
+ with pytest.raises(ValueError, match=msg):
42
+ fast_mcd(X)
43
+
44
+
45
+ def test_mcd_class_on_invalid_input():
46
+ X = np.arange(100)
47
+ mcd = MinCovDet()
48
+ msg = "Expected 2D array, got 1D array instead"
49
+ with pytest.raises(ValueError, match=msg):
50
+ mcd.fit(X)
51
+
52
+
53
def launch_mcd_on_dataset(
    n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support, seed
):
    """Fit ``MinCovDet`` on contaminated Gaussian data and check robustness.

    Draws ``n_samples`` standard-normal points, shifts ``n_outliers`` of them
    by +/-5 per feature, then asserts that the robust location / covariance
    estimates stay within ``tol_loc`` / ``tol_cov`` (mean squared error) of
    the estimates computed on the inliers only, and that at least
    ``tol_support`` points end up in the MCD support.
    """
    rand_gen = np.random.RandomState(seed)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    # per-feature offsets are +/-5.0: 10 * ({0, 1} - 0.5)
    outliers_offset = 10.0 * (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False

    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=seed).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert error_location < tol_loc
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert error_cov < tol_cov
    assert np.sum(H) >= tol_support
    # mahalanobis() on the training data must match the stored distances
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
78
+
79
+
80
def test_mcd_issue1127():
    """Non-regression test: MinCovDet must not break when X.shape = (3, 1),
    i.e. when n_support == n_samples."""
    tiny_X = np.random.RandomState(0).normal(size=(3, 1))
    MinCovDet().fit(tiny_X)
87
+
88
+
89
def test_mcd_issue3367(global_random_seed):
    """MCD must complete when the covariance matrix is singular, i.e. when an
    entire row/column of the covariance is zero."""
    rng = np.random.RandomState(global_random_seed)

    # X and Y coordinates: 10 values between -5 and 5, then their cartesian
    # product gives a regular 2D grid of points.
    grid_values = np.linspace(-5, 5, 10).tolist()
    planar_points = np.array(list(itertools.product(grid_values, grid_values)))

    # Append an all-zero third column: the points now live in the XY plane of
    # 3D space, so their covariance matrix is singular by construction.
    planar_points = np.hstack((planar_points, np.zeros((planar_points.shape[0], 1))))

    # Fitting should not raise even though the covariance matrix is singular.
    # As a further (manual) check, since the points span a plane, the
    # eigenvector of the fitted covariance associated with the smallest
    # eigenvalue should be the plane normal [0, 0, 1]:
    #
    #   evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    #   normal = evecs[:, np.argmin(evals)]
    #
    # and `normal` should equal [0, 0, 1] up to floating point error (compare
    # the difference against a small tolerance such as 1e-12).
    MinCovDet(random_state=rng).fit(planar_points)
119
+
120
+
121
def test_mcd_support_covariance_is_zero():
    """MinCovDet must raise an informative ValueError when the covariance of
    the support data equals 0."""
    # Two 1D datasets whose majority value is constant, making the support
    # covariance degenerate.
    first = np.array(
        [0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1]
    ).reshape(-1, 1)
    second = np.array(
        [0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3]
    ).reshape(-1, 1)
    expected_msg = (
        "The covariance matrix of the support data is equal to 0, try to "
        "increase support_fraction"
    )
    for X in (first, second):
        with pytest.raises(ValueError, match=expected_msg):
            MinCovDet().fit(X)
135
+
136
+
137
def test_mcd_increasing_det_warning(global_random_seed):
    # Check that a warning is raised if we observe increasing determinants
    # during the c_step. In theory the sequence of determinants should be
    # decreasing. Increasing determinants are likely due to ill-conditioned
    # covariance matrices that result in poor precision matrices.

    # Small fixed dataset (values resemble iris measurements — incidental;
    # what matters is that it triggers the ill-conditioned case).
    X = [
        [5.1, 3.5, 1.4, 0.2],
        [4.9, 3.0, 1.4, 0.2],
        [4.7, 3.2, 1.3, 0.2],
        [4.6, 3.1, 1.5, 0.2],
        [5.0, 3.6, 1.4, 0.2],
        [4.6, 3.4, 1.4, 0.3],
        [5.0, 3.4, 1.5, 0.2],
        [4.4, 2.9, 1.4, 0.2],
        [4.9, 3.1, 1.5, 0.1],
        [5.4, 3.7, 1.5, 0.2],
        [4.8, 3.4, 1.6, 0.2],
        [4.8, 3.0, 1.4, 0.1],
        [4.3, 3.0, 1.1, 0.1],
        [5.1, 3.5, 1.4, 0.3],
        [5.7, 3.8, 1.7, 0.3],
        [5.4, 3.4, 1.7, 0.2],
        [4.6, 3.6, 1.0, 0.2],
        [5.0, 3.0, 1.6, 0.2],
        [5.2, 3.5, 1.5, 0.2],
    ]

    # support_fraction=0.5 makes the c_step operate on small subsets, which is
    # what provokes the increasing-determinant condition here.
    mcd = MinCovDet(support_fraction=0.5, random_state=global_random_seed)
    warn_msg = "Determinant has increased"
    with pytest.warns(RuntimeWarning, match=warn_msg):
        mcd.fit(X)
mantis_evalkit/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.26 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc ADDED
Binary file (5.94 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/frozen/_frozen.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: The scikit-learn developers
2
+ # SPDX-License-Identifier: BSD-3-Clause
3
+
4
+ from copy import deepcopy
5
+
6
+ from ..base import BaseEstimator
7
+ from ..exceptions import NotFittedError
8
+ from ..utils import get_tags
9
+ from ..utils.metaestimators import available_if
10
+ from ..utils.validation import check_is_fitted
11
+
12
+
13
+ def _estimator_has(attr):
14
+ """Check that final_estimator has `attr`.
15
+
16
+ Used together with `available_if`.
17
+ """
18
+
19
+ def check(self):
20
+ # raise original `AttributeError` if `attr` does not exist
21
+ getattr(self.estimator, attr)
22
+ return True
23
+
24
+ return check
25
+
26
+
27
class FrozenEstimator(BaseEstimator):
    """Estimator that wraps a fitted estimator to prevent re-fitting.

    This meta-estimator takes an estimator and freezes it, in the sense that calling
    `fit` on it has no effect. `fit_predict` and `fit_transform` are also disabled.
    All other methods are delegated to the original estimator and original estimator's
    attributes are accessible as well.

    This is particularly useful when you have a fitted or a pre-trained model as a
    transformer in a pipeline, and you'd like `pipeline.fit` to have no effect on this
    step.

    Parameters
    ----------
    estimator : estimator
        The estimator which is to be kept frozen.

    See Also
    --------
    None: No similar entry in the scikit-learn documentation.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.frozen import FrozenEstimator
    >>> from sklearn.linear_model import LogisticRegression
    >>> X, y = make_classification(random_state=0)
    >>> clf = LogisticRegression(random_state=0).fit(X, y)
    >>> frozen_clf = FrozenEstimator(clf)
    >>> frozen_clf.fit(X, y)  # No-op
    FrozenEstimator(estimator=LogisticRegression(random_state=0))
    >>> frozen_clf.predict(X)  # Predictions from `clf.predict`
    array(...)
    """

    def __init__(self, estimator):
        self.estimator = estimator

    @available_if(_estimator_has("__getitem__"))
    def __getitem__(self, *args, **kwargs):
        """__getitem__ is defined in :class:`~sklearn.pipeline.Pipeline` and \
        :class:`~sklearn.compose.ColumnTransformer`.
        """
        return self.estimator.__getitem__(*args, **kwargs)

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, so everything not
        # defined on FrozenEstimator itself is delegated to the wrapped
        # estimator — except the composite fitting methods, which must stay
        # disabled on a frozen estimator.
        if name in ["fit_predict", "fit_transform"]:
            raise AttributeError(f"{name} is not available for frozen estimators.")
        return getattr(self.estimator, name)

    def __sklearn_clone__(self):
        # Cloning returns the frozen estimator itself: the fitted state must
        # survive `clone`, which is the whole point of freezing.
        return self

    def __sklearn_is_fitted__(self):
        # Fittedness is delegated to the inner estimator.
        try:
            check_is_fitted(self.estimator)
            return True
        except NotFittedError:
            return False

    def fit(self, X, y, *args, **kwargs):
        """No-op.

        As a frozen estimator, calling `fit` has no effect.

        Parameters
        ----------
        X : object
            Ignored.

        y : object
            Ignored.

        *args : tuple
            Additional positional arguments. Ignored, but present for API compatibility
            with `self.estimator`.

        **kwargs : dict
            Additional keyword arguments. Ignored, but present for API compatibility
            with `self.estimator`.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Only validate that the inner estimator is already fitted; never
        # re-fit it.
        check_is_fitted(self.estimator)
        return self

    def set_params(self, **kwargs):
        """Set the parameters of this estimator.

        The only valid key here is `estimator`. You cannot set the parameters of the
        inner estimator.

        Parameters
        ----------
        **kwargs : dict
            Estimator parameters.

        Returns
        -------
        self : FrozenEstimator
            This estimator.
        """
        estimator = kwargs.pop("estimator", None)
        if estimator is not None:
            self.estimator = estimator
        if kwargs:
            raise ValueError(
                "You cannot set parameters of the inner estimator in a frozen "
                "estimator since calling `fit` has no effect. You can use "
                "`frozenestimator.estimator.set_params` to set parameters of the inner "
                "estimator."
            )
        # Fix: return `self` as documented above and as `BaseEstimator.set_params`
        # does, so call chaining keeps working on frozen estimators.
        return self

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Returns a `{"estimator": estimator}` dict. The parameters of the inner
        estimator are not included.

        Parameters
        ----------
        deep : bool, default=True
            Ignored.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        return {"estimator": self.estimator}

    def __sklearn_tags__(self):
        # Inherit the inner estimator's tags (deep-copied so the original is
        # untouched) but opt out of the common estimator checks.
        tags = deepcopy(get_tags(self.estimator))
        tags._skip_test = True
        return tags
mantis_evalkit/lib/python3.10/site-packages/sklearn/frozen/tests/test_frozen.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: The scikit-learn developers
2
+ # SPDX-License-Identifier: BSD-3-Clause
3
+
4
+ import re
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from numpy.testing import assert_array_equal
9
+
10
+ from sklearn import config_context
11
+ from sklearn.base import (
12
+ BaseEstimator,
13
+ clone,
14
+ is_classifier,
15
+ is_clusterer,
16
+ is_outlier_detector,
17
+ is_regressor,
18
+ )
19
+ from sklearn.cluster import KMeans
20
+ from sklearn.compose import make_column_transformer
21
+ from sklearn.datasets import make_classification, make_regression
22
+ from sklearn.exceptions import NotFittedError, UnsetMetadataPassedError
23
+ from sklearn.frozen import FrozenEstimator
24
+ from sklearn.linear_model import LinearRegression, LogisticRegression
25
+ from sklearn.neighbors import LocalOutlierFactor
26
+ from sklearn.pipeline import make_pipeline
27
+ from sklearn.preprocessing import RobustScaler, StandardScaler
28
+ from sklearn.utils._testing import set_random_state
29
+ from sklearn.utils.validation import check_is_fitted
30
+
31
+
32
@pytest.fixture
def regression_dataset():
    # Fresh random (X, y) regression pair per test that requests the fixture.
    return make_regression()
35
+
36
+
37
@pytest.fixture
def classification_dataset():
    # Fresh random (X, y) classification pair per test that requests the fixture.
    return make_classification()
40
+
41
+
42
@pytest.mark.parametrize(
    "estimator, dataset",
    [
        (LinearRegression(), "regression_dataset"),
        (LogisticRegression(), "classification_dataset"),
        (make_pipeline(StandardScaler(), LinearRegression()), "regression_dataset"),
        (
            make_pipeline(StandardScaler(), LogisticRegression()),
            "classification_dataset",
        ),
        (StandardScaler(), "regression_dataset"),
        (KMeans(), "regression_dataset"),
        (LocalOutlierFactor(), "regression_dataset"),
        (
            make_column_transformer(
                (StandardScaler(), [0]),
                (RobustScaler(), [1]),
            ),
            "regression_dataset",
        ),
    ],
)
@pytest.mark.parametrize(
    "method",
    ["predict", "predict_proba", "predict_log_proba", "decision_function", "transform"],
)
def test_frozen_methods(estimator, dataset, request, method):
    """Test that frozen.fit doesn't do anything, and that all other methods are
    exposed by the frozen estimator and return the same values as the estimator.
    """
    # `dataset` is the name of a fixture; resolve it at runtime.
    X, y = request.getfixturevalue(dataset)
    set_random_state(estimator)
    estimator.fit(X, y)
    frozen = FrozenEstimator(estimator)
    # this should be no-op: fitting on unrelated data must not change the
    # predictions checked below against the originally fitted estimator
    frozen.fit([[1]], [1])

    # Not every estimator in the matrix implements every method; only compare
    # when the method exists on the underlying estimator.
    if hasattr(estimator, method):
        assert_array_equal(getattr(estimator, method)(X), getattr(frozen, method)(X))

    # The frozen wrapper must preserve the estimator-type predicates.
    assert is_classifier(estimator) == is_classifier(frozen)
    assert is_regressor(estimator) == is_regressor(frozen)
    assert is_clusterer(estimator) == is_clusterer(frozen)
    assert is_outlier_detector(estimator) == is_outlier_detector(frozen)
86
+
87
+
88
@config_context(enable_metadata_routing=True)
def test_frozen_metadata_routing(regression_dataset):
    """Test that metadata routing works with frozen estimators."""

    # Minimal estimator that asserts it actually receives routed metadata.
    class ConsumesMetadata(BaseEstimator):
        def __init__(self, on_fit=None, on_predict=None):
            self.on_fit = on_fit
            self.on_predict = on_predict

        def fit(self, X, y, metadata=None):
            if self.on_fit:
                assert metadata is not None
            self.fitted_ = True
            return self

        def predict(self, X, metadata=None):
            if self.on_predict:
                assert metadata is not None
            return np.ones(len(X))

    X, y = regression_dataset
    pipeline = make_pipeline(
        ConsumesMetadata(on_fit=True, on_predict=True)
        .set_fit_request(metadata=True)
        .set_predict_request(metadata=True)
    )

    # Routing must work identically through the frozen wrapper.
    pipeline.fit(X, y, metadata="test")
    frozen = FrozenEstimator(pipeline)
    pipeline.predict(X, metadata="test")
    frozen.predict(X, metadata="test")

    # Changing the request on the inner step (via frozen.__getitem__
    # delegation) must be reflected when predicting through the wrapper.
    frozen["consumesmetadata"].set_predict_request(metadata=False)
    with pytest.raises(
        TypeError,
        match=re.escape(
            "Pipeline.predict got unexpected argument(s) {'metadata'}, which are not "
            "routed to any object."
        ),
    ):
        frozen.predict(X, metadata="test")

    # An unset (None) request with metadata passed must raise as well.
    frozen["consumesmetadata"].set_predict_request(metadata=None)
    with pytest.raises(UnsetMetadataPassedError):
        frozen.predict(X, metadata="test")
133
+
134
+
135
def test_composite_fit(classification_dataset):
    """Test that calling fit_transform and fit_predict doesn't call fit."""

    # Estimator that counts `fit` calls and defines composite fit methods
    # purely so we can verify they are never reached through the wrapper.
    class Estimator(BaseEstimator):
        def fit(self, X, y):
            try:
                self._fit_counter += 1
            except AttributeError:
                # first call: the counter attribute does not exist yet
                self._fit_counter = 1
            return self

        def fit_transform(self, X, y=None):
            # only here to test that it doesn't get called
            ...  # pragma: no cover

        def fit_predict(self, X, y=None):
            # only here to test that it doesn't get called
            ...  # pragma: no cover

    X, y = classification_dataset
    est = Estimator().fit(X, y)
    frozen = FrozenEstimator(est)

    # FrozenEstimator.__getattr__ blocks both composite methods outright.
    with pytest.raises(AttributeError):
        frozen.fit_predict(X, y)
    with pytest.raises(AttributeError):
        frozen.fit_transform(X, y)

    # Still exactly one fit: the original call before freezing.
    assert frozen._fit_counter == 1
164
+
165
+
166
def test_clone_frozen(regression_dataset):
    """Cloning a FrozenEstimator must preserve the frozen state: the clone
    still wraps the identical fitted estimator instance."""
    X, y = regression_dataset
    fitted = LinearRegression().fit(X, y)
    frozen = FrozenEstimator(fitted)
    assert clone(frozen).estimator is fitted
173
+
174
+
175
def test_check_is_fitted(regression_dataset):
    """`check_is_fitted` must see through FrozenEstimator: it raises for an
    unfitted inner estimator and passes once the inner estimator is fitted."""
    X, y = regression_dataset

    unfitted = FrozenEstimator(LinearRegression())
    with pytest.raises(NotFittedError):
        check_is_fitted(unfitted)

    fitted = FrozenEstimator(LinearRegression().fit(X, y))
    check_is_fitted(fitted)
187
+
188
+
189
def test_frozen_tags():
    """Test that frozen estimators have the same tags as the original estimator
    except for the skip_test tag."""

    # Estimator with a distinctive tag so we can check tag propagation.
    class Estimator(BaseEstimator):
        def __sklearn_tags__(self):
            tags = super().__sklearn_tags__()
            tags.input_tags.categorical = True
            return tags

    estimator = Estimator()
    frozen = FrozenEstimator(estimator)
    frozen_tags = frozen.__sklearn_tags__()
    estimator_tags = estimator.__sklearn_tags__()

    # FrozenEstimator.__sklearn_tags__ sets _skip_test on its (deep) copy of
    # the tags without mutating the original estimator's tags.
    assert frozen_tags._skip_test is True
    assert estimator_tags._skip_test is False

    # All other tags pass through unchanged.
    assert estimator_tags.input_tags.categorical is True
    assert frozen_tags.input_tags.categorical is True
209
+
210
+
211
def test_frozen_params():
    """FrozenEstimator exposes only the `estimator` parameter and refuses to
    set parameters of the inner estimator."""
    inner = LogisticRegression()
    frozen = FrozenEstimator(inner)

    # Nested parameter keys must be rejected with an informative error.
    with pytest.raises(ValueError, match="You cannot set parameters of the inner"):
        frozen.set_params(estimator__C=1)

    assert frozen.get_params() == {"estimator": inner}

    # Replacing the wrapped estimator itself is allowed.
    replacement = LocalOutlierFactor()
    frozen.set_params(estimator=replacement)
    assert frozen.get_params() == {"estimator": replacement}
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ """Gaussian process based regression and classification."""
2
+
3
+ # Authors: The scikit-learn developers
4
+ # SPDX-License-Identifier: BSD-3-Clause
5
+
6
+ from . import kernels
7
+ from ._gpc import GaussianProcessClassifier
8
+ from ._gpr import GaussianProcessRegressor
9
+
10
+ __all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "kernels"]
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py ADDED
@@ -0,0 +1,904 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Gaussian processes classification."""
2
+
3
+ # Authors: The scikit-learn developers
4
+ # SPDX-License-Identifier: BSD-3-Clause
5
+
6
+ from numbers import Integral
7
+ from operator import itemgetter
8
+
9
+ import numpy as np
10
+ import scipy.optimize
11
+ from scipy.linalg import cho_solve, cholesky, solve
12
+ from scipy.special import erf, expit
13
+
14
+ from ..base import BaseEstimator, ClassifierMixin, _fit_context, clone
15
+ from ..multiclass import OneVsOneClassifier, OneVsRestClassifier
16
+ from ..preprocessing import LabelEncoder
17
+ from ..utils import check_random_state
18
+ from ..utils._param_validation import Interval, StrOptions
19
+ from ..utils.optimize import _check_optimize_result
20
+ from ..utils.validation import check_is_fitted, validate_data
21
+ from .kernels import RBF, CompoundKernel, Kernel
22
+ from .kernels import ConstantKernel as C
23
+
24
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
# Both arrays are stored as column vectors so they broadcast against
# per-sample row vectors in `predict_proba` below.
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array(
    [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]
)[:, np.newaxis]
34
+
35
+
36
+ class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
37
+ """Binary Gaussian process classification based on Laplace approximation.
38
+
39
+ The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.
40
+
41
+ Internally, the Laplace approximation is used for approximating the
42
+ non-Gaussian posterior by a Gaussian.
43
+
44
+ Currently, the implementation is restricted to using the logistic link
45
+ function.
46
+
47
+ .. versionadded:: 0.18
48
+
49
+ Parameters
50
+ ----------
51
+ kernel : kernel instance, default=None
52
+ The kernel specifying the covariance function of the GP. If None is
53
+ passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
54
+ the kernel's hyperparameters are optimized during fitting.
55
+
56
+ optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
57
+ Can either be one of the internally supported optimizers for optimizing
58
+ the kernel's parameters, specified by a string, or an externally
59
+ defined optimizer passed as a callable. If a callable is passed, it
60
+ must have the signature::
61
+
62
+ def optimizer(obj_func, initial_theta, bounds):
63
+ # * 'obj_func' is the objective function to be maximized, which
64
+ # takes the hyperparameters theta as parameter and an
65
+ # optional flag eval_gradient, which determines if the
66
+ # gradient is returned additionally to the function value
67
+ # * 'initial_theta': the initial value for theta, which can be
68
+ # used by local optimizers
69
+ # * 'bounds': the bounds on the values of theta
70
+ ....
71
+ # Returned are the best found hyperparameters theta and
72
+ # the corresponding value of the target function.
73
+ return theta_opt, func_min
74
+
75
+ Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
76
+ is used. If None is passed, the kernel's parameters are kept fixed.
77
+ Available internal optimizers are::
78
+
79
+ 'fmin_l_bfgs_b'
80
+
81
+ n_restarts_optimizer : int, default=0
82
+ The number of restarts of the optimizer for finding the kernel's
83
+ parameters which maximize the log-marginal likelihood. The first run
84
+ of the optimizer is performed from the kernel's initial parameters,
85
+ the remaining ones (if any) from thetas sampled log-uniform randomly
86
+ from the space of allowed theta-values. If greater than 0, all bounds
87
+ must be finite. Note that n_restarts_optimizer=0 implies that one
88
+ run is performed.
89
+
90
+ max_iter_predict : int, default=100
91
+ The maximum number of iterations in Newton's method for approximating
92
+ the posterior during predict. Smaller values will reduce computation
93
+ time at the cost of worse results.
94
+
95
+ warm_start : bool, default=False
96
+ If warm-starts are enabled, the solution of the last Newton iteration
97
+ on the Laplace approximation of the posterior mode is used as
98
+ initialization for the next call of _posterior_mode(). This can speed
99
+ up convergence when _posterior_mode is called several times on similar
100
+ problems as in hyperparameter optimization. See :term:`the Glossary
101
+ <warm_start>`.
102
+
103
+ copy_X_train : bool, default=True
104
+ If True, a persistent copy of the training data is stored in the
105
+ object. Otherwise, just a reference to the training data is stored,
106
+ which might cause predictions to change if the data is modified
107
+ externally.
108
+
109
+ random_state : int, RandomState instance or None, default=None
110
+ Determines random number generation used to initialize the centers.
111
+ Pass an int for reproducible results across multiple function calls.
112
+ See :term:`Glossary <random_state>`.
113
+
114
+ Attributes
115
+ ----------
116
+ X_train_ : array-like of shape (n_samples, n_features) or list of object
117
+ Feature vectors or other representations of training data (also
118
+ required for prediction).
119
+
120
+ y_train_ : array-like of shape (n_samples,)
121
+ Target values in training data (also required for prediction)
122
+
123
+ classes_ : array-like of shape (n_classes,)
124
+ Unique class labels.
125
+
126
+ kernel_ : kernl instance
127
+ The kernel used for prediction. The structure of the kernel is the
128
+ same as the one passed as parameter but with optimized hyperparameters
129
+
130
+ L_ : array-like of shape (n_samples, n_samples)
131
+ Lower-triangular Cholesky decomposition of the kernel in X_train_
132
+
133
+ pi_ : array-like of shape (n_samples,)
134
+ The probabilities of the positive class for the training points
135
+ X_train_
136
+
137
+ W_sr_ : array-like of shape (n_samples,)
138
+ Square root of W, the Hessian of log-likelihood of the latent function
139
+ values for the observed labels. Since W is diagonal, only the diagonal
140
+ of sqrt(W) is stored.
141
+
142
+ log_marginal_likelihood_value_ : float
143
+ The log-marginal-likelihood of ``self.kernel_.theta``
144
+
145
+ References
146
+ ----------
147
+ .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
148
+ "Gaussian Processes for Machine Learning",
149
+ MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
150
+ """
151
+
152
    def __init__(
        self,
        kernel=None,
        *,
        optimizer="fmin_l_bfgs_b",
        n_restarts_optimizer=0,
        max_iter_predict=100,
        warm_start=False,
        copy_X_train=True,
        random_state=None,
    ):
        # Store constructor arguments unmodified; validation and all derived
        # state (kernel_, X_train_, ...) are created in `fit`, following the
        # scikit-learn estimator convention.
        self.kernel = kernel
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.max_iter_predict = max_iter_predict
        self.warm_start = warm_start
        self.copy_X_train = copy_X_train
        self.random_state = random_state
170
+
171
    def fit(self, X, y):
        """Fit Gaussian process classification model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Feature vectors or other representations of training data.

        y : array-like of shape (n_samples,)
            Target values, must be binary.

        Returns
        -------
        self : returns an instance of self.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
                1.0, length_scale_bounds="fixed"
            )
        else:
            self.kernel_ = clone(self.kernel)

        self.rng = check_random_state(self.random_state)

        # Optionally copy so later external mutation of X cannot change
        # predictions.
        self.X_train_ = np.copy(X) if self.copy_X_train else X

        # Encode class labels and check that it is a binary classification
        # problem
        label_encoder = LabelEncoder()
        self.y_train_ = label_encoder.fit_transform(y)
        self.classes_ = label_encoder.classes_
        if self.classes_.size > 2:
            raise ValueError(
                "%s supports only binary classification. y contains classes %s"
                % (self.__class__.__name__, self.classes_)
            )
        elif self.classes_.size == 1:
            raise ValueError(
                "{0:s} requires 2 classes; got {1:d} class".format(
                    self.__class__.__name__, self.classes_.size
                )
            )

        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def obj_func(theta, eval_gradient=True):
                # Negated because the optimizer minimizes while we want to
                # maximize the log-marginal likelihood.
                if eval_gradient:
                    lml, grad = self.log_marginal_likelihood(
                        theta, eval_gradient=True, clone_kernel=False
                    )
                    return -lml, -grad
                else:
                    return -self.log_marginal_likelihood(theta, clone_kernel=False)

            # First optimize starting from theta specified in kernel
            optima = [
                self._constrained_optimization(
                    obj_func, self.kernel_.theta, self.kernel_.bounds
                )
            ]

            # Additional runs are performed from log-uniform chosen initial
            # theta
            if self.n_restarts_optimizer > 0:
                # Sampling restart points log-uniformly requires finite bounds.
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite."
                    )
                bounds = self.kernel_.bounds
                for iteration in range(self.n_restarts_optimizer):
                    theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
                    optima.append(
                        self._constrained_optimization(obj_func, theta_initial, bounds)
                    )
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.kernel_._check_bounds_params()

            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
                self.kernel_.theta
            )

        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_(self.X_train_)

        _, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
            K, return_temporaries=True
        )

        return self
268
+
269
+ def predict(self, X):
270
+ """Perform classification on an array of test vectors X.
271
+
272
+ Parameters
273
+ ----------
274
+ X : array-like of shape (n_samples, n_features) or list of object
275
+ Query points where the GP is evaluated for classification.
276
+
277
+ Returns
278
+ -------
279
+ C : ndarray of shape (n_samples,)
280
+ Predicted target values for X, values are from ``classes_``
281
+ """
282
+ check_is_fitted(self)
283
+
284
+ # As discussed on Section 3.4.2 of GPML, for making hard binary
285
+ # decisions, it is enough to compute the MAP of the posterior and
286
+ # pass it through the link function
287
+ K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
288
+ f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
289
+
290
+ return np.where(f_star > 0, self.classes_[1], self.classes_[0])
291
+
292
    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : array-like of shape (n_samples, n_classes)
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute ``classes_``.
        """
        check_is_fitted(self)

        # Based on Algorithm 3.2 of GPML
        K_star = self.kernel_(self.X_train_, X)  # K_star =k(x_star)
        f_star = K_star.T.dot(self.y_train_ - self.pi_)  # Line 4
        v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star)  # Line 5
        # Line 6 (compute np.diag(v.T.dot(v)) via einsum)
        var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)

        # Line 7:
        # Approximate \int log(z) * N(z | f_star, var_f_star)
        # Approximation is due to Williams & Barber, "Bayesian Classification
        # with Gaussian Processes", Appendix A: Approximate the logistic
        # sigmoid by a linear combination of 5 error functions.
        # For information on how this integral can be computed see
        # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
        # LAMBDAS/COEFS are column vectors, so the expressions below broadcast
        # to shape (5, n_samples) and are reduced over the 5 error functions.
        alpha = 1 / (2 * var_f_star)
        gamma = LAMBDAS * f_star
        integrals = (
            np.sqrt(np.pi / alpha)
            * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2)))
            / (2 * np.sqrt(var_f_star * 2 * np.pi))
        )
        pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()

        # Column order matches self.classes_ (negative class first).
        return np.vstack((1 - pi_star, pi_star)).T
333
+
334
def log_marginal_likelihood(self, theta=None, eval_gradient=False, clone_kernel=True):
    """Returns log-marginal likelihood of theta for training data.

    Parameters
    ----------
    theta : array-like of shape (n_kernel_params,), default=None
        Kernel hyperparameters for which the log-marginal likelihood is
        evaluated. If None, the precomputed log_marginal_likelihood
        of ``self.kernel_.theta`` is returned.

    eval_gradient : bool, default=False
        If True, the gradient of the log-marginal likelihood with respect
        to the kernel hyperparameters at position theta is returned
        additionally. If True, theta must not be None.

    clone_kernel : bool, default=True
        If True, the kernel attribute is copied. If False, the kernel
        attribute is modified, but may result in a performance improvement.

    Returns
    -------
    log_likelihood : float
        Log-marginal likelihood of theta for training data.

    log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
        Gradient of the log-marginal likelihood with respect to the kernel
        hyperparameters at position theta.
        Only returned when `eval_gradient` is True.
    """
    # Without an explicit theta, fall back to the value cached at fit time.
    if theta is None:
        if eval_gradient:
            raise ValueError("Gradient can only be evaluated for theta!=None")
        return self.log_marginal_likelihood_value_

    # Either work on a copy of the kernel or mutate it in place (faster).
    if clone_kernel:
        kernel = self.kernel_.clone_with_theta(theta)
    else:
        kernel = self.kernel_
        kernel.theta = theta

    if eval_gradient:
        K, K_gradient = kernel(self.X_train_, eval_gradient=True)
    else:
        K = kernel(self.X_train_)

    # The Newton mode search returns the Laplace-approximated LML together
    # with temporaries that the gradient computation below reuses.
    lml, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)

    if not eval_gradient:
        return lml

    # Gradient based on Algorithm 5.1 of GPML.
    lml_gradient = np.empty(theta.shape[0])
    # XXX: Get rid of the np.diag() in the next line
    R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr))  # Line 7
    C = solve(L, W_sr[:, np.newaxis] * K)  # Line 8
    # Line 9: diag(C.T @ C) computed via einsum instead of a full product;
    # the last factor is the third derivative of the log-likelihood.
    s_2 = (
        -0.5
        * (np.diag(K) - np.einsum("ij, ij -> j", C, C))
        * (pi * (1 - pi) * (1 - 2 * pi))
    )

    for j in range(lml_gradient.shape[0]):
        dK = K_gradient[:, :, j]  # Line 11
        # Line 12: R.T.ravel().dot(dK.ravel()) equals np.trace(R.dot(dK))
        s_1 = 0.5 * a.T.dot(dK).dot(a) - 0.5 * R.T.ravel().dot(dK.ravel())

        b = dK.dot(self.y_train_ - pi)  # Line 13
        s_3 = b - K.dot(R.dot(b))  # Line 14

        lml_gradient[j] = s_1 + s_2.T.dot(s_3)  # Line 15

    return lml, lml_gradient
412
+
413
def _posterior_mode(self, K, return_temporaries=False):
    """Mode-finding for binary Laplace GPC and fixed kernel.

    Approximates the posterior over the latent function values for the
    given inputs/targets by a Gaussian and locates its mode with Newton
    iteration (Algorithm 3.1 of GPML).
    """
    # With warm starts enabled, reuse the previous solution as the Newton
    # starting point when its shape still matches; otherwise start at 0.
    can_warm_start = (
        self.warm_start
        and hasattr(self, "f_cached")
        and self.f_cached.shape == self.y_train_.shape
    )
    if can_warm_start:
        f = self.f_cached
    else:
        f = np.zeros_like(self.y_train_, dtype=np.float64)

    log_marginal_likelihood = -np.inf
    for _ in range(self.max_iter_predict):
        pi = expit(f)  # Line 4
        W = pi * (1 - pi)
        W_sr = np.sqrt(W)  # Line 5
        W_sr_K = W_sr[:, np.newaxis] * K
        B = np.eye(W.shape[0]) + W_sr_K * W_sr
        L = cholesky(B, lower=True)
        b = W * f + (self.y_train_ - pi)  # Line 6
        a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))  # Line 7
        f = K.dot(a)  # Line 8

        # Line 10: the objective value doubles as the convergence check.
        lml = (
            -0.5 * a.T.dot(f)
            - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum()
            - np.log(np.diag(L)).sum()
        )
        # Converged once the log marginal likelihood stops increasing.
        # XXX: more complex convergence criterion
        if lml - log_marginal_likelihood < 1e-10:
            break
        log_marginal_likelihood = lml

    self.f_cached = f  # Remember solution for later warm-starts
    if return_temporaries:
        return log_marginal_likelihood, (pi, W_sr, L, b, a)
    return log_marginal_likelihood
470
+
471
def _constrained_optimization(self, obj_func, initial_theta, bounds):
    """Minimize obj_func over theta, starting from initial_theta.

    Dispatches on ``self.optimizer``: the string 'fmin_l_bfgs_b' selects
    scipy's L-BFGS-B (obj_func must return value and gradient, hence
    jac=True); any callable is used as an external optimizer. Returns the
    best theta found and the corresponding objective value.
    """
    if self.optimizer == "fmin_l_bfgs_b":
        opt_res = scipy.optimize.minimize(
            obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
        )
        _check_optimize_result("lbfgs", opt_res)
        return opt_res.x, opt_res.fun

    if callable(self.optimizer):
        # External optimizers receive the bounds by keyword.
        theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
        return theta_opt, func_min

    raise ValueError("Unknown optimizer %s." % self.optimizer)
484
+
485
+
486
class GaussianProcessClassifier(ClassifierMixin, BaseEstimator):
    """Gaussian process classification (GPC) based on Laplace approximation.

    The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.

    Internally, the Laplace approximation is used for approximating the
    non-Gaussian posterior by a Gaussian.

    Currently, the implementation is restricted to using the logistic link
    function. For multi-class classification, several binary one-versus-rest
    classifiers are fitted. Note that this class thus does not implement
    a true multi-class Laplace approximation.

    Read more in the :ref:`User Guide <gaussian_process>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernel : kernel instance, default=None
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
        the kernel's hyperparameters are optimized during fitting. Also kernel
        cannot be a `CompoundKernel`.

    optimizer : 'fmin_l_bfgs_b', callable or None, default='fmin_l_bfgs_b'
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::

            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be maximized, which
                #   takes the hyperparameters theta as parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min

        Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are::

            'fmin_l_bfgs_b'

    n_restarts_optimizer : int, default=0
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that n_restarts_optimizer=0 implies that one
        run is performed.

    max_iter_predict : int, default=100
        The maximum number of iterations in Newton's method for approximating
        the posterior during predict. Smaller values will reduce computation
        time at the cost of worse results.

    warm_start : bool, default=False
        If warm-starts are enabled, the solution of the last Newton iteration
        on the Laplace approximation of the posterior mode is used as
        initialization for the next call of _posterior_mode(). This can speed
        up convergence when _posterior_mode is called several times on similar
        problems as in hyperparameter optimization. See :term:`the Glossary
        <warm_start>`.

    copy_X_train : bool, default=True
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest'
        Specifies how multi-class classification problems are handled.
        Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest',
        one binary Gaussian process classifier is fitted for each class, which
        is trained to separate this class from the rest. In 'one_vs_one', one
        binary Gaussian process classifier is fitted for each pair of classes,
        which is trained to separate these two classes. The predictions of
        these binary predictors are combined into multi-class predictions.
        Note that 'one_vs_one' does not support predicting probability
        estimates.

    n_jobs : int, default=None
        The number of jobs to use for the computation: the specified
        multiclass problems are computed in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    base_estimator_ : ``Estimator`` instance
        The estimator instance that defines the likelihood function
        using the observed data.

    kernel_ : kernel instance
        The kernel used for prediction. In case of binary classification,
        the structure of the kernel is the same as the one passed as parameter
        but with optimized hyperparameters. In case of multi-class
        classification, a CompoundKernel is returned which consists of the
        different kernels used in the one-versus-rest classifiers.

    log_marginal_likelihood_value_ : float
        The log-marginal-likelihood of ``self.kernel_.theta``

    classes_ : array-like of shape (n_classes,)
        Unique class labels.

    n_classes_ : int
        The number of classes in the training data

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    GaussianProcessRegressor : Gaussian process regression (GPR).

    References
    ----------
    .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
       "Gaussian Processes for Machine Learning",
       MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * RBF(1.0)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.83548752, 0.03228706, 0.13222543],
           [0.79064206, 0.06525643, 0.14410151]])

    For a comparison of the GaussianProcessClassifier with other classifiers see:
    :ref:`sphx_glr_auto_examples_classification_plot_classification_probability.py`.
    """

    _parameter_constraints: dict = {
        "kernel": [Kernel, None],
        "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None],
        "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")],
        "max_iter_predict": [Interval(Integral, 1, None, closed="left")],
        "warm_start": ["boolean"],
        "copy_X_train": ["boolean"],
        "random_state": ["random_state"],
        "multi_class": [StrOptions({"one_vs_rest", "one_vs_one"})],
        "n_jobs": [Integral, None],
    }

    def __init__(
        self,
        kernel=None,
        *,
        optimizer="fmin_l_bfgs_b",
        n_restarts_optimizer=0,
        max_iter_predict=100,
        warm_start=False,
        copy_X_train=True,
        random_state=None,
        multi_class="one_vs_rest",
        n_jobs=None,
    ):
        self.kernel = kernel
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.max_iter_predict = max_iter_predict
        self.warm_start = warm_start
        self.copy_X_train = copy_X_train
        self.random_state = random_state
        self.multi_class = multi_class
        self.n_jobs = n_jobs

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit Gaussian process classification model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Feature vectors or other representations of training data.

        y : array-like of shape (n_samples,)
            Target values, must be binary.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        if isinstance(self.kernel, CompoundKernel):
            raise ValueError("kernel cannot be a CompoundKernel")

        # Kernels operating on vectors need 2-D numeric input; structured
        # kernels (e.g. on strings) accept arbitrary objects.
        if self.kernel is None or self.kernel.requires_vector_input:
            X, y = validate_data(
                self, X, y, multi_output=False, ensure_2d=True, dtype="numeric"
            )
        else:
            X, y = validate_data(
                self, X, y, multi_output=False, ensure_2d=False, dtype=None
            )

        self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
            kernel=self.kernel,
            optimizer=self.optimizer,
            n_restarts_optimizer=self.n_restarts_optimizer,
            max_iter_predict=self.max_iter_predict,
            warm_start=self.warm_start,
            copy_X_train=self.copy_X_train,
            random_state=self.random_state,
        )

        self.classes_ = np.unique(y)
        self.n_classes_ = self.classes_.size
        if self.n_classes_ == 1:
            raise ValueError(
                "GaussianProcessClassifier requires 2 or more "
                "distinct classes; got %d class (only class %s "
                "is present)" % (self.n_classes_, self.classes_[0])
            )
        if self.n_classes_ > 2:
            # Reduce the multi-class problem to several binary ones.
            if self.multi_class == "one_vs_rest":
                self.base_estimator_ = OneVsRestClassifier(
                    self.base_estimator_, n_jobs=self.n_jobs
                )
            elif self.multi_class == "one_vs_one":
                self.base_estimator_ = OneVsOneClassifier(
                    self.base_estimator_, n_jobs=self.n_jobs
                )
            else:
                raise ValueError("Unknown multi-class mode %s" % self.multi_class)

        self.base_estimator_.fit(X, y)

        # Multi-class: report the mean LML over the binary sub-estimators.
        if self.n_classes_ > 2:
            self.log_marginal_likelihood_value_ = np.mean(
                [
                    estimator.log_marginal_likelihood()
                    for estimator in self.base_estimator_.estimators_
                ]
            )
        else:
            self.log_marginal_likelihood_value_ = (
                self.base_estimator_.log_marginal_likelihood()
            )

        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : ndarray of shape (n_samples,)
            Predicted target values for X, values are from ``classes_``.
        """
        check_is_fitted(self)

        if self.kernel is None or self.kernel.requires_vector_input:
            X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
        else:
            X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)

        return self.base_estimator_.predict(X)

    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : array-like of shape (n_samples, n_classes)
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        # One-vs-one voting does not yield calibrated class probabilities.
        if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
            raise ValueError(
                "one_vs_one multi-class mode does not support "
                "predicting probability estimates. Use "
                "one_vs_rest mode instead."
            )

        if self.kernel is None or self.kernel.requires_vector_input:
            X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
        else:
            X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)

        return self.base_estimator_.predict_proba(X)

    @property
    def kernel_(self):
        """Return the kernel of the base estimator."""
        if self.n_classes_ == 2:
            return self.base_estimator_.kernel_
        else:
            return CompoundKernel(
                [estimator.kernel_ for estimator in self.base_estimator_.estimators_]
            )

    def log_marginal_likelihood(
        self, theta=None, eval_gradient=False, clone_kernel=True
    ):
        """Return log-marginal likelihood of theta for training data.

        In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers are returned.

        Parameters
        ----------
        theta : array-like of shape (n_kernel_params,), default=None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. In the case of multi-class classification, theta may
            be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernel get assigned the
            same theta values. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default=False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. Note that gradient computation is not supported
            for non-binary classification. If True, theta must not be None.

        clone_kernel : bool, default=True
            If True, the kernel attribute is copied. If False, the kernel
            attribute is modified, but may result in a performance improvement.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when `eval_gradient` is True.
        """
        check_is_fitted(self)

        if theta is None:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        theta = np.asarray(theta)
        if self.n_classes_ == 2:
            return self.base_estimator_.log_marginal_likelihood(
                theta, eval_gradient, clone_kernel=clone_kernel
            )
        else:
            if eval_gradient:
                raise NotImplementedError(
                    "Gradient of log-marginal-likelihood not implemented for "
                    "multi-class GPC."
                )
            estimators = self.base_estimator_.estimators_
            n_dims = estimators[0].kernel_.n_dims
            if theta.shape[0] == n_dims:  # use same theta for all sub-kernels
                return np.mean(
                    [
                        estimator.log_marginal_likelihood(
                            theta, clone_kernel=clone_kernel
                        )
                        for estimator in estimators
                    ]
                )
            elif theta.shape[0] == n_dims * self.classes_.shape[0]:
                # theta for compound kernel: slice out each sub-kernel's part
                return np.mean(
                    [
                        estimator.log_marginal_likelihood(
                            theta[n_dims * i : n_dims * (i + 1)],
                            clone_kernel=clone_kernel,
                        )
                        for i, estimator in enumerate(estimators)
                    ]
                )
            else:
                raise ValueError(
                    "Shape of theta must be either %d or %d. "
                    "Obtained theta with shape %d."
                    % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
                )
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py ADDED
@@ -0,0 +1,2410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A set of kernels that can be combined by operators and used in Gaussian processes."""
2
+
3
+ # Kernels for Gaussian process regression and classification.
4
+ #
5
+ # The kernels in this module allow kernel-engineering, i.e., they can be
6
+ # combined via the "+" and "*" operators or be exponentiated with a scalar
7
+ # via "**". These sum and product expressions can also contain scalar values,
8
+ # which are automatically converted to a constant kernel.
9
+ #
10
+ # All kernels allow (analytic) gradient-based hyperparameter optimization.
11
+ # The space of hyperparameters can be specified by giving lower and upper
12
+ # boundaries for the value of each hyperparameter (the search space is thus
13
+ # rectangular). Instead of specifying bounds, hyperparameters can also be
14
+ # declared to be "fixed", which causes these hyperparameters to be excluded from
15
+ # optimization.
16
+
17
+
18
+ # Authors: The scikit-learn developers
19
+ # SPDX-License-Identifier: BSD-3-Clause
20
+
21
+ # Note: this module is strongly inspired by the kernel module of the george
22
+ # package.
23
+
24
+ import math
25
+ import warnings
26
+ from abc import ABCMeta, abstractmethod
27
+ from collections import namedtuple
28
+ from inspect import signature
29
+
30
+ import numpy as np
31
+ from scipy.spatial.distance import cdist, pdist, squareform
32
+ from scipy.special import gamma, kv
33
+
34
+ from ..base import clone
35
+ from ..exceptions import ConvergenceWarning
36
+ from ..metrics.pairwise import pairwise_kernels
37
+ from ..utils.validation import _num_samples
38
+
39
+
40
def _check_length_scale(X, length_scale):
    """Validate a (possibly anisotropic) kernel length scale against X.

    Squeezes and converts ``length_scale`` to float. Raises ValueError when
    the result has more than one dimension, or when it is 1-D but its
    length differs from the number of features of ``X``.
    """
    length_scale = np.squeeze(length_scale).astype(float)
    scale_ndim = np.ndim(length_scale)
    if scale_ndim > 1:
        raise ValueError("length_scale cannot be of dimension greater than 1")
    anisotropic = scale_ndim == 1
    if anisotropic and X.shape[1] != length_scale.shape[0]:
        raise ValueError(
            "Anisotropic kernel must have the same number of "
            "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1])
        )
    return length_scale
50
+
51
+
52
class Hyperparameter(
    namedtuple(
        "Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed")
    )
):
    """Specification of a single kernel hyperparameter, as a namedtuple.

    .. versionadded:: 0.18

    Attributes
    ----------
    name : str
        Name of the hyperparameter. A kernel exposing a hyperparameter
        named "x" must provide the attributes ``self.x`` and
        ``self.x_bounds``.

    value_type : str
        Type of the hyperparameter; only "numeric" is currently supported.

    bounds : pair of floats >= 0 or "fixed"
        Lower and upper bound on the parameter. For vector-valued
        hyperparameters (``n_elements > 1``) a pair of 1d arrays with
        ``n_elements`` entries each may be given instead. Passing the
        string "fixed" freezes the hyperparameter's value.

    n_elements : int, default=1
        Number of elements of the hyperparameter value; 1 means a scalar,
        larger values denote vector-valued hyperparameters such as
        anisotropic length-scales.

    fixed : bool, default=None
        Whether the value is excluded from hyperparameter tuning. When
        None, it is derived from ``bounds`` (fixed iff bounds == "fixed").
    """

    # Deriving from a raw namedtuple keeps instances compact: attributes are
    # packed in a struct and there is no per-instance __dict__. Declaring an
    # empty __slots__ preserves that property in this subclass.
    __slots__ = ()

    def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
        # "fixed" as a string sentinel means the bounds carry no numeric
        # information and must not be coerced to an array.
        bounds_are_fixed = isinstance(bounds, str) and bounds == "fixed"
        if not bounds_are_fixed:
            bounds = np.atleast_2d(bounds)
            if n_elements > 1:  # vector-valued parameter
                n_rows = bounds.shape[0]
                if n_rows == 1:
                    # Broadcast a single (low, high) pair to every element.
                    bounds = np.repeat(bounds, n_elements, 0)
                elif n_rows != n_elements:
                    raise ValueError(
                        "Bounds on %s should have either 1 or "
                        "%d dimensions. Given are %d"
                        % (name, n_elements, bounds.shape[0])
                    )

        if fixed is None:
            fixed = bounds_are_fixed
        return super().__new__(cls, name, value_type, bounds, n_elements, fixed)

    # Mainly a testing utility: structural equality over all five fields.
    def __eq__(self, other):
        if self.name != other.name or self.value_type != other.value_type:
            return False
        if not np.all(self.bounds == other.bounds):
            return False
        return self.n_elements == other.n_elements and self.fixed == other.fixed
151
+
152
+
153
class Kernel(metaclass=ABCMeta):
    """Base class for all kernels.

    .. versionadded:: 0.18

    Examples
    --------
    >>> from sklearn.gaussian_process.kernels import Kernel, RBF
    >>> import numpy as np
    >>> class CustomKernel(Kernel):
    ...     def __init__(self, length_scale=1.0):
    ...         self.length_scale = length_scale
    ...     def __call__(self, X, Y=None):
    ...         if Y is None:
    ...             Y = X
    ...         return np.inner(X, X if Y is None else Y) ** 2
    ...     def diag(self, X):
    ...         return np.ones(X.shape[0])
    ...     def is_stationary(self):
    ...         return True
    >>> kernel = CustomKernel(length_scale=2.0)
    >>> X = np.array([[1, 2], [3, 4]])
    >>> print(kernel(X))
    [[ 25 121]
     [121 625]]
    """

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = dict()

        # introspect the constructor arguments to find the model parameters
        # to represent
        cls = self.__class__
        init = getattr(cls.__init__, "deprecated_original", cls.__init__)
        init_sign = signature(init)
        args, varargs = [], []
        for parameter in init_sign.parameters.values():
            if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self":
                args.append(parameter.name)
            if parameter.kind == parameter.VAR_POSITIONAL:
                varargs.append(parameter.name)

        # *args would make the constructor-signature <-> attribute mapping
        # ambiguous, so kernels are required to name all parameters.
        if len(varargs) != 0:
            raise RuntimeError(
                "scikit-learn kernels should always "
                "specify their parameters in the signature"
                " of their __init__ (no varargs)."
                " %s doesn't follow this convention." % (cls,)
            )
        # Kernel convention: every __init__ parameter is stored verbatim as
        # an attribute of the same name.
        for arg in args:
            params[arg] = getattr(self, arg)

        return params

    def set_params(self, **params):
        """Set the parameters of this kernel.

        The method works on simple kernels as well as on nested kernels.
        The latter have parameters of the form ``<component>__<parameter>``
        so that it's possible to update each component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in params.items():
            # "a__b" addresses parameter "b" of sub-kernel "a".
            split = key.split("__", 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError(
                        "Invalid parameter %s for kernel %s. "
                        "Check the list of available parameters "
                        "with `kernel.get_params().keys()`." % (name, self)
                    )
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError(
                        "Invalid parameter %s for kernel %s. "
                        "Check the list of available parameters "
                        "with `kernel.get_params().keys()`."
                        % (key, self.__class__.__name__)
                    )
                setattr(self, key, value)
        return self

    def clone_with_theta(self, theta):
        """Returns a clone of self with given hyperparameters theta.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The hyperparameters
        """
        cloned = clone(self)
        cloned.theta = theta
        return cloned

    @property
    def n_dims(self):
        """Returns the number of non-fixed hyperparameters of the kernel."""
        return self.theta.shape[0]

    @property
    def hyperparameters(self):
        """Returns a list of all hyperparameter specifications."""
        # Kernel convention: each hyperparameter spec lives in an attribute
        # named "hyperparameter_<name>"; collect them by prefix.
        r = [
            getattr(self, attr)
            for attr in dir(self)
            if attr.startswith("hyperparameter_")
        ]
        return r

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        theta = []
        params = self.get_params()
        for hyperparameter in self.hyperparameters:
            if not hyperparameter.fixed:
                theta.append(params[hyperparameter.name])
        if len(theta) > 0:
            return np.log(np.hstack(theta))
        else:
            return np.array([])

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        params = self.get_params()
        # i walks through theta; each non-fixed hyperparameter consumes
        # n_elements entries (1 for scalars).
        i = 0
        for hyperparameter in self.hyperparameters:
            if hyperparameter.fixed:
                continue
            if hyperparameter.n_elements > 1:
                # vector-valued parameter
                params[hyperparameter.name] = np.exp(
                    theta[i : i + hyperparameter.n_elements]
                )
                i += hyperparameter.n_elements
            else:
                params[hyperparameter.name] = np.exp(theta[i])
                i += 1

        if i != len(theta):
            raise ValueError(
                "theta has not the correct number of entries."
                " Should be %d; given are %d" % (i, len(theta))
            )
        self.set_params(**params)

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        bounds = [
            hyperparameter.bounds
            for hyperparameter in self.hyperparameters
            if not hyperparameter.fixed
        ]
        if len(bounds) > 0:
            return np.log(np.vstack(bounds))
        else:
            return np.array([])

    # Operator overloads: non-kernel operands are promoted to ConstantKernel,
    # so e.g. `2.0 * RBF()` builds Product(ConstantKernel(2.0), RBF()).
    def __add__(self, b):
        if not isinstance(b, Kernel):
            return Sum(self, ConstantKernel(b))
        return Sum(self, b)

    def __radd__(self, b):
        if not isinstance(b, Kernel):
            return Sum(ConstantKernel(b), self)
        return Sum(b, self)

    def __mul__(self, b):
        if not isinstance(b, Kernel):
            return Product(self, ConstantKernel(b))
        return Product(self, b)

    def __rmul__(self, b):
        if not isinstance(b, Kernel):
            return Product(ConstantKernel(b), self)
        return Product(b, self)

    def __pow__(self, b):
        return Exponentiation(self, b)

    def __eq__(self, b):
        if type(self) != type(b):
            return False
        params_a = self.get_params()
        params_b = b.get_params()
        # np.any handles array-valued parameters (e.g. anisotropic
        # length-scales) as well as scalars.
        for key in set(list(params_a.keys()) + list(params_b.keys())):
            if np.any(params_a.get(key, None) != params_b.get(key, None)):
                return False
        return True

    def __repr__(self):
        return "{0}({1})".format(
            self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta))
        )

    @abstractmethod
    def __call__(self, X, Y=None, eval_gradient=False):
        """Evaluate the kernel."""

    @abstractmethod
    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """

    @abstractmethod
    def is_stationary(self):
        """Returns whether the kernel is stationary."""

    @property
    def requires_vector_input(self):
        """Returns whether the kernel is defined on fixed-length feature
        vectors or generic objects. Defaults to True for backward
        compatibility."""
        return True

    def _check_bounds_params(self):
        """Called after fitting to warn if bounds may have been too tight."""
        list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
        # idx indexes rows of self.bounds, which only contains non-fixed
        # hyperparameters — hence the explicit skip of fixed ones below.
        idx = 0
        for hyp in self.hyperparameters:
            if hyp.fixed:
                continue
            for dim in range(hyp.n_elements):
                if list_close[idx, 0]:
                    warnings.warn(
                        "The optimal value found for "
                        "dimension %s of parameter %s is "
                        "close to the specified lower "
                        "bound %s. Decreasing the bound and"
                        " calling fit again may find a "
                        "better value." % (dim, hyp.name, hyp.bounds[dim][0]),
                        ConvergenceWarning,
                    )
                elif list_close[idx, 1]:
                    warnings.warn(
                        "The optimal value found for "
                        "dimension %s of parameter %s is "
                        "close to the specified upper "
                        "bound %s. Increasing the bound and"
                        " calling fit again may find a "
                        "better value." % (dim, hyp.name, hyp.bounds[dim][1]),
                        ConvergenceWarning,
                    )
                idx += 1
462
+
463
+
464
class NormalizedKernelMixin:
    """Mixin for normalized kernels, i.e. kernels with k(x, x) = 1.

    .. versionadded:: 0.18
    """

    def diag(self, X):
        """Return the diagonal of the kernel k(X, X), a vector of ones.

        Equivalent to ``np.diag(self(X))`` but cheaper, since for a
        normalized kernel the diagonal is known to be identically one.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        n_samples = X.shape[0]
        return np.ones(n_samples)
488
+
489
+
490
class StationaryKernelMixin:
    """Mixin for stationary kernels, where k(X, Y) depends only on X - Y.

    .. versionadded:: 0.18
    """

    def is_stationary(self):
        """Return True, since kernels using this mixin are stationary."""
        return True
499
+
500
+
501
class GenericKernelMixin:
    """Mixin for kernels defined on generic objects — e.g. variable-length
    sequences, trees, or graphs — rather than fixed-length feature vectors.

    .. versionadded:: 0.22
    """

    @property
    def requires_vector_input(self):
        """Whether the kernel works only on fixed-length feature vectors."""
        return False
512
+
513
+
514
class CompoundKernel(Kernel):
    """Kernel which is composed of a set of other kernels.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernels : list of Kernels
        The other kernels

    Examples
    --------
    >>> from sklearn.gaussian_process.kernels import WhiteKernel
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> from sklearn.gaussian_process.kernels import CompoundKernel
    >>> kernel = CompoundKernel(
    ...     [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
    >>> print(kernel.bounds)
    [[-11.51292546  11.51292546]
     [-11.51292546  11.51292546]]
    >>> print(kernel.n_dims)
    2
    >>> print(kernel.theta)
    [1.09861229 0.69314718]
    """

    def __init__(self, kernels):
        self.kernels = kernels

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        return dict(kernels=self.kernels)

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        return np.hstack([kernel.theta for kernel in self.kernels])

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : array of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        # BUG FIX: this previously read `self.k1.n_dims`, but CompoundKernel
        # never defines an attribute `k1` (only `kernels`), so setting theta
        # raised AttributeError. Use the first sub-kernel instead. Note the
        # equal-width slicing below already assumes all sub-kernels expose
        # the same number of non-fixed dimensions.
        k_dims = self.kernels[0].n_dims
        for i, kernel in enumerate(self.kernels):
            kernel.theta = theta[i * k_dims : (i + 1) * k_dims]

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : array of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        return np.vstack([kernel.bounds for kernel in self.kernels])

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Note that this compound kernel returns the results of all simple kernel
        stacked along an additional axis.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object, \
                default=None
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object, \
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of the
            kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
            Kernel k(X, Y)

        K_gradient : ndarray of shape \
                (n_samples_X, n_samples_X, n_dims, n_kernels), optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if eval_gradient:
            K = []
            K_grad = []
            for kernel in self.kernels:
                K_single, K_grad_single = kernel(X, Y, eval_gradient)
                K.append(K_single)
                # Append a trailing kernel axis so gradients can be
                # concatenated along axis 3 below.
                K_grad.append(K_grad_single[..., np.newaxis])
            return np.dstack(K), np.concatenate(K_grad, 3)
        else:
            return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])

    def __eq__(self, b):
        if type(self) != type(b) or len(self.kernels) != len(b.kernels):
            return False
        return np.all(
            [self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))]
        )

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return np.all([kernel.is_stationary() for kernel in self.kernels])

    @property
    def requires_vector_input(self):
        """Returns whether the kernel is defined on discrete structures."""
        return np.any([kernel.requires_vector_input for kernel in self.kernels])

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to `np.diag(self(X))`; however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X, n_kernels)
            Diagonal of kernel k(X, X)
        """
        return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
676
+
677
+
678
class KernelOperator(Kernel):
    """Base class for operators that combine two kernels ``k1`` and ``k2``.

    .. versionadded:: 0.18
    """

    def __init__(self, k1, k2):
        self.k1 = k1
        self.k2 = k2

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = dict(k1=self.k1, k2=self.k2)
        if deep:
            # Expose sub-kernel parameters under the "k1__"/"k2__" prefixes,
            # following the scikit-learn nested-parameter convention.
            for prefix, kernel in (("k1__", self.k1), ("k2__", self.k2)):
                for key, value in kernel.get_params().items():
                    params[prefix + key] = value
        return params

    @property
    def hyperparameters(self):
        """Returns a list of all hyperparameter."""
        specs = []
        # Re-wrap each sub-kernel's hyperparameter spec with a prefixed name
        # so it addresses the corresponding nested parameter.
        for prefix, kernel in (("k1__", self.k1), ("k2__", self.k2)):
            for hp in kernel.hyperparameters:
                specs.append(
                    Hyperparameter(
                        prefix + hp.name,
                        hp.value_type,
                        hp.bounds,
                        hp.n_elements,
                    )
                )
        return specs

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        return np.append(self.k1.theta, self.k2.theta)

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        # k1's free dimensions come first in the flattened vector.
        split_at = self.k1.n_dims
        self.k1.theta = theta[:split_at]
        self.k2.theta = theta[split_at:]

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        k1_bounds = self.k1.bounds
        k2_bounds = self.k2.bounds
        # A sub-kernel with no free hyperparameters contributes no rows.
        if k1_bounds.size == 0:
            return k2_bounds
        if k2_bounds.size == 0:
            return k1_bounds
        return np.vstack((k1_bounds, k2_bounds))

    def __eq__(self, b):
        if type(self) != type(b):
            return False
        # Operators here (Sum, Product) are commutative, so compare both
        # operand orderings.
        same_order = self.k1 == b.k1 and self.k2 == b.k2
        swapped = self.k1 == b.k2 and self.k2 == b.k1
        return same_order or swapped

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return self.k1.is_stationary() and self.k2.is_stationary()

    @property
    def requires_vector_input(self):
        """Returns whether the kernel is stationary."""
        return self.k1.requires_vector_input or self.k2.requires_vector_input
794
+
795
+
796
class Sum(KernelOperator):
    """The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2`
    and combines them via

    .. math::
        k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)

    Note that the `__add__` magic method is overridden, so
    `Sum(RBF(), RBF())` is equivalent to using the + operator
    with `RBF() + RBF()`.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    k1 : Kernel
        The first base-kernel of the sum-kernel

    k2 : Kernel
        The second base-kernel of the sum-kernel
    """

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object,\
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if not eval_gradient:
            return self.k1(X, Y) + self.k2(X, Y)
        K1, grad1 = self.k1(X, Y, eval_gradient=True)
        K2, grad2 = self.k2(X, Y, eval_gradient=True)
        # Gradients of the two summands are stacked along the hyperparameter
        # axis, matching the k1-then-k2 layout of theta.
        return K1 + K2, np.dstack((grad1, grad2))

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to `np.diag(self(X))`; however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.k1.diag(X) + self.k2.diag(X)

    def __repr__(self):
        return "{0} + {1}".format(self.k1, self.k2)
891
+
892
+
893
class Product(KernelOperator):
    """The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2`
    and combines them via

    .. math::
        k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)

    Note that the `__mul__` magic method is overridden, so
    `Product(RBF(), RBF())` is equivalent to using the * operator
    with `RBF() * RBF()`.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    k1 : Kernel
        The first base-kernel of the product-kernel

    k2 : Kernel
        The second base-kernel of the product-kernel
    """

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_Y, n_features) or list of object,\
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if not eval_gradient:
            return self.k1(X, Y) * self.k2(X, Y)
        K1, grad1 = self.k1(X, Y, eval_gradient=True)
        K2, grad2 = self.k2(X, Y, eval_gradient=True)
        # Product rule, applied entry-wise: d(K1*K2) = dK1*K2 + K1*dK2.
        # Each factor is broadcast over the trailing hyperparameter axis.
        stacked_grad = np.dstack(
            (grad1 * K2[:, :, np.newaxis], grad2 * K1[:, :, np.newaxis])
        )
        return K1 * K2, stacked_grad

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.k1.diag(X) * self.k2.diag(X)

    def __repr__(self):
        return "{0} * {1}".format(self.k1, self.k2)
991
+
992
+
993
+ class Exponentiation(Kernel):
994
+ """The Exponentiation kernel takes one base kernel and a scalar parameter
995
+ :math:`p` and combines them via
996
+
997
+ .. math::
998
+ k_{exp}(X, Y) = k(X, Y) ^p
999
+
1000
+ Note that the `__pow__` magic method is overridden, so
1001
+ `Exponentiation(RBF(), 2)` is equivalent to using the ** operator
1002
+ with `RBF() ** 2`.
1003
+
1004
+
1005
+ Read more in the :ref:`User Guide <gp_kernels>`.
1006
+
1007
+ .. versionadded:: 0.18
1008
+
1009
+ Parameters
1010
+ ----------
1011
+ kernel : Kernel
1012
+ The base kernel
1013
+
1014
+ exponent : float
1015
+ The exponent for the base kernel
1016
+
1017
+
1018
+ Examples
1019
+ --------
1020
+ >>> from sklearn.datasets import make_friedman2
1021
+ >>> from sklearn.gaussian_process import GaussianProcessRegressor
1022
+ >>> from sklearn.gaussian_process.kernels import (RationalQuadratic,
1023
+ ... Exponentiation)
1024
+ >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
1025
+ >>> kernel = Exponentiation(RationalQuadratic(), exponent=2)
1026
+ >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
1027
+ ... random_state=0).fit(X, y)
1028
+ >>> gpr.score(X, y)
1029
+ 0.419...
1030
+ >>> gpr.predict(X[:1,:], return_std=True)
1031
+ (array([635.5...]), array([0.559...]))
1032
+ """
1033
+
1034
+ def __init__(self, kernel, exponent):
1035
+ self.kernel = kernel
1036
+ self.exponent = exponent
1037
+
1038
+ def get_params(self, deep=True):
1039
+ """Get parameters of this kernel.
1040
+
1041
+ Parameters
1042
+ ----------
1043
+ deep : bool, default=True
1044
+ If True, will return the parameters for this estimator and
1045
+ contained subobjects that are estimators.
1046
+
1047
+ Returns
1048
+ -------
1049
+ params : dict
1050
+ Parameter names mapped to their values.
1051
+ """
1052
+ params = dict(kernel=self.kernel, exponent=self.exponent)
1053
+ if deep:
1054
+ deep_items = self.kernel.get_params().items()
1055
+ params.update(("kernel__" + k, val) for k, val in deep_items)
1056
+ return params
1057
+
1058
+ @property
1059
+ def hyperparameters(self):
1060
+ """Returns a list of all hyperparameter."""
1061
+ r = []
1062
+ for hyperparameter in self.kernel.hyperparameters:
1063
+ r.append(
1064
+ Hyperparameter(
1065
+ "kernel__" + hyperparameter.name,
1066
+ hyperparameter.value_type,
1067
+ hyperparameter.bounds,
1068
+ hyperparameter.n_elements,
1069
+ )
1070
+ )
1071
+ return r
1072
+
1073
+ @property
1074
+ def theta(self):
1075
+ """Returns the (flattened, log-transformed) non-fixed hyperparameters.
1076
+
1077
+ Note that theta are typically the log-transformed values of the
1078
+ kernel's hyperparameters as this representation of the search space
1079
+ is more amenable for hyperparameter search, as hyperparameters like
1080
+ length-scales naturally live on a log-scale.
1081
+
1082
+ Returns
1083
+ -------
1084
+ theta : ndarray of shape (n_dims,)
1085
+ The non-fixed, log-transformed hyperparameters of the kernel
1086
+ """
1087
+ return self.kernel.theta
1088
+
1089
+ @theta.setter
1090
+ def theta(self, theta):
1091
+ """Sets the (flattened, log-transformed) non-fixed hyperparameters.
1092
+
1093
+ Parameters
1094
+ ----------
1095
+ theta : ndarray of shape (n_dims,)
1096
+ The non-fixed, log-transformed hyperparameters of the kernel
1097
+ """
1098
+ self.kernel.theta = theta
1099
+
1100
+ @property
1101
+ def bounds(self):
1102
+ """Returns the log-transformed bounds on the theta.
1103
+
1104
+ Returns
1105
+ -------
1106
+ bounds : ndarray of shape (n_dims, 2)
1107
+ The log-transformed bounds on the kernel's hyperparameters theta
1108
+ """
1109
+ return self.kernel.bounds
1110
+
1111
+ def __eq__(self, b):
1112
+ if type(self) != type(b):
1113
+ return False
1114
+ return self.kernel == b.kernel and self.exponent == b.exponent
1115
+
1116
+ def __call__(self, X, Y=None, eval_gradient=False):
1117
+ """Return the kernel k(X, Y) and optionally its gradient.
1118
+
1119
+ Parameters
1120
+ ----------
1121
+ X : array-like of shape (n_samples_X, n_features) or list of object
1122
+ Left argument of the returned kernel k(X, Y)
1123
+
1124
+ Y : array-like of shape (n_samples_Y, n_features) or list of object,\
1125
+ default=None
1126
+ Right argument of the returned kernel k(X, Y). If None, k(X, X)
1127
+ is evaluated instead.
1128
+
1129
+ eval_gradient : bool, default=False
1130
+ Determines whether the gradient with respect to the log of
1131
+ the kernel hyperparameter is computed.
1132
+
1133
+ Returns
1134
+ -------
1135
+ K : ndarray of shape (n_samples_X, n_samples_Y)
1136
+ Kernel k(X, Y)
1137
+
1138
+ K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
1139
+ optional
1140
+ The gradient of the kernel k(X, X) with respect to the log of the
1141
+ hyperparameter of the kernel. Only returned when `eval_gradient`
1142
+ is True.
1143
+ """
1144
+ if eval_gradient:
1145
+ K, K_gradient = self.kernel(X, Y, eval_gradient=True)
1146
+ K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
1147
+ return K**self.exponent, K_gradient
1148
+ else:
1149
+ K = self.kernel(X, Y, eval_gradient=False)
1150
+ return K**self.exponent
1151
+
1152
+ def diag(self, X):
1153
+ """Returns the diagonal of the kernel k(X, X).
1154
+
1155
+ The result of this method is identical to np.diag(self(X)); however,
1156
+ it can be evaluated more efficiently since only the diagonal is
1157
+ evaluated.
1158
+
1159
+ Parameters
1160
+ ----------
1161
+ X : array-like of shape (n_samples_X, n_features) or list of object
1162
+ Argument to the kernel.
1163
+
1164
+ Returns
1165
+ -------
1166
+ K_diag : ndarray of shape (n_samples_X,)
1167
+ Diagonal of kernel k(X, X)
1168
+ """
1169
+ return self.kernel.diag(X) ** self.exponent
1170
+
1171
    def __repr__(self):
        # Render as "<base kernel> ** <exponent>", mirroring how the
        # composite kernel would be written in code.
        return "{0} ** {1}".format(self.kernel, self.exponent)
1173
+
1174
    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        # Raising a kernel to a power preserves stationarity, so simply
        # delegate to the wrapped kernel.
        return self.kernel.is_stationary()
1177
+
1178
    @property
    def requires_vector_input(self):
        """Returns whether the kernel is defined on discrete structures."""
        # Exponentiation does not change the type of inputs the kernel
        # accepts; delegate to the wrapped kernel.
        return self.kernel.requires_vector_input
1182
+
1183
+
1184
class ConstantKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
    """Constant kernel.

    Can be used as part of a product-kernel where it scales the magnitude of
    the other factor (kernel) or as part of a sum-kernel, where it modifies
    the mean of the Gaussian process.

    .. math::
        k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2

    Adding a constant kernel is equivalent to adding a constant::

        kernel = RBF() + ConstantKernel(constant_value=2)

    is the same as::

        kernel = RBF() + 2


    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    constant_value : float, default=1.0
        The constant value which defines the covariance:
        k(x_1, x_2) = constant_value

    constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on `constant_value`.
        If set to "fixed", `constant_value` cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = RBF() + ConstantKernel(constant_value=2)
    >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3696...
    >>> gpr.predict(X[:1,:], return_std=True)
    (array([606.1...]), array([0.24...]))
    """

    def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
        self.constant_value = constant_value
        self.constant_value_bounds = constant_value_bounds

    @property
    def hyperparameter_constant_value(self):
        return Hyperparameter("constant_value", "numeric", self.constant_value_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y).

        Y : array-like of shape (n_samples_X, n_features) or list of object, \
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y).

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if Y is None:
            Y = X
        elif eval_gradient:
            raise ValueError("Gradient can only be evaluated when Y is None.")

        value = self.constant_value
        # Preserve the dtype of the supplied constant (e.g. an integer
        # constant yields an integer-valued kernel matrix).
        dtype = np.array(value).dtype
        n_X = _num_samples(X)
        K = np.full((n_X, _num_samples(Y)), value, dtype=dtype)

        if not eval_gradient:
            return K
        if self.hyperparameter_constant_value.fixed:
            # Fixed hyperparameter: the gradient has a zero-sized trailing dim.
            return K, np.empty((n_X, n_X, 0))
        # dK / d log(constant_value) equals constant_value everywhere.
        return K, np.full((n_X, n_X, 1), value, dtype=dtype)

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        Equivalent to ``np.diag(self(X))`` but cheaper, since only the
        diagonal entries are computed.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X).
        """
        value = self.constant_value
        return np.full(_num_samples(X), value, dtype=np.array(value).dtype)

    def __repr__(self):
        # Displayed as an amplitude squared, matching how a ConstantKernel
        # is typically used as a scale factor in product kernels.
        amplitude = np.sqrt(self.constant_value)
        return "{0:.3g}**2".format(amplitude)
1320
+
1321
+
1322
class WhiteKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
    """White kernel.

    The main use-case of this kernel is as part of a sum-kernel where it
    explains the noise of the signal as independently and identically
    normally-distributed. The parameter noise_level equals the variance of
    this noise.

    .. math::
        k(x_1, x_2) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0


    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    noise_level : float, default=1.0
        Parameter controlling the noise level (variance).

    noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'noise_level'.
        If set to "fixed", 'noise_level' cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1... ]), array([316.6..., 316.6...]))
    """

    def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
        self.noise_level = noise_level
        self.noise_level_bounds = noise_level_bounds

    @property
    def hyperparameter_noise_level(self):
        return Hyperparameter("noise_level", "numeric", self.noise_level_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y).

        Y : array-like of shape (n_samples_X, n_features) or list of object,\
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y).

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if Y is not None and eval_gradient:
            raise ValueError("Gradient can only be evaluated when Y is None.")

        if Y is not None:
            # X and Y are treated as distinct inputs, so the white-noise
            # kernel is identically zero between them.
            return np.zeros((_num_samples(X), _num_samples(Y)))

        n = _num_samples(X)
        K = self.noise_level * np.eye(n)
        if not eval_gradient:
            return K
        if self.hyperparameter_noise_level.fixed:
            # Fixed hyperparameter: the gradient has a zero-sized trailing dim.
            return K, np.empty((n, n, 0))
        # dK / d log(noise_level) = noise_level on the diagonal, 0 elsewhere.
        return K, self.noise_level * np.eye(n)[:, :, np.newaxis]

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        Equivalent to ``np.diag(self(X))`` but cheaper, since only the
        diagonal entries are computed.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X).
        """
        level = self.noise_level
        return np.full(_num_samples(X), level, dtype=np.array(level).dtype)

    def __repr__(self):
        return "{0}(noise_level={1:.3g})".format(
            self.__class__.__name__, self.noise_level
        )
1443
+
1444
+
1445
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    """Radial basis function kernel (aka squared-exponential kernel).

    The RBF kernel is a stationary kernel. It is also known as the
    "squared exponential" kernel. It is parameterized by a length scale
    parameter :math:`l>0`, which can either be a scalar (isotropic variant
    of the kernel) or a vector with the same number of dimensions as the inputs
    X (anisotropic variant of the kernel). The kernel is given by:

    .. math::
        k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right)

    where :math:`l` is the length scale of the kernel and
    :math:`d(\\cdot,\\cdot)` is the Euclidean distance.
    For advice on how to set the length scale parameter, see e.g. [1]_.

    This kernel is infinitely differentiable, which implies that GPs with this
    kernel as covariance function have mean square derivatives of all orders,
    and are thus very smooth.
    See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float or ndarray of shape (n_features,), default=1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
        Advice on Covariance functions".
        <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

    .. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * RBF(1.0)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8354..., 0.03228..., 0.1322...],
           [0.7906..., 0.0652..., 0.1441...]])
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
        self.length_scale = length_scale
        self.length_scale_bounds = length_scale_bounds

    @property
    def anisotropic(self):
        # A per-dimension length scale (iterable of length > 1) makes the
        # kernel anisotropic; a scalar or length-1 iterable is isotropic.
        return np.iterable(self.length_scale) and len(self.length_scale) > 1

    @property
    def hyperparameter_length_scale(self):
        # Anisotropic kernels expose length_scale as a vector-valued
        # hyperparameter (n_elements = number of feature dimensions).
        if self.anisotropic:
            return Hyperparameter(
                "length_scale",
                "numeric",
                self.length_scale_bounds,
                len(self.length_scale),
            )
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            # Condensed pairwise distances (upper triangle only) save
            # roughly half the work in the symmetric k(X, X) case.
            dists = pdist(X / length_scale, metric="sqeuclidean")
            K = np.exp(-0.5 * dists)
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
            K = np.exp(-0.5 * dists)

        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                return K, np.empty((X.shape[0], X.shape[0], 0))
            elif not self.anisotropic or length_scale.shape[0] == 1:
                # Isotropic case: dK/d log(l) = K * d(x_i, x_j)^2 / l^2,
                # and `dists` already holds the scaled squared distances.
                K_gradient = (K * squareform(dists))[:, :, np.newaxis]
                return K, K_gradient
            elif self.anisotropic:
                # We need to recompute the pairwise dimension-wise distances
                K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
                    length_scale**2
                )
                K_gradient *= K[..., np.newaxis]
                return K, K_gradient
        else:
            return K

    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}])".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
            )
        else:  # isotropic
            return "{0}(length_scale={1:.3g})".format(
                self.__class__.__name__, np.ravel(self.length_scale)[0]
            )
1596
+
1597
+
1598
class Matern(RBF):
    """Matern kernel.

    The class of Matern kernels is a generalization of the :class:`RBF`.
    It has an additional parameter :math:`\\nu` which controls the
    smoothness of the resulting function. The smaller :math:`\\nu`,
    the less smooth the approximated function is.
    As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to
    the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel
    becomes identical to the absolute exponential kernel.
    Important intermediate values are
    :math:`\\nu=1.5` (once differentiable functions)
    and :math:`\\nu=2.5` (twice differentiable functions).

    The kernel is given by:

    .. math::
        k(x_i, x_j) = \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg(
        \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )
        \\Bigg)^\\nu K_\\nu\\Bigg(
        \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )\\Bigg)



    where :math:`d(\\cdot,\\cdot)` is the Euclidean distance,
    :math:`K_{\\nu}(\\cdot)` is a modified Bessel function and
    :math:`\\Gamma(\\cdot)` is the gamma function.
    See [1]_, Chapter 4, Section 4.2, for details regarding the different
    variants of the Matern kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float or ndarray of shape (n_features,), default=1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    nu : float, default=1.5
        The parameter nu controlling the smoothness of the learned function.
        The smaller nu, the less smooth the approximated function is.
        For nu=inf, the kernel becomes equivalent to the RBF kernel and for
        nu=0.5 to the absolute exponential kernel. Important intermediate
        values are nu=1.5 (once differentiable functions) and nu=2.5
        (twice differentiable functions). Note that values of nu not in
        [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (appr. 10 times higher) since they require to evaluate the modified
        Bessel function. Furthermore, in contrast to l, nu is kept fixed to
        its initial value and not optimized.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import Matern
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8513..., 0.0368..., 0.1117...],
           [0.8086..., 0.0693..., 0.1220...]])
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5):
        super().__init__(length_scale, length_scale_bounds)
        self.nu = nu

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            # Condensed (upper-triangular) distances; expanded below.
            dists = pdist(X / length_scale, metric="euclidean")
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale, metric="euclidean")

        # Closed-form expressions exist for nu in {0.5, 1.5, 2.5, inf};
        # any other nu falls through to the (much slower) Bessel formula.
        if self.nu == 0.5:
            K = np.exp(-dists)
        elif self.nu == 1.5:
            K = dists * math.sqrt(3)
            K = (1.0 + K) * np.exp(-K)
        elif self.nu == 2.5:
            K = dists * math.sqrt(5)
            K = (1.0 + K + K**2 / 3.0) * np.exp(-K)
        elif self.nu == np.inf:
            # nu -> inf recovers the RBF (squared-exponential) kernel.
            K = np.exp(-(dists**2) / 2.0)
        else:  # general case; expensive to evaluate
            K = dists
            K[K == 0.0] += np.finfo(float).eps  # strict zeros result in nan
            tmp = math.sqrt(2 * self.nu) * K
            # `K` is reused in place as the output buffer from here on.
            K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu))
            K *= tmp**self.nu
            K *= kv(self.nu, tmp)

        if Y is None:
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)

        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                K_gradient = np.empty((X.shape[0], X.shape[0], 0))
                return K, K_gradient

            # We need to recompute the pairwise dimension-wise distances
            if self.anisotropic:
                D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (length_scale**2)
            else:
                D = squareform(dists**2)[:, :, np.newaxis]

            if self.nu == 0.5:
                # Guarded division: zero distance would otherwise give 0/0.
                denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
                divide_result = np.zeros_like(D)
                np.divide(
                    D,
                    denominator,
                    out=divide_result,
                    where=denominator != 0,
                )
                K_gradient = K[..., np.newaxis] * divide_result
            elif self.nu == 1.5:
                K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
            elif self.nu == 2.5:
                tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
                K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
            elif self.nu == np.inf:
                K_gradient = D * K[..., np.newaxis]
            else:
                # approximate gradient numerically
                def f(theta):  # helper function
                    return self.clone_with_theta(theta)(X, Y)

                return K, _approx_fprime(self.theta, f, 1e-10)

            if not self.anisotropic:
                # Collapse the per-dimension gradient into the single
                # isotropic length-scale dimension.
                return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
            else:
                return K, K_gradient
        else:
            return K

    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}], nu={2:.3g})".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
                self.nu,
            )
        else:
            return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
                self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu
            )
1796
+
1797
+
1798
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    """Rational Quadratic kernel.

    The RationalQuadratic kernel can be seen as a scale mixture (an infinite
    sum) of RBF kernels with different characteristic length scales. It is
    parameterized by a length scale parameter :math:`l>0` and a scale
    mixture parameter :math:`\\alpha>0`. Only the isotropic variant
    where length_scale :math:`l` is a scalar is supported at the moment.
    The kernel is given by:

    .. math::
        k(x_i, x_j) = \\left(
        1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha l^2}\\right)^{-\\alpha}

    where :math:`\\alpha` is the scale mixture parameter, :math:`l` is
    the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the
    Euclidean distance.
    For advice on how to set the parameters, see e.g. [1]_.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float > 0, default=1.0
        The length scale of the kernel.

    alpha : float > 0, default=1.0
        Scale mixture parameter

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'alpha'.
        If set to "fixed", 'alpha' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
        Advice on Covariance functions".
        <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RationalQuadratic
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8881..., 0.0566..., 0.05518...],
           [0.8678..., 0.0707... , 0.0614...]])
    """

    def __init__(
        self,
        length_scale=1.0,
        alpha=1.0,
        length_scale_bounds=(1e-5, 1e5),
        alpha_bounds=(1e-5, 1e5),
    ):
        self.length_scale = length_scale
        self.alpha = alpha
        self.length_scale_bounds = length_scale_bounds
        self.alpha_bounds = alpha_bounds

    @property
    def hyperparameter_length_scale(self):
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    @property
    def hyperparameter_alpha(self):
        return Hyperparameter("alpha", "numeric", self.alpha_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        # Only a scalar (isotropic) length scale is supported.
        if len(np.atleast_1d(self.length_scale)) > 1:
            raise AttributeError(
                "RationalQuadratic kernel only supports isotropic version, "
                "please use a single scalar for length_scale"
            )
        X = np.atleast_2d(X)
        if Y is None:
            dists = squareform(pdist(X, metric="sqeuclidean"))
            tmp = dists / (2 * self.alpha * self.length_scale**2)
            base = 1 + tmp
            K = base**-self.alpha
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric="sqeuclidean")
            K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha

        if eval_gradient:
            # NOTE: `base` and the square `dists` are only defined in the
            # Y-is-None branch above; this branch is only reachable in that
            # case because eval_gradient with Y raises ValueError earlier.
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = dists * K / (self.length_scale**2 * base)
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # l is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))

            # gradient with respect to alpha
            if not self.hyperparameter_alpha.fixed:
                alpha_gradient = K * (
                    -self.alpha * np.log(base)
                    + dists / (2 * self.length_scale**2 * base)
                )
                alpha_gradient = alpha_gradient[:, :, np.newaxis]
            else:  # alpha is kept fixed
                alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))

            return K, np.dstack((alpha_gradient, length_scale_gradient))
        else:
            return K

    def __repr__(self):
        return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
            self.__class__.__name__, self.alpha, self.length_scale
        )
1952
+
1953
+
1954
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    r"""Exp-Sine-Squared kernel (aka periodic kernel).

    The ExpSineSquared kernel allows one to model functions which repeat
    themselves exactly. It is parameterized by a length scale
    parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
    Only the isotropic variant where :math:`l` is a scalar is
    supported at the moment. The kernel is given by:

    .. math::
        k(x_i, x_j) = \text{exp}\left(-
        \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^2} \right)

    where :math:`l` is the length scale of the kernel, :math:`p` the
    periodicity of the kernel and :math:`d(\cdot,\cdot)` is the
    Euclidean distance.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------

    length_scale : float > 0, default=1.0
        The length scale of the kernel.

    periodicity : float > 0, default=1.0
        The periodicity of the kernel.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'periodicity'.
        If set to "fixed", 'periodicity' cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import ExpSineSquared
    >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
    >>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
    >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.0144...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([425.6..., 457.5...]), array([0.3894..., 0.3467...]))
    """

    def __init__(
        self,
        length_scale=1.0,
        periodicity=1.0,
        length_scale_bounds=(1e-5, 1e5),
        periodicity_bounds=(1e-5, 1e5),
    ):
        self.length_scale = length_scale
        self.periodicity = periodicity
        self.length_scale_bounds = length_scale_bounds
        self.periodicity_bounds = periodicity_bounds

    @property
    def hyperparameter_length_scale(self):
        """Returns the length scale"""
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    @property
    def hyperparameter_periodicity(self):
        """Returns the periodicity"""
        return Hyperparameter("periodicity", "numeric", self.periodicity_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        if Y is None:
            # `arg` and `sin_of_arg` are reused below by the gradient
            # computation (gradients are only allowed when Y is None).
            dists = squareform(pdist(X, metric="euclidean"))
            arg = np.pi * dists / self.periodicity
            sin_of_arg = np.sin(arg)
            K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric="euclidean")
            K = np.exp(
                -2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2
            )

        if eval_gradient:
            # Gradients are taken with respect to the *log* of the
            # hyperparameters, hence the extra parameter factors below.
            cos_of_arg = np.cos(arg)
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # length_scale is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
            # gradient with respect to p
            if not self.hyperparameter_periodicity.fixed:
                periodicity_gradient = (
                    4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K
                )
                periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
            else:  # p is kept fixed
                periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))

            return K, np.dstack((length_scale_gradient, periodicity_gradient))
        else:
            return K

    def __repr__(self):
        return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
            self.__class__.__name__, self.length_scale, self.periodicity
        )
2097
+
2098
+
2099
class DotProduct(Kernel):
    r"""Dot-Product kernel.

    The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting :math:`N(0, 1)` priors on the coefficients
    of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
    on the bias. The DotProduct kernel is invariant to a rotation of
    the coordinates about the origin, but not translations.
    It is parameterized by a parameter sigma_0 :math:`\sigma`
    which controls the inhomogeneity of the kernel. For :math:`\sigma_0^2 =0`,
    the kernel is called the homogeneous linear kernel, otherwise
    it is inhomogeneous. The kernel is given by

    .. math::
        k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j

    The DotProduct kernel is commonly combined with exponentiation.

    See [1]_, Chapter 4, Section 4.2, for further details regarding the
    DotProduct kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    sigma_0 : float >= 0, default=1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.

    sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'sigma_0'.
        If set to "fixed", 'sigma_0' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel()
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
    """

    def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
        self.sigma_0 = sigma_0
        self.sigma_0_bounds = sigma_0_bounds

    @property
    def hyperparameter_sigma_0(self):
        """Returns sigma_0 as a tunable numeric hyperparameter."""
        return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        if Y is None:
            K = np.inner(X, X) + self.sigma_0**2
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            K = np.inner(X, Y) + self.sigma_0**2

        if eval_gradient:
            if not self.hyperparameter_sigma_0.fixed:
                K_gradient = np.empty((K.shape[0], K.shape[1], 1))
                # d k / d log(sigma_0) = 2 * sigma_0**2, constant for every
                # pair since sigma_0 only enters as an additive offset.
                K_gradient[..., 0] = 2 * self.sigma_0**2
                return K, K_gradient
            else:
                return K, np.empty((X.shape[0], X.shape[0], 0))
        else:
            return K

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y).

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X).
        """
        # Row-wise squared norms plus sigma_0**2 == diag(k(X, X)).
        return np.einsum("ij,ij->i", X, X) + self.sigma_0**2

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return False

    def __repr__(self):
        return "{0}(sigma_0={1:.3g})".format(self.__class__.__name__, self.sigma_0)
2235
+
2236
+
2237
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
    """Forward-difference gradient of a function with 2-d array output.

    Returns an array of shape ``(f0.shape[0], f0.shape[1], len(xk))`` where
    entry ``[..., k]`` approximates the partial derivative of ``f`` with
    respect to ``xk[k]`` using step size ``epsilon`` (scalar or per-parameter).
    """
    f0 = f(*((xk,) + args))
    n_params = len(xk)
    grad = np.zeros((f0.shape[0], f0.shape[1], n_params), float)
    # Each row of eye(n) * epsilon perturbs exactly one coordinate by its
    # corresponding step size.
    for k, step in enumerate(np.eye(n_params) * epsilon):
        grad[:, :, k] = (f(*((xk + step,) + args)) - f0) / step[k]
    return grad
2248
+
2249
+
2250
class PairwiseKernel(Kernel):
    """Wrapper for kernels in sklearn.metrics.pairwise.

    A thin wrapper around the functionality of the kernels in
    sklearn.metrics.pairwise.

    Note: Evaluation of eval_gradient is not analytic but numeric and all
    kernels support only isotropic distances. The parameter gamma is
    considered to be a hyperparameter and may be optimized. The other
    kernel parameters are set directly at initialization and are kept
    fixed.

    .. versionadded:: 0.18

    Parameters
    ----------
    gamma : float, default=1.0
        Parameter gamma of the pairwise kernel specified by metric. It should
        be positive.

    gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'gamma'.
        If set to "fixed", 'gamma' cannot be changed during
        hyperparameter tuning.

    metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \
            "rbf", "laplacian", "sigmoid", "cosine"} or callable, \
            default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    pairwise_kernels_kwargs : dict, default=None
        All entries of this dict (if any) are passed as keyword arguments to
        the pairwise kernel function.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import PairwiseKernel
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = PairwiseKernel(metric='rbf')
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8880..., 0.05663..., 0.05532...],
           [0.8676..., 0.07073..., 0.06165...]])
    """

    def __init__(
        self,
        gamma=1.0,
        gamma_bounds=(1e-5, 1e5),
        metric="linear",
        pairwise_kernels_kwargs=None,
    ):
        self.gamma = gamma
        self.gamma_bounds = gamma_bounds
        self.metric = metric
        self.pairwise_kernels_kwargs = pairwise_kernels_kwargs

    @property
    def hyperparameter_gamma(self):
        """Returns gamma as a tunable numeric hyperparameter."""
        return Hyperparameter("gamma", "numeric", self.gamma_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
        if self.pairwise_kernels_kwargs is None:
            pairwise_kernels_kwargs = {}

        X = np.atleast_2d(X)
        K = pairwise_kernels(
            X,
            Y,
            metric=self.metric,
            gamma=self.gamma,
            filter_params=True,
            **pairwise_kernels_kwargs,
        )
        if eval_gradient:
            if self.hyperparameter_gamma.fixed:
                return K, np.empty((X.shape[0], X.shape[0], 0))
            else:
                # approximate gradient numerically
                def f(gamma):  # helper function
                    return pairwise_kernels(
                        X,
                        Y,
                        metric=self.metric,
                        gamma=np.exp(gamma),
                        filter_params=True,
                        **pairwise_kernels_kwargs,
                    )

                # self.theta holds log(gamma) and f exponentiates its input,
                # so the finite difference is taken in log-space as required.
                return K, _approx_fprime(self.theta, f, 1e-10)
        else:
            return K

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        # We have to fall back to slow way of computing diagonal
        return np.apply_along_axis(self, 1, X).ravel()

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return self.metric in ["rbf"]

    def __repr__(self):
        return "{0}(gamma={1}, metric={2})".format(
            self.__class__.__name__, self.gamma, self.metric
        )
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py ADDED
File without changes
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc ADDED
Binary file (8.26 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/_mini_sequence_kernel.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from sklearn.base import clone
4
+ from sklearn.gaussian_process.kernels import (
5
+ GenericKernelMixin,
6
+ Hyperparameter,
7
+ Kernel,
8
+ StationaryKernelMixin,
9
+ )
10
+
11
+
12
class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel):
    """
    A minimal (but valid) convolutional kernel for sequences of variable
    length.
    """

    def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
        self.baseline_similarity = baseline_similarity
        self.baseline_similarity_bounds = baseline_similarity_bounds

    @property
    def hyperparameter_baseline_similarity(self):
        """Declare `baseline_similarity` as a tunable numeric hyperparameter."""
        return Hyperparameter(
            "baseline_similarity", "numeric", self.baseline_similarity_bounds
        )

    def _f(self, s1, s2):
        """Kernel value between two sequences: sum over all character pairs,
        scoring 1 for a match and `baseline_similarity` for a mismatch."""
        # Generator expression: no need to materialize the full pair list.
        return sum(
            1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2
        )

    def _g(self, s1, s2):
        """Gradient helper: number of mismatching character pairs."""
        return sum(0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return k(X, Y) and, if requested, its gradient w.r.t. the single
        hyperparameter (one trailing gradient dimension)."""
        if Y is None:
            Y = X

        if eval_gradient:
            return (
                np.array([[self._f(x, y) for y in Y] for x in X]),
                np.array([[[self._g(x, y)] for y in Y] for x in X]),
            )
        else:
            return np.array([[self._f(x, y) for y in Y] for x in X])

    def diag(self, X):
        """Diagonal of k(X, X) without computing the full matrix."""
        return np.array([self._f(x, x) for x in X])

    def clone_with_theta(self, theta):
        """Return a clone of this kernel with hyperparameters set to `theta`."""
        cloned = clone(self)
        cloned.theta = theta
        return cloned
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpc.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Testing for Gaussian process classification"""
2
+
3
+ # Authors: The scikit-learn developers
4
+ # SPDX-License-Identifier: BSD-3-Clause
5
+
6
+ import warnings
7
+
8
+ import numpy as np
9
+ import pytest
10
+ from scipy.optimize import approx_fprime
11
+
12
+ from sklearn.exceptions import ConvergenceWarning
13
+ from sklearn.gaussian_process import GaussianProcessClassifier
14
+ from sklearn.gaussian_process.kernels import (
15
+ RBF,
16
+ CompoundKernel,
17
+ WhiteKernel,
18
+ )
19
+ from sklearn.gaussian_process.kernels import (
20
+ ConstantKernel as C,
21
+ )
22
+ from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
23
+ from sklearn.utils._testing import assert_almost_equal, assert_array_equal
24
+
25
+
26
def f(x):
    """Latent function whose sign defines the binary class labels."""
    sine = np.sin  # elementwise sine; identical to np.sin(x)
    return sine(x)
28
+
29
+
30
+ X = np.atleast_2d(np.linspace(0, 10, 30)).T
31
+ X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
32
+ y = np.array(f(X).ravel() > 0, dtype=int)
33
+ fX = f(X).ravel()
34
+ y_mc = np.empty(y.shape, dtype=int) # multi-class
35
+ y_mc[fX < -0.35] = 0
36
+ y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
37
+ y_mc[fX > 0.35] = 2
38
+
39
+
40
# Mix of fixed and tunable kernels exercised by the parametrized tests below.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [
    RBF(length_scale=0.1),
    fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
]
# Kernels whose hyperparameters may actually be optimized during fitting.
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]
48
+
49
+
50
@pytest.mark.parametrize("kernel", kernels)
def test_predict_consistent(kernel):
    """Binary predict() must agree with thresholding predict_proba at 0.5."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)
55
+
56
+
57
def test_predict_consistent_structured():
    """predict()/predict_proba consistency for a kernel on string inputs."""
    X = ["A", "AB", "B"]
    y = np.array([True, False, True])
    kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)
64
+
65
+
66
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_lml_improving(kernel):
    """Hyperparameter tuning must improve the log-marginal likelihood."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood(
        kernel.theta
    )
73
+
74
+
75
@pytest.mark.parametrize("kernel", kernels)
def test_lml_precomputed(kernel):
    """The lml of the optimized kernel must be stored and reused correctly."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_almost_equal(
        gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7
    )
82
+
83
+
84
@pytest.mark.parametrize("kernel", kernels)
def test_lml_without_cloning_kernel(kernel):
    """clone_kernel=False must mutate kernel_.theta as a side effect."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64)

    gpc.log_marginal_likelihood(input_theta, clone_kernel=False)
    assert_almost_equal(gpc.kernel_.theta, input_theta, 7)
92
+
93
+
94
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
    """After optimization, theta must be a local maximum or sit on a bound."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpc.log_marginal_likelihood(gpc.kernel_.theta, True)

    # Either the gradient is ~0 (interior optimum) or theta hit a bound.
    assert np.all(
        (np.abs(lml_gradient) < 1e-4)
        | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0])
        | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])
    )
106
+
107
+
108
@pytest.mark.parametrize("kernel", kernels)
def test_lml_gradient(kernel):
    """Analytic lml gradient must match a finite-difference approximation."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
    lml_gradient_approx = approx_fprime(
        kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10
    )

    assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
119
+
120
+
121
def test_random_starts(global_random_seed):
    """More random restarts can only improve the selected theta's lml."""
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(global_random_seed)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0

    kernel = C(1.0, (1e-2, 1e2)) * RBF(
        length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
    )
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessClassifier(
            kernel=kernel,
            n_restarts_optimizer=n_restarts_optimizer,
            random_state=global_random_seed,
        ).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # Small slack for float32-level numerical noise.
        assert lml > last_lml - np.finfo(np.float32).eps
        last_lml = lml
142
+
143
+
144
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_custom_optimizer(kernel, global_random_seed):
    """GPC must accept an externally defined optimizer callable."""

    # Dummy optimizer: evaluate 10 random thetas and keep the best one.
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(global_random_seed)
        theta_opt, func_min = initial_theta, obj_func(
            initial_theta, eval_gradient=False
        )
        for _ in range(10):
            theta = np.atleast_1d(
                rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
            )
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
    gpc.fit(X, y_mc)
    # Checks that optimizer improved marginal likelihood
    assert gpc.log_marginal_likelihood(
        gpc.kernel_.theta
    ) >= gpc.log_marginal_likelihood(kernel.theta)
168
+
169
+
170
@pytest.mark.parametrize("kernel", kernels)
def test_multi_class(kernel):
    """Multi-class GPC: probabilities sum to 1 and argmax matches predict."""
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    assert_almost_equal(y_prob.sum(1), 1)

    y_pred = gpc.predict(X2)
    assert_array_equal(np.argmax(y_prob, 1), y_pred)
181
+
182
+
183
@pytest.mark.parametrize("kernel", kernels)
def test_multi_class_n_jobs(kernel):
    """Multi-class GPC must produce identical results with n_jobs > 1."""
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
    gpc_2.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    y_prob_2 = gpc_2.predict_proba(X2)
    assert_almost_equal(y_prob, y_prob_2)
195
+
196
+
197
def test_warning_bounds():
    """Fitting near a hyperparameter bound must emit a ConvergenceWarning
    that names the offending parameter dimension and bound."""
    kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
    gpc = GaussianProcessClassifier(kernel=kernel)
    warning_message = (
        "The optimal value found for dimension 0 of parameter "
        "length_scale is close to the specified upper bound "
        "0.001. Increasing the bound and calling fit again may "
        "find a better value."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        gpc.fit(X, y)

    # Sum kernel: each sub-kernel near a bound yields its own warning,
    # prefixed with the k1__/k2__ parameter path.
    kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
        length_scale_bounds=[1e3, 1e5]
    )
    gpc_sum = GaussianProcessClassifier(kernel=kernel_sum)
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpc_sum.fit(X, y)

    assert len(record) == 2

    assert issubclass(record[0].category, ConvergenceWarning)
    assert (
        record[0].message.args[0] == "The optimal value found for "
        "dimension 0 of parameter "
        "k1__noise_level is close to the "
        "specified upper bound 0.001. "
        "Increasing the bound and calling "
        "fit again may find a better value."
    )

    assert issubclass(record[1].category, ConvergenceWarning)
    assert (
        record[1].message.args[0] == "The optimal value found for "
        "dimension 0 of parameter "
        "k2__length_scale is close to the "
        "specified lower bound 1000.0. "
        "Decreasing the bound and calling "
        "fit again may find a better value."
    )

    # Anisotropic kernel: one warning per length-scale dimension.
    X_tile = np.tile(X, 2)
    kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
    gpc_dims = GaussianProcessClassifier(kernel=kernel_dims)

    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpc_dims.fit(X_tile, y)

    assert len(record) == 2

    assert issubclass(record[0].category, ConvergenceWarning)
    assert (
        record[0].message.args[0] == "The optimal value found for "
        "dimension 0 of parameter "
        "length_scale is close to the "
        "specified upper bound 100.0. "
        "Increasing the bound and calling "
        "fit again may find a better value."
    )

    assert issubclass(record[1].category, ConvergenceWarning)
    assert (
        record[1].message.args[0] == "The optimal value found for "
        "dimension 1 of parameter "
        "length_scale is close to the "
        "specified upper bound 100.0. "
        "Increasing the bound and calling "
        "fit again may find a better value."
    )
268
+
269
+
270
@pytest.mark.parametrize(
    "params, error_type, err_msg",
    [
        (
            {"kernel": CompoundKernel(0)},
            ValueError,
            "kernel cannot be a CompoundKernel",
        )
    ],
)
def test_gpc_fit_error(params, error_type, err_msg):
    """Check that the expected errors are raised during fit."""
    gpc = GaussianProcessClassifier(**params)
    with pytest.raises(error_type, match=err_msg):
        gpc.fit(X, y)
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py ADDED
@@ -0,0 +1,848 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Testing for Gaussian process regression"""
2
+
3
+ # Authors: The scikit-learn developers
4
+ # SPDX-License-Identifier: BSD-3-Clause
5
+
6
+ import re
7
+ import sys
8
+ import warnings
9
+
10
+ import numpy as np
11
+ import pytest
12
+ from scipy.optimize import approx_fprime
13
+
14
+ from sklearn.exceptions import ConvergenceWarning
15
+ from sklearn.gaussian_process import GaussianProcessRegressor
16
+ from sklearn.gaussian_process.kernels import (
17
+ RBF,
18
+ DotProduct,
19
+ ExpSineSquared,
20
+ WhiteKernel,
21
+ )
22
+ from sklearn.gaussian_process.kernels import (
23
+ ConstantKernel as C,
24
+ )
25
+ from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
26
+ from sklearn.utils._testing import (
27
+ assert_allclose,
28
+ assert_almost_equal,
29
+ assert_array_almost_equal,
30
+ assert_array_less,
31
+ )
32
+
33
+
34
def f(x):
    """Noiseless target function x * sin(x) used to generate the targets."""
    return np.multiply(x, np.sin(x))
36
+
37
+
38
X = np.atleast_2d([1.0, 3.0, 5.0, 6.0, 7.0, 8.0]).T  # training inputs
X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T  # held-out inputs for prediction
y = f(X).ravel()  # noiseless targets x * sin(x)

# Mix of fixed and tunable kernels exercised by the parametrized tests below.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [
    RBF(length_scale=1.0),
    fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
    C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
]
# Kernels whose hyperparameters may actually be optimized during fitting.
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]
54
+
55
+
56
+ @pytest.mark.parametrize("kernel", kernels)
57
+ def test_gpr_interpolation(kernel):
58
+ if sys.maxsize <= 2**32:
59
+ pytest.xfail("This test may fail on 32 bit Python")
60
+
61
+ # Test the interpolating property for different kernels.
62
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
63
+ y_pred, y_cov = gpr.predict(X, return_cov=True)
64
+
65
+ assert_almost_equal(y_pred, y)
66
+ assert_almost_equal(np.diag(y_cov), 0.0)
67
+
68
+
69
+ def test_gpr_interpolation_structured():
70
+ # Test the interpolating property for different kernels.
71
+ kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
72
+ X = ["A", "B", "C"]
73
+ y = np.array([1, 2, 3])
74
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
75
+ y_pred, y_cov = gpr.predict(X, return_cov=True)
76
+
77
+ assert_almost_equal(
78
+ kernel(X, eval_gradient=True)[1].ravel(), (1 - np.eye(len(X))).ravel()
79
+ )
80
+ assert_almost_equal(y_pred, y)
81
+ assert_almost_equal(np.diag(y_cov), 0.0)
82
+
83
+
84
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
85
+ def test_lml_improving(kernel):
86
+ if sys.maxsize <= 2**32:
87
+ pytest.xfail("This test may fail on 32 bit Python")
88
+
89
+ # Test that hyperparameter-tuning improves log-marginal likelihood.
90
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
91
+ assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
92
+ kernel.theta
93
+ )
94
+
95
+
96
+ @pytest.mark.parametrize("kernel", kernels)
97
+ def test_lml_precomputed(kernel):
98
+ # Test that lml of optimized kernel is stored correctly.
99
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
100
+ assert gpr.log_marginal_likelihood(gpr.kernel_.theta) == pytest.approx(
101
+ gpr.log_marginal_likelihood()
102
+ )
103
+
104
+
105
+ @pytest.mark.parametrize("kernel", kernels)
106
+ def test_lml_without_cloning_kernel(kernel):
107
+ # Test that lml of optimized kernel is stored correctly.
108
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
109
+ input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64)
110
+
111
+ gpr.log_marginal_likelihood(input_theta, clone_kernel=False)
112
+ assert_almost_equal(gpr.kernel_.theta, input_theta, 7)
113
+
114
+
115
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
116
+ def test_converged_to_local_maximum(kernel):
117
+ # Test that we are in local maximum after hyperparameter-optimization.
118
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
119
+
120
+ lml, lml_gradient = gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
121
+
122
+ assert np.all(
123
+ (np.abs(lml_gradient) < 1e-4)
124
+ | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0])
125
+ | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])
126
+ )
127
+
128
+
129
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
130
+ def test_solution_inside_bounds(kernel):
131
+ # Test that hyperparameter-optimization remains in bounds#
132
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
133
+
134
+ bounds = gpr.kernel_.bounds
135
+ max_ = np.finfo(gpr.kernel_.theta.dtype).max
136
+ tiny = 1e-10
137
+ bounds[~np.isfinite(bounds[:, 1]), 1] = max_
138
+
139
+ assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
140
+ assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
141
+
142
+
143
+ @pytest.mark.parametrize("kernel", kernels)
144
+ def test_lml_gradient(kernel):
145
+ # Compare analytic and numeric gradient of log marginal likelihood.
146
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
147
+
148
+ lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
149
+ lml_gradient_approx = approx_fprime(
150
+ kernel.theta, lambda theta: gpr.log_marginal_likelihood(theta, False), 1e-10
151
+ )
152
+
153
+ assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
154
+
155
+
156
+ @pytest.mark.parametrize("kernel", kernels)
157
+ def test_prior(kernel):
158
+ # Test that GP prior has mean 0 and identical variances.
159
+ gpr = GaussianProcessRegressor(kernel=kernel)
160
+
161
+ y_mean, y_cov = gpr.predict(X, return_cov=True)
162
+
163
+ assert_almost_equal(y_mean, 0, 5)
164
+ if len(gpr.kernel.theta) > 1:
165
+ # XXX: quite hacky, works only for current kernels
166
+ assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
167
+ else:
168
+ assert_almost_equal(np.diag(y_cov), 1, 5)
169
+
170
+
171
+ @pytest.mark.parametrize("kernel", kernels)
172
+ def test_sample_statistics(kernel):
173
+ # Test that statistics of samples drawn from GP are correct.
174
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
175
+
176
+ y_mean, y_cov = gpr.predict(X2, return_cov=True)
177
+
178
+ samples = gpr.sample_y(X2, 300000)
179
+
180
+ # More digits accuracy would require many more samples
181
+ assert_almost_equal(y_mean, np.mean(samples, 1), 1)
182
+ assert_almost_equal(
183
+ np.diag(y_cov) / np.diag(y_cov).max(),
184
+ np.var(samples, 1) / np.diag(y_cov).max(),
185
+ 1,
186
+ )
187
+
188
+
189
+ def test_no_optimizer():
190
+ # Test that kernel parameters are unmodified when optimizer is None.
191
+ kernel = RBF(1.0)
192
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
193
+ assert np.exp(gpr.kernel_.theta) == 1.0
194
+
195
+
196
+ @pytest.mark.parametrize("kernel", kernels)
197
+ @pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)])
198
+ def test_predict_cov_vs_std(kernel, target):
199
+ if sys.maxsize <= 2**32:
200
+ pytest.xfail("This test may fail on 32 bit Python")
201
+
202
+ # Test that predicted std.-dev. is consistent with cov's diagonal.
203
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
204
+ y_mean, y_cov = gpr.predict(X2, return_cov=True)
205
+ y_mean, y_std = gpr.predict(X2, return_std=True)
206
+ assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
207
+
208
+
209
+ def test_anisotropic_kernel():
210
+ # Test that GPR can identify meaningful anisotropic length-scales.
211
+ # We learn a function which varies in one dimension ten-times slower
212
+ # than in the other. The corresponding length-scales should differ by at
213
+ # least a factor 5
214
+ rng = np.random.RandomState(0)
215
+ X = rng.uniform(-1, 1, (50, 2))
216
+ y = X[:, 0] + 0.1 * X[:, 1]
217
+
218
+ kernel = RBF([1.0, 1.0])
219
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
220
+ assert np.exp(gpr.kernel_.theta[1]) > np.exp(gpr.kernel_.theta[0]) * 5
221
+
222
+
223
+ def test_random_starts():
224
+ # Test that an increasing number of random-starts of GP fitting only
225
+ # increases the log marginal likelihood of the chosen theta.
226
+ n_samples, n_features = 25, 2
227
+ rng = np.random.RandomState(0)
228
+ X = rng.randn(n_samples, n_features) * 2 - 1
229
+ y = (
230
+ np.sin(X).sum(axis=1)
231
+ + np.sin(3 * X).sum(axis=1)
232
+ + rng.normal(scale=0.1, size=n_samples)
233
+ )
234
+
235
+ kernel = C(1.0, (1e-2, 1e2)) * RBF(
236
+ length_scale=[1.0] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
237
+ ) + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
238
+ last_lml = -np.inf
239
+ for n_restarts_optimizer in range(5):
240
+ gp = GaussianProcessRegressor(
241
+ kernel=kernel,
242
+ n_restarts_optimizer=n_restarts_optimizer,
243
+ random_state=0,
244
+ ).fit(X, y)
245
+ lml = gp.log_marginal_likelihood(gp.kernel_.theta)
246
+ assert lml > last_lml - np.finfo(np.float32).eps
247
+ last_lml = lml
248
+
249
+
250
+ @pytest.mark.parametrize("kernel", kernels)
251
+ def test_y_normalization(kernel):
252
+ """
253
+ Test normalization of the target values in GP
254
+
255
+ Fitting non-normalizing GP on normalized y and fitting normalizing GP
256
+ on unnormalized y should yield identical results. Note that, here,
257
+ 'normalized y' refers to y that has been made zero mean and unit
258
+ variance.
259
+
260
+ """
261
+
262
+ y_mean = np.mean(y)
263
+ y_std = np.std(y)
264
+ y_norm = (y - y_mean) / y_std
265
+
266
+ # Fit non-normalizing GP on normalized y
267
+ gpr = GaussianProcessRegressor(kernel=kernel)
268
+ gpr.fit(X, y_norm)
269
+
270
+ # Fit normalizing GP on unnormalized y
271
+ gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
272
+ gpr_norm.fit(X, y)
273
+
274
+ # Compare predicted mean, std-devs and covariances
275
+ y_pred, y_pred_std = gpr.predict(X2, return_std=True)
276
+ y_pred = y_pred * y_std + y_mean
277
+ y_pred_std = y_pred_std * y_std
278
+ y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
279
+
280
+ assert_almost_equal(y_pred, y_pred_norm)
281
+ assert_almost_equal(y_pred_std, y_pred_std_norm)
282
+
283
+ _, y_cov = gpr.predict(X2, return_cov=True)
284
+ y_cov = y_cov * y_std**2
285
+ _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
286
+
287
+ assert_almost_equal(y_cov, y_cov_norm)
288
+
289
+
290
+ def test_large_variance_y():
291
+ """
292
+ Here we test that, when noramlize_y=True, our GP can produce a
293
+ sensible fit to training data whose variance is significantly
294
+ larger than unity. This test was made in response to issue #15612.
295
+
296
+ GP predictions are verified against predictions that were made
297
+ using GPy which, here, is treated as the 'gold standard'. Note that we
298
+ only investigate the RBF kernel here, as that is what was used in the
299
+ GPy implementation.
300
+
301
+ The following code can be used to recreate the GPy data:
302
+
303
+ --------------------------------------------------------------------------
304
+ import GPy
305
+
306
+ kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
307
+ gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
308
+ gpy.optimize()
309
+ y_pred_gpy, y_var_gpy = gpy.predict(X2)
310
+ y_pred_std_gpy = np.sqrt(y_var_gpy)
311
+ --------------------------------------------------------------------------
312
+ """
313
+
314
+ # Here we utilise a larger variance version of the training data
315
+ y_large = 10 * y
316
+
317
+ # Standard GP with normalize_y=True
318
+ RBF_params = {"length_scale": 1.0}
319
+ kernel = RBF(**RBF_params)
320
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
321
+ gpr.fit(X, y_large)
322
+ y_pred, y_pred_std = gpr.predict(X2, return_std=True)
323
+
324
+ # 'Gold standard' mean predictions from GPy
325
+ y_pred_gpy = np.array(
326
+ [15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589]
327
+ )
328
+
329
+ # 'Gold standard' std predictions from GPy
330
+ y_pred_std_gpy = np.array(
331
+ [7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042]
332
+ )
333
+
334
+ # Based on numerical experiments, it's reasonable to expect our
335
+ # GP's mean predictions to get within 7% of predictions of those
336
+ # made by GPy.
337
+ assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)
338
+
339
+ # Based on numerical experiments, it's reasonable to expect our
340
+ # GP's std predictions to get within 15% of predictions of those
341
+ # made by GPy.
342
+ assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
343
+
344
+
345
+ def test_y_multioutput():
346
+ # Test that GPR can deal with multi-dimensional target values
347
+ y_2d = np.vstack((y, y * 2)).T
348
+
349
+ # Test for fixed kernel that first dimension of 2d GP equals the output
350
+ # of 1d GP and that second dimension is twice as large
351
+ kernel = RBF(length_scale=1.0)
352
+
353
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
354
+ gpr.fit(X, y)
355
+
356
+ gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
357
+ gpr_2d.fit(X, y_2d)
358
+
359
+ y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
360
+ y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
361
+ _, y_cov_1d = gpr.predict(X2, return_cov=True)
362
+ _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
363
+
364
+ assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
365
+ assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
366
+
367
+ # Standard deviation and covariance do not depend on output
368
+ for target in range(y_2d.shape[1]):
369
+ assert_almost_equal(y_std_1d, y_std_2d[..., target])
370
+ assert_almost_equal(y_cov_1d, y_cov_2d[..., target])
371
+
372
+ y_sample_1d = gpr.sample_y(X2, n_samples=10)
373
+ y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
374
+
375
+ assert y_sample_1d.shape == (5, 10)
376
+ assert y_sample_2d.shape == (5, 2, 10)
377
+ # Only the first target will be equal
378
+ assert_almost_equal(y_sample_1d, y_sample_2d[:, 0, :])
379
+
380
+ # Test hyperparameter optimization
381
+ for kernel in kernels:
382
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
383
+ gpr.fit(X, y)
384
+
385
+ gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
386
+ gpr_2d.fit(X, np.vstack((y, y)).T)
387
+
388
+ assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
389
+
390
+
391
+ @pytest.mark.parametrize("kernel", non_fixed_kernels)
392
+ def test_custom_optimizer(kernel):
393
+ # Test that GPR can use externally defined optimizers.
394
+ # Define a dummy optimizer that simply tests 50 random hyperparameters
395
+ def optimizer(obj_func, initial_theta, bounds):
396
+ rng = np.random.RandomState(0)
397
+ theta_opt, func_min = initial_theta, obj_func(
398
+ initial_theta, eval_gradient=False
399
+ )
400
+ for _ in range(50):
401
+ theta = np.atleast_1d(
402
+ rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
403
+ )
404
+ f = obj_func(theta, eval_gradient=False)
405
+ if f < func_min:
406
+ theta_opt, func_min = theta, f
407
+ return theta_opt, func_min
408
+
409
+ gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
410
+ gpr.fit(X, y)
411
+ # Checks that optimizer improved marginal likelihood
412
+ assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood(
413
+ gpr.kernel.theta
414
+ )
415
+
416
+
417
+ def test_gpr_correct_error_message():
418
+ X = np.arange(12).reshape(6, -1)
419
+ y = np.ones(6)
420
+ kernel = DotProduct()
421
+ gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
422
+ message = (
423
+ "The kernel, %s, is not returning a "
424
+ "positive definite matrix. Try gradually increasing "
425
+ "the 'alpha' parameter of your "
426
+ "GaussianProcessRegressor estimator." % kernel
427
+ )
428
+ with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)):
429
+ gpr.fit(X, y)
430
+
431
+
432
+ @pytest.mark.parametrize("kernel", kernels)
433
+ def test_duplicate_input(kernel):
434
+ # Test GPR can handle two different output-values for the same input.
435
+ gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
436
+ gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
437
+
438
+ X_ = np.vstack((X, X[0]))
439
+ y_ = np.hstack((y, y[0] + 1))
440
+ gpr_equal_inputs.fit(X_, y_)
441
+
442
+ X_ = np.vstack((X, X[0] + 1e-15))
443
+ y_ = np.hstack((y, y[0] + 1))
444
+ gpr_similar_inputs.fit(X_, y_)
445
+
446
+ X_test = np.linspace(0, 10, 100)[:, None]
447
+ y_pred_equal, y_std_equal = gpr_equal_inputs.predict(X_test, return_std=True)
448
+ y_pred_similar, y_std_similar = gpr_similar_inputs.predict(X_test, return_std=True)
449
+
450
+ assert_almost_equal(y_pred_equal, y_pred_similar)
451
+ assert_almost_equal(y_std_equal, y_std_similar)
452
+
453
+
454
+ def test_no_fit_default_predict():
455
+ # Test that GPR predictions without fit does not break by default.
456
+ default_kernel = C(1.0, constant_value_bounds="fixed") * RBF(
457
+ 1.0, length_scale_bounds="fixed"
458
+ )
459
+ gpr1 = GaussianProcessRegressor()
460
+ _, y_std1 = gpr1.predict(X, return_std=True)
461
+ _, y_cov1 = gpr1.predict(X, return_cov=True)
462
+
463
+ gpr2 = GaussianProcessRegressor(kernel=default_kernel)
464
+ _, y_std2 = gpr2.predict(X, return_std=True)
465
+ _, y_cov2 = gpr2.predict(X, return_cov=True)
466
+
467
+ assert_array_almost_equal(y_std1, y_std2)
468
+ assert_array_almost_equal(y_cov1, y_cov2)
469
+
470
+
471
+ def test_warning_bounds():
472
+ kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
473
+ gpr = GaussianProcessRegressor(kernel=kernel)
474
+ warning_message = (
475
+ "The optimal value found for dimension 0 of parameter "
476
+ "length_scale is close to the specified upper bound "
477
+ "0.001. Increasing the bound and calling fit again may "
478
+ "find a better value."
479
+ )
480
+ with pytest.warns(ConvergenceWarning, match=warning_message):
481
+ gpr.fit(X, y)
482
+
483
+ kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
484
+ length_scale_bounds=[1e3, 1e5]
485
+ )
486
+ gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
487
+ with warnings.catch_warnings(record=True) as record:
488
+ warnings.simplefilter("always")
489
+ gpr_sum.fit(X, y)
490
+
491
+ assert len(record) == 2
492
+
493
+ assert issubclass(record[0].category, ConvergenceWarning)
494
+ assert (
495
+ record[0].message.args[0] == "The optimal value found for "
496
+ "dimension 0 of parameter "
497
+ "k1__noise_level is close to the "
498
+ "specified upper bound 0.001. "
499
+ "Increasing the bound and calling "
500
+ "fit again may find a better value."
501
+ )
502
+
503
+ assert issubclass(record[1].category, ConvergenceWarning)
504
+ assert (
505
+ record[1].message.args[0] == "The optimal value found for "
506
+ "dimension 0 of parameter "
507
+ "k2__length_scale is close to the "
508
+ "specified lower bound 1000.0. "
509
+ "Decreasing the bound and calling "
510
+ "fit again may find a better value."
511
+ )
512
+
513
+ X_tile = np.tile(X, 2)
514
+ kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
515
+ gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)
516
+
517
+ with warnings.catch_warnings(record=True) as record:
518
+ warnings.simplefilter("always")
519
+ gpr_dims.fit(X_tile, y)
520
+
521
+ assert len(record) == 2
522
+
523
+ assert issubclass(record[0].category, ConvergenceWarning)
524
+ assert (
525
+ record[0].message.args[0] == "The optimal value found for "
526
+ "dimension 0 of parameter "
527
+ "length_scale is close to the "
528
+ "specified lower bound 10.0. "
529
+ "Decreasing the bound and calling "
530
+ "fit again may find a better value."
531
+ )
532
+
533
+ assert issubclass(record[1].category, ConvergenceWarning)
534
+ assert (
535
+ record[1].message.args[0] == "The optimal value found for "
536
+ "dimension 1 of parameter "
537
+ "length_scale is close to the "
538
+ "specified lower bound 10.0. "
539
+ "Decreasing the bound and calling "
540
+ "fit again may find a better value."
541
+ )
542
+
543
+
544
+ def test_bound_check_fixed_hyperparameter():
545
+ # Regression test for issue #17943
546
+ # Check that having a hyperparameter with fixed bounds doesn't cause an
547
+ # error
548
+ k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
549
+ k2 = ExpSineSquared(
550
+ length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed"
551
+ ) # seasonal component
552
+ kernel = k1 + k2
553
+ GaussianProcessRegressor(kernel=kernel).fit(X, y)
554
+
555
+
556
+ @pytest.mark.parametrize("kernel", kernels)
557
+ def test_constant_target(kernel):
558
+ """Check that the std. dev. is affected to 1 when normalizing a constant
559
+ feature.
560
+ Non-regression test for:
561
+ https://github.com/scikit-learn/scikit-learn/issues/18318
562
+ NaN where affected to the target when scaling due to null std. dev. with
563
+ constant target.
564
+ """
565
+ y_constant = np.ones(X.shape[0], dtype=np.float64)
566
+
567
+ gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
568
+ gpr.fit(X, y_constant)
569
+ assert gpr._y_train_std == pytest.approx(1.0)
570
+
571
+ y_pred, y_cov = gpr.predict(X, return_cov=True)
572
+ assert_allclose(y_pred, y_constant)
573
+ # set atol because we compare to zero
574
+ assert_allclose(np.diag(y_cov), 0.0, atol=1e-9)
575
+
576
+ # Test multi-target data
577
+ n_samples, n_targets = X.shape[0], 2
578
+ rng = np.random.RandomState(0)
579
+ y = np.concatenate(
580
+ [
581
+ rng.normal(size=(n_samples, 1)), # non-constant target
582
+ np.full(shape=(n_samples, 1), fill_value=2), # constant target
583
+ ],
584
+ axis=1,
585
+ )
586
+
587
+ gpr.fit(X, y)
588
+ Y_pred, Y_cov = gpr.predict(X, return_cov=True)
589
+
590
+ assert_allclose(Y_pred[:, 1], 2)
591
+ assert_allclose(np.diag(Y_cov[..., 1]), 0.0, atol=1e-9)
592
+
593
+ assert Y_pred.shape == (n_samples, n_targets)
594
+ assert Y_cov.shape == (n_samples, n_samples, n_targets)
595
+
596
+
597
+ def test_gpr_consistency_std_cov_non_invertible_kernel():
598
+ """Check the consistency between the returned std. dev. and the covariance.
599
+ Non-regression test for:
600
+ https://github.com/scikit-learn/scikit-learn/issues/19936
601
+ Inconsistencies were observed when the kernel cannot be inverted (or
602
+ numerically stable).
603
+ """
604
+ kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF(
605
+ [5.91326520e02, 1.32584051e03], (1e-12, 1e12)
606
+ ) + WhiteKernel(noise_level=1e-5)
607
+ gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None)
608
+ X_train = np.array(
609
+ [
610
+ [0.0, 0.0],
611
+ [1.54919334, -0.77459667],
612
+ [-1.54919334, 0.0],
613
+ [0.0, -1.54919334],
614
+ [0.77459667, 0.77459667],
615
+ [-0.77459667, 1.54919334],
616
+ ]
617
+ )
618
+ y_train = np.array(
619
+ [
620
+ [-2.14882017e-10],
621
+ [-4.66975823e00],
622
+ [4.01823986e00],
623
+ [-1.30303674e00],
624
+ [-1.35760156e00],
625
+ [3.31215668e00],
626
+ ]
627
+ )
628
+ gpr.fit(X_train, y_train)
629
+ X_test = np.array(
630
+ [
631
+ [-1.93649167, -1.93649167],
632
+ [1.93649167, -1.93649167],
633
+ [-1.93649167, 1.93649167],
634
+ [1.93649167, 1.93649167],
635
+ ]
636
+ )
637
+ pred1, std = gpr.predict(X_test, return_std=True)
638
+ pred2, cov = gpr.predict(X_test, return_cov=True)
639
+ assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5)
640
+
641
+
642
+ @pytest.mark.parametrize(
643
+ "params, TypeError, err_msg",
644
+ [
645
+ (
646
+ {"alpha": np.zeros(100)},
647
+ ValueError,
648
+ "alpha must be a scalar or an array with same number of entries as y",
649
+ ),
650
+ (
651
+ {
652
+ "kernel": WhiteKernel(noise_level_bounds=(-np.inf, np.inf)),
653
+ "n_restarts_optimizer": 2,
654
+ },
655
+ ValueError,
656
+ "requires that all bounds are finite",
657
+ ),
658
+ ],
659
+ )
660
+ def test_gpr_fit_error(params, TypeError, err_msg):
661
+ """Check that expected error are raised during fit."""
662
+ gpr = GaussianProcessRegressor(**params)
663
+ with pytest.raises(TypeError, match=err_msg):
664
+ gpr.fit(X, y)
665
+
666
+
667
+ def test_gpr_lml_error():
668
+ """Check that we raise the proper error in the LML method."""
669
+ gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
670
+
671
+ err_msg = "Gradient can only be evaluated for theta!=None"
672
+ with pytest.raises(ValueError, match=err_msg):
673
+ gpr.log_marginal_likelihood(eval_gradient=True)
674
+
675
+
676
+ def test_gpr_predict_error():
677
+ """Check that we raise the proper error during predict."""
678
+ gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
679
+
680
+ err_msg = "At most one of return_std or return_cov can be requested."
681
+ with pytest.raises(RuntimeError, match=err_msg):
682
+ gpr.predict(X, return_cov=True, return_std=True)
683
+
684
+
685
+ @pytest.mark.parametrize("normalize_y", [True, False])
686
+ @pytest.mark.parametrize("n_targets", [None, 1, 10])
687
+ def test_predict_shapes(normalize_y, n_targets):
688
+ """Check the shapes of y_mean, y_std, and y_cov in single-output
689
+ (n_targets=None) and multi-output settings, including the edge case when
690
+ n_targets=1, where the sklearn convention is to squeeze the predictions.
691
+
692
+ Non-regression test for:
693
+ https://github.com/scikit-learn/scikit-learn/issues/17394
694
+ https://github.com/scikit-learn/scikit-learn/issues/18065
695
+ https://github.com/scikit-learn/scikit-learn/issues/22174
696
+ """
697
+ rng = np.random.RandomState(1234)
698
+
699
+ n_features, n_samples_train, n_samples_test = 6, 9, 7
700
+
701
+ y_train_shape = (n_samples_train,)
702
+ if n_targets is not None:
703
+ y_train_shape = y_train_shape + (n_targets,)
704
+
705
+ # By convention single-output data is squeezed upon prediction
706
+ y_test_shape = (n_samples_test,)
707
+ if n_targets is not None and n_targets > 1:
708
+ y_test_shape = y_test_shape + (n_targets,)
709
+
710
+ X_train = rng.randn(n_samples_train, n_features)
711
+ X_test = rng.randn(n_samples_test, n_features)
712
+ y_train = rng.randn(*y_train_shape)
713
+
714
+ model = GaussianProcessRegressor(normalize_y=normalize_y)
715
+ model.fit(X_train, y_train)
716
+
717
+ y_pred, y_std = model.predict(X_test, return_std=True)
718
+ _, y_cov = model.predict(X_test, return_cov=True)
719
+
720
+ assert y_pred.shape == y_test_shape
721
+ assert y_std.shape == y_test_shape
722
+ assert y_cov.shape == (n_samples_test,) + y_test_shape
723
+
724
+
725
+ @pytest.mark.parametrize("normalize_y", [True, False])
726
+ @pytest.mark.parametrize("n_targets", [None, 1, 10])
727
+ def test_sample_y_shapes(normalize_y, n_targets):
728
+ """Check the shapes of y_samples in single-output (n_targets=0) and
729
+ multi-output settings, including the edge case when n_targets=1, where the
730
+ sklearn convention is to squeeze the predictions.
731
+
732
+ Non-regression test for:
733
+ https://github.com/scikit-learn/scikit-learn/issues/22175
734
+ """
735
+ rng = np.random.RandomState(1234)
736
+
737
+ n_features, n_samples_train = 6, 9
738
+ # Number of spatial locations to predict at
739
+ n_samples_X_test = 7
740
+ # Number of sample predictions per test point
741
+ n_samples_y_test = 5
742
+
743
+ y_train_shape = (n_samples_train,)
744
+ if n_targets is not None:
745
+ y_train_shape = y_train_shape + (n_targets,)
746
+
747
+ # By convention single-output data is squeezed upon prediction
748
+ if n_targets is not None and n_targets > 1:
749
+ y_test_shape = (n_samples_X_test, n_targets, n_samples_y_test)
750
+ else:
751
+ y_test_shape = (n_samples_X_test, n_samples_y_test)
752
+
753
+ X_train = rng.randn(n_samples_train, n_features)
754
+ X_test = rng.randn(n_samples_X_test, n_features)
755
+ y_train = rng.randn(*y_train_shape)
756
+
757
+ model = GaussianProcessRegressor(normalize_y=normalize_y)
758
+
759
+ # FIXME: before fitting, the estimator does not have information regarding
760
+ # the number of targets and default to 1. This is inconsistent with the shape
761
+ # provided after `fit`. This assert should be made once the following issue
762
+ # is fixed:
763
+ # https://github.com/scikit-learn/scikit-learn/issues/22430
764
+ # y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
765
+ # assert y_samples.shape == y_test_shape
766
+
767
+ model.fit(X_train, y_train)
768
+
769
+ y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
770
+ assert y_samples.shape == y_test_shape
771
+
772
+
773
+ @pytest.mark.parametrize("n_targets", [None, 1, 2, 3])
774
+ @pytest.mark.parametrize("n_samples", [1, 5])
775
+ def test_sample_y_shape_with_prior(n_targets, n_samples):
776
+ """Check the output shape of `sample_y` is consistent before and after `fit`."""
777
+ rng = np.random.RandomState(1024)
778
+
779
+ X = rng.randn(10, 3)
780
+ y = rng.randn(10, n_targets if n_targets is not None else 1)
781
+
782
+ model = GaussianProcessRegressor(n_targets=n_targets)
783
+ shape_before_fit = model.sample_y(X, n_samples=n_samples).shape
784
+ model.fit(X, y)
785
+ shape_after_fit = model.sample_y(X, n_samples=n_samples).shape
786
+ assert shape_before_fit == shape_after_fit
787
+
788
+
789
+ @pytest.mark.parametrize("n_targets", [None, 1, 2, 3])
790
+ def test_predict_shape_with_prior(n_targets):
791
+ """Check the output shape of `predict` with prior distribution."""
792
+ rng = np.random.RandomState(1024)
793
+
794
+ n_sample = 10
795
+ X = rng.randn(n_sample, 3)
796
+ y = rng.randn(n_sample, n_targets if n_targets is not None else 1)
797
+
798
+ model = GaussianProcessRegressor(n_targets=n_targets)
799
+ mean_prior, cov_prior = model.predict(X, return_cov=True)
800
+ _, std_prior = model.predict(X, return_std=True)
801
+
802
+ model.fit(X, y)
803
+ mean_post, cov_post = model.predict(X, return_cov=True)
804
+ _, std_post = model.predict(X, return_std=True)
805
+
806
+ assert mean_prior.shape == mean_post.shape
807
+ assert cov_prior.shape == cov_post.shape
808
+ assert std_prior.shape == std_post.shape
809
+
810
+
811
+ def test_n_targets_error():
812
+ """Check that an error is raised when the number of targets seen at fit is
813
+ inconsistent with n_targets.
814
+ """
815
+ rng = np.random.RandomState(0)
816
+ X = rng.randn(10, 3)
817
+ y = rng.randn(10, 2)
818
+
819
+ model = GaussianProcessRegressor(n_targets=1)
820
+ with pytest.raises(ValueError, match="The number of targets seen in `y`"):
821
+ model.fit(X, y)
822
+
823
+
824
+ class CustomKernel(C):
825
+ """
826
+ A custom kernel that has a diag method that returns the first column of the
827
+ input matrix X. This is a helper for the test to check that the input
828
+ matrix X is not mutated.
829
+ """
830
+
831
+ def diag(self, X):
832
+ return X[:, 0]
833
+
834
+
835
+ def test_gpr_predict_input_not_modified():
836
+ """
837
+ Check that the input X is not modified by the predict method of the
838
+ GaussianProcessRegressor when setting return_std=True.
839
+
840
+ Non-regression test for:
841
+ https://github.com/scikit-learn/scikit-learn/issues/24340
842
+ """
843
+ gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y)
844
+
845
+ X2_copy = np.copy(X2)
846
+ _, _ = gpr.predict(X2, return_std=True)
847
+
848
+ assert_allclose(X2, X2_copy)
mantis_evalkit/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_kernels.py ADDED
@@ -0,0 +1,403 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Testing for kernels for Gaussian processes."""
2
+
3
+ # Authors: The scikit-learn developers
4
+ # SPDX-License-Identifier: BSD-3-Clause
5
+
6
+ from inspect import signature
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from sklearn.base import clone
12
+ from sklearn.gaussian_process.kernels import (
13
+ RBF,
14
+ CompoundKernel,
15
+ ConstantKernel,
16
+ DotProduct,
17
+ Exponentiation,
18
+ ExpSineSquared,
19
+ KernelOperator,
20
+ Matern,
21
+ PairwiseKernel,
22
+ RationalQuadratic,
23
+ WhiteKernel,
24
+ _approx_fprime,
25
+ )
26
+ from sklearn.metrics.pairwise import (
27
+ PAIRWISE_KERNEL_FUNCTIONS,
28
+ euclidean_distances,
29
+ pairwise_kernels,
30
+ )
31
+ from sklearn.utils._testing import (
32
+ assert_allclose,
33
+ assert_almost_equal,
34
+ assert_array_almost_equal,
35
+ assert_array_equal,
36
+ )
37
+
38
# Shared fixtures: small Gaussian design matrices used by every test below.
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
# Set shared test data as read-only to avoid unintentional in-place
# modifications that would introduce side-effects between tests.
X.flags.writeable = False
Y.flags.writeable = False

# Named separately because several tests must special-case it: the
# WhiteKernel term makes k(X) differ from k(X, X) on the diagonal.
kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
# Representative sweep of kernels (isotropic/anisotropic length scales,
# fixed/free bounds, sums, products, exponentiation) that the
# parametrized tests below iterate over.
kernels = [
    RBF(length_scale=2.0),
    RBF(length_scale_bounds=(0.5, 2.0)),
    ConstantKernel(constant_value=10.0),
    2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
    2.0 * RBF(length_scale=0.5),
    kernel_rbf_plus_white,
    2.0 * RBF(length_scale=[0.5, 2.0]),
    2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
    2.0 * Matern(length_scale=0.5, nu=0.5),
    2.0 * Matern(length_scale=1.5, nu=1.5),
    2.0 * Matern(length_scale=2.5, nu=2.5),
    2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
    3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
    4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
    RationalQuadratic(length_scale=0.5, alpha=1.5),
    ExpSineSquared(length_scale=0.5, periodicity=1.5),
    DotProduct(sigma_0=2.0),
    DotProduct(sigma_0=2.0) ** 2,
    RBF(length_scale=[2.0]),
    Matern(length_scale=[2.0]),
]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
    # NOTE(review): chi2-family kernels are skipped — presumably because
    # they require non-negative features, which the Gaussian test data
    # does not satisfy; confirm against PAIRWISE_KERNEL_FUNCTIONS docs.
    if metric in ["additive_chi2", "chi2"]:
        continue
    kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
72
+
73
+
74
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_gradient(kernel):
    """Analytic kernel gradients must agree with finite differences."""
    kernel = clone(kernel)  # keep tests independent of one another
    K, K_gradient = kernel(X, eval_gradient=True)

    n_samples = X.shape[0]
    assert K_gradient.shape == (n_samples, n_samples, kernel.theta.shape[0])

    def kernel_value_at(theta):
        # Kernel matrix evaluated at a given log-parameter vector.
        return kernel.clone_with_theta(theta)(X, eval_gradient=False)

    numeric_gradient = _approx_fprime(kernel.theta, kernel_value_at, 1e-10)
    assert_almost_equal(K_gradient, numeric_gradient, 4)
92
+
93
+
94
@pytest.mark.parametrize(
    "kernel",
    [
        kernel
        for kernel in kernels
        # skip non-basic kernels (sums/products/exponentiations)
        if not (isinstance(kernel, (KernelOperator, Exponentiation)))
    ],
)
def test_kernel_theta(kernel):
    """Check that the log-parameter vector `theta` of a kernel is consistent
    with its declared hyperparameters, excludes fixed parameters, and stays
    in sync with attribute assignment."""
    kernel = clone(kernel)  # make tests independent of one-another
    theta = kernel.theta
    _, K_gradient = kernel(X, eval_gradient=True)

    # Determine kernel parameters that contribute to theta: every
    # constructor argument with a matching "<name>_bounds" argument.
    init_sign = signature(kernel.__class__.__init__).parameters.values()
    args = [p.name for p in init_sign if p.name != "self"]
    theta_vars = map(
        lambda s: s[0 : -len("_bounds")], filter(lambda s: s.endswith("_bounds"), args)
    )
    assert set(hyperparameter.name for hyperparameter in kernel.hyperparameters) == set(
        theta_vars
    )

    # Check that values returned in theta are consistent with
    # hyperparameter values (being their logarithms)
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        assert theta[i] == np.log(getattr(kernel, hyperparameter.name))

    # Fixed kernel parameters must be excluded from theta and gradient.
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        # create copy with certain hyperparameter fixed
        params = kernel.get_params()
        params[hyperparameter.name + "_bounds"] = "fixed"
        kernel_class = kernel.__class__
        new_kernel = kernel_class(**params)
        # Check that theta and K_gradient are identical with the fixed
        # dimension left out
        _, K_gradient_new = new_kernel(X, eval_gradient=True)
        assert theta.shape[0] == new_kernel.theta.shape[0] + 1
        assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1
        # Entries before/after the fixed dimension must be unchanged.
        if i > 0:
            assert theta[:i] == new_kernel.theta[:i]
            assert_array_equal(K_gradient[..., :i], K_gradient_new[..., :i])
        if i + 1 < len(kernel.hyperparameters):
            assert theta[i + 1 :] == new_kernel.theta[i:]
            assert_array_equal(K_gradient[..., i + 1 :], K_gradient_new[..., i:])

    # Check that values of theta are modified correctly: assigning theta
    # updates the attribute (exp), and assigning the attribute updates
    # theta (log).
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        theta[i] = np.log(42)
        kernel.theta = theta
        assert_almost_equal(getattr(kernel, hyperparameter.name), 42)

        setattr(kernel, hyperparameter.name, 43)
        assert_almost_equal(kernel.theta[i], np.log(43))
151
+
152
+
153
@pytest.mark.parametrize(
    "kernel",
    [
        kernel
        for kernel in kernels
        # Identity is not satisfied on diagonal
        if kernel != kernel_rbf_plus_white
    ],
)
def test_auto_vs_cross(kernel):
    """k(X) and k(X, X) must coincide for kernels without a white-noise term."""
    kernel = clone(kernel)  # keep tests independent of one another
    auto_correlation = kernel(X)
    cross_correlation = kernel(X, X)
    assert_almost_equal(auto_correlation, cross_correlation, 5)
168
+
169
+
170
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_diag(kernel):
    """`kernel.diag(X)` must equal the diagonal of the full kernel matrix."""
    kernel = clone(kernel)  # keep tests independent of one another
    full_matrix_diagonal = np.diag(kernel(X))
    assert_almost_equal(full_matrix_diagonal, kernel.diag(X), 5)
177
+
178
+
179
def test_kernel_operator_commutative():
    """Kernel + scalar and kernel * scalar must commute."""
    left_sum = (RBF(2.0) + 1.0)(X)
    right_sum = (1.0 + RBF(2.0))(X)
    assert_almost_equal(left_sum, right_sum)

    left_product = (3.0 * RBF(2.0))(X)
    right_product = (RBF(2.0) * 3.0)(X)
    assert_almost_equal(left_product, right_product)
186
+
187
+
188
def test_kernel_anisotropic():
    """An anisotropic RBF equals an isotropic one on per-dimension rescaled data."""
    kernel = 3.0 * RBF([0.5, 2.0])
    K = kernel(X)

    # Scaling dimension 0 by 4 maps a length scale of 0.5 onto 2.0.
    rescaled = X.copy()
    rescaled[:, 0] *= 4
    assert_almost_equal(K, 3.0 * RBF(2.0)(rescaled))

    # Dividing dimension 1 by 4 maps a length scale of 2.0 onto 0.5.
    rescaled = X.copy()
    rescaled[:, 1] /= 4
    assert_almost_equal(K, 3.0 * RBF(0.5)(rescaled))

    # Getting and setting via the (log-scale) theta vector.
    kernel.theta = kernel.theta + np.log(2)
    assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
    assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
207
+
208
+
209
@pytest.mark.parametrize(
    "kernel", [kernel for kernel in kernels if kernel.is_stationary()]
)
def test_kernel_stationary(kernel):
    """Stationary kernels must depend only on the offset between inputs."""
    kernel = clone(kernel)  # keep tests independent of one another
    K = kernel(X, X + 1)
    # Every diagonal entry is k(x_i, x_i + 1), so all must equal K[0, 0].
    assert_almost_equal(K[0, 0], np.diag(K))
217
+
218
+
219
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_input_type(kernel):
    """Composite kernels must derive `requires_vector_input` from their parts."""
    kernel = clone(kernel)  # keep tests independent of one another
    if isinstance(kernel, Exponentiation):
        assert kernel.requires_vector_input == kernel.kernel.requires_vector_input
    if isinstance(kernel, KernelOperator):
        expected = kernel.k1.requires_vector_input or kernel.k2.requires_vector_input
        assert kernel.requires_vector_input == expected
229
+
230
+
231
def test_compound_kernel_input_type():
    """A CompoundKernel needs vector input iff any of its sub-kernels does."""
    white_only = CompoundKernel([WhiteKernel(noise_level=3.0)])
    assert not white_only.requires_vector_input

    with_rbf = CompoundKernel([WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
    assert with_rbf.requires_vector_input
237
+
238
+
239
def check_hyperparameters_equal(kernel1, kernel2):
    """Assert that the `hyperparameter_*` attributes of both kernels match."""
    attribute_names = set(dir(kernel1) + dir(kernel2))
    for attr in (name for name in attribute_names if name.startswith("hyperparameter_")):
        assert getattr(kernel1, attr) == getattr(kernel2, attr)
246
+
247
+
248
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_clone(kernel):
    """`clone` must produce an equal but distinct kernel object."""
    kernel = clone(kernel)  # keep tests independent of one another
    duplicate = clone(kernel)

    # XXX: Should this be fixed?
    # This differs from the sklearn's estimators equality check.
    assert kernel == duplicate
    assert id(kernel) != id(duplicate)

    # Constructor parameters and hyperparameters must carry over unchanged.
    assert kernel.get_params() == duplicate.get_params()
    check_hyperparameters_equal(kernel, duplicate)
264
+
265
+
266
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_clone_after_set_params(kernel):
    """Verify that using `set_params` does not break `clone` on kernels.

    This used to break because in kernels such as the RBF, non-trivial
    logic that modified the length scale used to be in the constructor.
    See https://github.com/scikit-learn/scikit-learn/issues/6961
    for more details.
    """
    kernel = clone(kernel)  # make tests independent of one-another
    bounds = (1e-5, 1e5)
    kernel_cloned = clone(kernel)
    params = kernel.get_params()
    # RationalQuadratic kernel is isotropic.
    isotropic_kernels = (ExpSineSquared, RationalQuadratic)
    if "length_scale" in params and not isinstance(kernel, isotropic_kernels):
        length_scale = params["length_scale"]
        if np.iterable(length_scale):
            # XXX unreached code as of v0.22
            params["length_scale"] = length_scale[0]
            params["length_scale_bounds"] = bounds
        else:
            # Switch a scalar length scale to an anisotropic (list) one so
            # that set_params must rebuild the hyperparameter structure.
            params["length_scale"] = [length_scale] * 2
            params["length_scale_bounds"] = bounds * 2
    kernel_cloned.set_params(**params)
    kernel_cloned_clone = clone(kernel_cloned)
    assert kernel_cloned_clone.get_params() == kernel_cloned.get_params()
    assert id(kernel_cloned_clone) != id(kernel_cloned)
    check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone)
294
+
295
+
296
def test_matern_kernel():
    """Matern must reduce to known kernels for special values of `nu`."""
    # Unit diagonal for any Matern kernel.
    K = Matern(nu=1.5, length_scale=1.0)(X)
    assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))

    # nu=0.5 coincides with the absolute-exponential kernel.
    K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
    assert_array_almost_equal(Matern(nu=0.5, length_scale=1.0)(X), K_absexp)

    # nu=inf coincides with the RBF kernel.
    K_rbf = RBF(length_scale=1.0)(X)
    K_inf = Matern(nu=np.inf, length_scale=1.0)(X)
    assert_array_almost_equal(K_inf, K_rbf)
    assert_allclose(K_inf, K_rbf)

    # The closed-form special cases (nu in {0.5, 1.5, 2.5}) must be close
    # to the generic evaluation at nu + tiny.
    tiny = 1e-10
    for nu in [0.5, 1.5, 2.5]:
        assert_array_almost_equal(
            Matern(nu=nu, length_scale=1.0)(X),
            Matern(nu=nu + tiny, length_scale=1.0)(X),
        )

    # A very large nu is already close to the RBF limit.
    assert_array_almost_equal(
        Matern(nu=100, length_scale=1.0)(X), RBF(length_scale=1.0)(X), decimal=2
    )
323
+
324
+
325
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_versus_pairwise(kernel):
    """GP kernels must be usable as metrics for `pairwise_kernels`."""
    kernel = clone(kernel)  # keep tests independent of one another

    # Auto-kernel — skipped for the white-noise sum, where k(X) != k(X, X),
    # an identity that pairwise_kernels implicitly assumes.
    if kernel != kernel_rbf_plus_white:
        assert_array_almost_equal(kernel(X), pairwise_kernels(X, metric=kernel))

    # Cross-kernel.
    assert_array_almost_equal(kernel(X, Y), pairwise_kernels(X, Y, metric=kernel))
342
+
343
+
344
@pytest.mark.parametrize("kernel", kernels)
def test_set_get_params(kernel):
    """Check that set_params()/get_params() is consistent with kernel.theta.

    `theta` stores the logarithm of every non-fixed hyperparameter value,
    so `np.exp(theta)` must match the values reported by `get_params()`
    and the values injected through `set_params()`.
    """
    kernel = clone(kernel)  # make tests independent of one-another

    # Test get_params()
    index = 0
    params = kernel.get_params()
    for hyperparameter in kernel.hyperparameters:
        # Fixed hyperparameters are excluded from theta entirely.
        # Idiom fix: the original's `isinstance("string", type(...))` was an
        # inverted membership test; this direct `str` check is equivalent.
        if isinstance(hyperparameter.bounds, str):
            if hyperparameter.bounds == "fixed":
                continue
        size = hyperparameter.n_elements
        if size > 1:  # anisotropic kernels expose one theta entry per element
            assert_almost_equal(
                np.exp(kernel.theta[index : index + size]), params[hyperparameter.name]
            )
            index += size
        else:
            assert_almost_equal(
                np.exp(kernel.theta[index]), params[hyperparameter.name]
            )
            index += 1

    # Test set_params()
    index = 0
    value = 10  # arbitrary value
    for hyperparameter in kernel.hyperparameters:
        if isinstance(hyperparameter.bounds, str):
            if hyperparameter.bounds == "fixed":
                continue
        size = hyperparameter.n_elements
        if size > 1:  # anisotropic kernels
            kernel.set_params(**{hyperparameter.name: [value] * size})
            assert_almost_equal(
                np.exp(kernel.theta[index : index + size]), [value] * size
            )
            index += size
        else:
            kernel.set_params(**{hyperparameter.name: value})
            assert_almost_equal(np.exp(kernel.theta[index]), value)
            index += 1
385
+
386
+
387
@pytest.mark.parametrize("kernel", kernels)
def test_repr_kernels(kernel):
    """Smoke test: every kernel must have a working `repr`."""
    kernel = clone(kernel)  # keep tests independent of one another
    repr(kernel)
393
+
394
+
395
def test_rational_quadratic_kernel():
    """RationalQuadratic must reject anisotropic (vector) length scales."""
    anisotropic = RationalQuadratic(length_scale=[1.0, 1.0])
    expected_message = (
        "RationalQuadratic kernel only supports isotropic version, "
        "please use a single scalar for length_scale"
    )
    with pytest.raises(AttributeError, match=expected_message):
        anisotropic(X)