Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py +20 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py +1240 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py +1506 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py +1529 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/problem.py +1296 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py +132 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py +14 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py +387 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/optim.py +1203 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py +18 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py +22 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py +77 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py +67 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py +110 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py +67 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py +32 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py +51 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py +657 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py +187 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py +162 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py +204 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_config.py +45 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py +10 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_doccer.py +143 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py +18 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py +469 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py +28 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py +48 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py +137 -0
- moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc +0 -0
- moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc +0 -0
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .main import minimize
|
| 2 |
+
from .utils import show_versions
|
| 3 |
+
|
| 4 |
+
# PEP0440 compatible formatted version, see:
|
| 5 |
+
# https://www.python.org/dev/peps/pep-0440/
|
| 6 |
+
#
|
| 7 |
+
# Final release markers:
|
| 8 |
+
# X.Y.0 # For first release after an increment in Y
|
| 9 |
+
# X.Y.Z # For bugfix releases
|
| 10 |
+
#
|
| 11 |
+
# Admissible pre-release markers:
|
| 12 |
+
# X.YaN # Alpha release
|
| 13 |
+
# X.YbN # Beta release
|
| 14 |
+
# X.YrcN # Release Candidate
|
| 15 |
+
#
|
| 16 |
+
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
|
| 17 |
+
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'.
|
| 18 |
+
__version__ = "1.1.2"
|
| 19 |
+
|
| 20 |
+
__all__ = ["minimize", "show_versions"]
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (312 Bytes). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc
ADDED
|
Binary file (29.6 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc
ADDED
|
Binary file (33.5 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc
ADDED
|
Binary file (41.4 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc
ADDED
|
Binary file (28.2 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc
ADDED
|
Binary file (3.88 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py
ADDED
|
@@ -0,0 +1,1240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy.optimize import lsq_linear
|
| 5 |
+
|
| 6 |
+
from .models import Models, Quadratic
|
| 7 |
+
from .settings import Options, Constants
|
| 8 |
+
from .subsolvers import (
|
| 9 |
+
cauchy_geometry,
|
| 10 |
+
spider_geometry,
|
| 11 |
+
normal_byrd_omojokun,
|
| 12 |
+
tangential_byrd_omojokun,
|
| 13 |
+
constrained_tangential_byrd_omojokun,
|
| 14 |
+
)
|
| 15 |
+
from .subsolvers.optim import qr_tangential_byrd_omojokun
|
| 16 |
+
from .utils import get_arrays_tol
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
TINY = np.finfo(float).tiny
|
| 20 |
+
EPS = np.finfo(float).eps
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TrustRegion:
|
| 24 |
+
"""
|
| 25 |
+
Trust-region framework.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
def __init__(self, pb, options, constants):
|
| 29 |
+
"""
|
| 30 |
+
Initialize the trust-region framework.
|
| 31 |
+
|
| 32 |
+
Parameters
|
| 33 |
+
----------
|
| 34 |
+
pb : `cobyqa.problem.Problem`
|
| 35 |
+
Problem to solve.
|
| 36 |
+
options : dict
|
| 37 |
+
Options of the solver.
|
| 38 |
+
constants : dict
|
| 39 |
+
Constants of the solver.
|
| 40 |
+
|
| 41 |
+
Raises
|
| 42 |
+
------
|
| 43 |
+
`cobyqa.utils.MaxEvalError`
|
| 44 |
+
If the maximum number of evaluations is reached.
|
| 45 |
+
`cobyqa.utils.TargetSuccess`
|
| 46 |
+
If a nearly feasible point has been found with an objective
|
| 47 |
+
function value below the target.
|
| 48 |
+
`cobyqa.utils.FeasibleSuccess`
|
| 49 |
+
If a feasible point has been found for a feasibility problem.
|
| 50 |
+
`numpy.linalg.LinAlgError`
|
| 51 |
+
If the initial interpolation system is ill-defined.
|
| 52 |
+
"""
|
| 53 |
+
# Set the initial penalty parameter.
|
| 54 |
+
self._penalty = 0.0
|
| 55 |
+
|
| 56 |
+
# Initialize the models.
|
| 57 |
+
self._pb = pb
|
| 58 |
+
self._models = Models(self._pb, options, self.penalty)
|
| 59 |
+
self._constants = constants
|
| 60 |
+
|
| 61 |
+
# Set the index of the best interpolation point.
|
| 62 |
+
self._best_index = 0
|
| 63 |
+
self.set_best_index()
|
| 64 |
+
|
| 65 |
+
# Set the initial Lagrange multipliers.
|
| 66 |
+
self._lm_linear_ub = np.zeros(self.m_linear_ub)
|
| 67 |
+
self._lm_linear_eq = np.zeros(self.m_linear_eq)
|
| 68 |
+
self._lm_nonlinear_ub = np.zeros(self.m_nonlinear_ub)
|
| 69 |
+
self._lm_nonlinear_eq = np.zeros(self.m_nonlinear_eq)
|
| 70 |
+
self.set_multipliers(self.x_best)
|
| 71 |
+
|
| 72 |
+
# Set the initial trust-region radius and the resolution.
|
| 73 |
+
self._resolution = options[Options.RHOBEG]
|
| 74 |
+
self._radius = self.resolution
|
| 75 |
+
|
| 76 |
+
@property
|
| 77 |
+
def n(self):
|
| 78 |
+
"""
|
| 79 |
+
Number of variables.
|
| 80 |
+
|
| 81 |
+
Returns
|
| 82 |
+
-------
|
| 83 |
+
int
|
| 84 |
+
Number of variables.
|
| 85 |
+
"""
|
| 86 |
+
return self._pb.n
|
| 87 |
+
|
| 88 |
+
@property
|
| 89 |
+
def m_linear_ub(self):
|
| 90 |
+
"""
|
| 91 |
+
Number of linear inequality constraints.
|
| 92 |
+
|
| 93 |
+
Returns
|
| 94 |
+
-------
|
| 95 |
+
int
|
| 96 |
+
Number of linear inequality constraints.
|
| 97 |
+
"""
|
| 98 |
+
return self._pb.m_linear_ub
|
| 99 |
+
|
| 100 |
+
@property
|
| 101 |
+
def m_linear_eq(self):
|
| 102 |
+
"""
|
| 103 |
+
Number of linear equality constraints.
|
| 104 |
+
|
| 105 |
+
Returns
|
| 106 |
+
-------
|
| 107 |
+
int
|
| 108 |
+
Number of linear equality constraints.
|
| 109 |
+
"""
|
| 110 |
+
return self._pb.m_linear_eq
|
| 111 |
+
|
| 112 |
+
@property
|
| 113 |
+
def m_nonlinear_ub(self):
|
| 114 |
+
"""
|
| 115 |
+
Number of nonlinear inequality constraints.
|
| 116 |
+
|
| 117 |
+
Returns
|
| 118 |
+
-------
|
| 119 |
+
int
|
| 120 |
+
Number of nonlinear inequality constraints.
|
| 121 |
+
"""
|
| 122 |
+
return self._pb.m_nonlinear_ub
|
| 123 |
+
|
| 124 |
+
@property
|
| 125 |
+
def m_nonlinear_eq(self):
|
| 126 |
+
"""
|
| 127 |
+
Number of nonlinear equality constraints.
|
| 128 |
+
|
| 129 |
+
Returns
|
| 130 |
+
-------
|
| 131 |
+
int
|
| 132 |
+
Number of nonlinear equality constraints.
|
| 133 |
+
"""
|
| 134 |
+
return self._pb.m_nonlinear_eq
|
| 135 |
+
|
| 136 |
+
@property
|
| 137 |
+
def radius(self):
|
| 138 |
+
"""
|
| 139 |
+
Trust-region radius.
|
| 140 |
+
|
| 141 |
+
Returns
|
| 142 |
+
-------
|
| 143 |
+
float
|
| 144 |
+
Trust-region radius.
|
| 145 |
+
"""
|
| 146 |
+
return self._radius
|
| 147 |
+
|
| 148 |
+
@radius.setter
|
| 149 |
+
def radius(self, radius):
|
| 150 |
+
"""
|
| 151 |
+
Set the trust-region radius.
|
| 152 |
+
|
| 153 |
+
Parameters
|
| 154 |
+
----------
|
| 155 |
+
radius : float
|
| 156 |
+
New trust-region radius.
|
| 157 |
+
"""
|
| 158 |
+
self._radius = radius
|
| 159 |
+
if (
|
| 160 |
+
self.radius
|
| 161 |
+
<= self._constants[Constants.DECREASE_RADIUS_THRESHOLD]
|
| 162 |
+
* self.resolution
|
| 163 |
+
):
|
| 164 |
+
self._radius = self.resolution
|
| 165 |
+
|
| 166 |
+
@property
|
| 167 |
+
def resolution(self):
|
| 168 |
+
"""
|
| 169 |
+
Resolution of the trust-region framework.
|
| 170 |
+
|
| 171 |
+
The resolution is a lower bound on the trust-region radius.
|
| 172 |
+
|
| 173 |
+
Returns
|
| 174 |
+
-------
|
| 175 |
+
float
|
| 176 |
+
Resolution of the trust-region framework.
|
| 177 |
+
"""
|
| 178 |
+
return self._resolution
|
| 179 |
+
|
| 180 |
+
@resolution.setter
|
| 181 |
+
def resolution(self, resolution):
|
| 182 |
+
"""
|
| 183 |
+
Set the resolution of the trust-region framework.
|
| 184 |
+
|
| 185 |
+
Parameters
|
| 186 |
+
----------
|
| 187 |
+
resolution : float
|
| 188 |
+
New resolution of the trust-region framework.
|
| 189 |
+
"""
|
| 190 |
+
self._resolution = resolution
|
| 191 |
+
|
| 192 |
+
@property
|
| 193 |
+
def penalty(self):
|
| 194 |
+
"""
|
| 195 |
+
Penalty parameter.
|
| 196 |
+
|
| 197 |
+
Returns
|
| 198 |
+
-------
|
| 199 |
+
float
|
| 200 |
+
Penalty parameter.
|
| 201 |
+
"""
|
| 202 |
+
return self._penalty
|
| 203 |
+
|
| 204 |
+
@property
|
| 205 |
+
def models(self):
|
| 206 |
+
"""
|
| 207 |
+
Models of the objective function and constraints.
|
| 208 |
+
|
| 209 |
+
Returns
|
| 210 |
+
-------
|
| 211 |
+
`cobyqa.models.Models`
|
| 212 |
+
Models of the objective function and constraints.
|
| 213 |
+
"""
|
| 214 |
+
return self._models
|
| 215 |
+
|
| 216 |
+
@property
|
| 217 |
+
def best_index(self):
|
| 218 |
+
"""
|
| 219 |
+
Index of the best interpolation point.
|
| 220 |
+
|
| 221 |
+
Returns
|
| 222 |
+
-------
|
| 223 |
+
int
|
| 224 |
+
Index of the best interpolation point.
|
| 225 |
+
"""
|
| 226 |
+
return self._best_index
|
| 227 |
+
|
| 228 |
+
@property
|
| 229 |
+
def x_best(self):
|
| 230 |
+
"""
|
| 231 |
+
Best interpolation point.
|
| 232 |
+
|
| 233 |
+
Its value is interpreted as relative to the origin, not the base point.
|
| 234 |
+
|
| 235 |
+
Returns
|
| 236 |
+
-------
|
| 237 |
+
`numpy.ndarray`
|
| 238 |
+
Best interpolation point.
|
| 239 |
+
"""
|
| 240 |
+
return self.models.interpolation.point(self.best_index)
|
| 241 |
+
|
| 242 |
+
@property
|
| 243 |
+
def fun_best(self):
|
| 244 |
+
"""
|
| 245 |
+
Value of the objective function at `x_best`.
|
| 246 |
+
|
| 247 |
+
Returns
|
| 248 |
+
-------
|
| 249 |
+
float
|
| 250 |
+
Value of the objective function at `x_best`.
|
| 251 |
+
"""
|
| 252 |
+
return self.models.fun_val[self.best_index]
|
| 253 |
+
|
| 254 |
+
@property
|
| 255 |
+
def cub_best(self):
|
| 256 |
+
"""
|
| 257 |
+
Values of the nonlinear inequality constraints at `x_best`.
|
| 258 |
+
|
| 259 |
+
Returns
|
| 260 |
+
-------
|
| 261 |
+
`numpy.ndarray`, shape (m_nonlinear_ub,)
|
| 262 |
+
Values of the nonlinear inequality constraints at `x_best`.
|
| 263 |
+
"""
|
| 264 |
+
return self.models.cub_val[self.best_index, :]
|
| 265 |
+
|
| 266 |
+
@property
|
| 267 |
+
def ceq_best(self):
|
| 268 |
+
"""
|
| 269 |
+
Values of the nonlinear equality constraints at `x_best`.
|
| 270 |
+
|
| 271 |
+
Returns
|
| 272 |
+
-------
|
| 273 |
+
`numpy.ndarray`, shape (m_nonlinear_eq,)
|
| 274 |
+
Values of the nonlinear equality constraints at `x_best`.
|
| 275 |
+
"""
|
| 276 |
+
return self.models.ceq_val[self.best_index, :]
|
| 277 |
+
|
| 278 |
+
def lag_model(self, x):
|
| 279 |
+
"""
|
| 280 |
+
Evaluate the Lagrangian model at a given point.
|
| 281 |
+
|
| 282 |
+
Parameters
|
| 283 |
+
----------
|
| 284 |
+
x : `numpy.ndarray`, shape (n,)
|
| 285 |
+
Point at which the Lagrangian model is evaluated.
|
| 286 |
+
|
| 287 |
+
Returns
|
| 288 |
+
-------
|
| 289 |
+
float
|
| 290 |
+
Value of the Lagrangian model at `x`.
|
| 291 |
+
"""
|
| 292 |
+
return (
|
| 293 |
+
self.models.fun(x)
|
| 294 |
+
+ self._lm_linear_ub
|
| 295 |
+
@ (self._pb.linear.a_ub @ x - self._pb.linear.b_ub)
|
| 296 |
+
+ self._lm_linear_eq
|
| 297 |
+
@ (self._pb.linear.a_eq @ x - self._pb.linear.b_eq)
|
| 298 |
+
+ self._lm_nonlinear_ub @ self.models.cub(x)
|
| 299 |
+
+ self._lm_nonlinear_eq @ self.models.ceq(x)
|
| 300 |
+
)
|
| 301 |
+
|
| 302 |
+
def lag_model_grad(self, x):
|
| 303 |
+
"""
|
| 304 |
+
Evaluate the gradient of the Lagrangian model at a given point.
|
| 305 |
+
|
| 306 |
+
Parameters
|
| 307 |
+
----------
|
| 308 |
+
x : `numpy.ndarray`, shape (n,)
|
| 309 |
+
Point at which the gradient of the Lagrangian model is evaluated.
|
| 310 |
+
|
| 311 |
+
Returns
|
| 312 |
+
-------
|
| 313 |
+
`numpy.ndarray`, shape (n,)
|
| 314 |
+
Gradient of the Lagrangian model at `x`.
|
| 315 |
+
"""
|
| 316 |
+
return (
|
| 317 |
+
self.models.fun_grad(x)
|
| 318 |
+
+ self._lm_linear_ub @ self._pb.linear.a_ub
|
| 319 |
+
+ self._lm_linear_eq @ self._pb.linear.a_eq
|
| 320 |
+
+ self._lm_nonlinear_ub @ self.models.cub_grad(x)
|
| 321 |
+
+ self._lm_nonlinear_eq @ self.models.ceq_grad(x)
|
| 322 |
+
)
|
| 323 |
+
|
| 324 |
+
def lag_model_hess(self):
|
| 325 |
+
"""
|
| 326 |
+
Evaluate the Hessian matrix of the Lagrangian model at a given point.
|
| 327 |
+
|
| 328 |
+
Returns
|
| 329 |
+
-------
|
| 330 |
+
`numpy.ndarray`, shape (n, n)
|
| 331 |
+
Hessian matrix of the Lagrangian model at `x`.
|
| 332 |
+
"""
|
| 333 |
+
hess = self.models.fun_hess()
|
| 334 |
+
if self.m_nonlinear_ub > 0:
|
| 335 |
+
hess += self._lm_nonlinear_ub @ self.models.cub_hess()
|
| 336 |
+
if self.m_nonlinear_eq > 0:
|
| 337 |
+
hess += self._lm_nonlinear_eq @ self.models.ceq_hess()
|
| 338 |
+
return hess
|
| 339 |
+
|
| 340 |
+
def lag_model_hess_prod(self, v):
|
| 341 |
+
"""
|
| 342 |
+
Evaluate the right product of the Hessian matrix of the Lagrangian
|
| 343 |
+
model with a given vector.
|
| 344 |
+
|
| 345 |
+
Parameters
|
| 346 |
+
----------
|
| 347 |
+
v : `numpy.ndarray`, shape (n,)
|
| 348 |
+
Vector with which the Hessian matrix of the Lagrangian model is
|
| 349 |
+
multiplied from the right.
|
| 350 |
+
|
| 351 |
+
Returns
|
| 352 |
+
-------
|
| 353 |
+
`numpy.ndarray`, shape (n,)
|
| 354 |
+
Right product of the Hessian matrix of the Lagrangian model with
|
| 355 |
+
`v`.
|
| 356 |
+
"""
|
| 357 |
+
return (
|
| 358 |
+
self.models.fun_hess_prod(v)
|
| 359 |
+
+ self._lm_nonlinear_ub @ self.models.cub_hess_prod(v)
|
| 360 |
+
+ self._lm_nonlinear_eq @ self.models.ceq_hess_prod(v)
|
| 361 |
+
)
|
| 362 |
+
|
| 363 |
+
def lag_model_curv(self, v):
|
| 364 |
+
"""
|
| 365 |
+
Evaluate the curvature of the Lagrangian model along a given direction.
|
| 366 |
+
|
| 367 |
+
Parameters
|
| 368 |
+
----------
|
| 369 |
+
v : `numpy.ndarray`, shape (n,)
|
| 370 |
+
Direction along which the curvature of the Lagrangian model is
|
| 371 |
+
evaluated.
|
| 372 |
+
|
| 373 |
+
Returns
|
| 374 |
+
-------
|
| 375 |
+
float
|
| 376 |
+
Curvature of the Lagrangian model along `v`.
|
| 377 |
+
"""
|
| 378 |
+
return (
|
| 379 |
+
self.models.fun_curv(v)
|
| 380 |
+
+ self._lm_nonlinear_ub @ self.models.cub_curv(v)
|
| 381 |
+
+ self._lm_nonlinear_eq @ self.models.ceq_curv(v)
|
| 382 |
+
)
|
| 383 |
+
|
| 384 |
+
def sqp_fun(self, step):
|
| 385 |
+
"""
|
| 386 |
+
Evaluate the objective function of the SQP subproblem.
|
| 387 |
+
|
| 388 |
+
Parameters
|
| 389 |
+
----------
|
| 390 |
+
step : `numpy.ndarray`, shape (n,)
|
| 391 |
+
Step along which the objective function of the SQP subproblem is
|
| 392 |
+
evaluated.
|
| 393 |
+
|
| 394 |
+
Returns
|
| 395 |
+
-------
|
| 396 |
+
float
|
| 397 |
+
Value of the objective function of the SQP subproblem along `step`.
|
| 398 |
+
"""
|
| 399 |
+
return step @ (
|
| 400 |
+
self.models.fun_grad(self.x_best)
|
| 401 |
+
+ 0.5 * self.lag_model_hess_prod(step)
|
| 402 |
+
)
|
| 403 |
+
|
| 404 |
+
def sqp_cub(self, step):
|
| 405 |
+
"""
|
| 406 |
+
Evaluate the linearization of the nonlinear inequality constraints.
|
| 407 |
+
|
| 408 |
+
Parameters
|
| 409 |
+
----------
|
| 410 |
+
step : `numpy.ndarray`, shape (n,)
|
| 411 |
+
Step along which the linearization of the nonlinear inequality
|
| 412 |
+
constraints is evaluated.
|
| 413 |
+
|
| 414 |
+
Returns
|
| 415 |
+
-------
|
| 416 |
+
`numpy.ndarray`, shape (m_nonlinear_ub,)
|
| 417 |
+
Value of the linearization of the nonlinear inequality constraints
|
| 418 |
+
along `step`.
|
| 419 |
+
"""
|
| 420 |
+
return (
|
| 421 |
+
self.models.cub(self.x_best)
|
| 422 |
+
+ self.models.cub_grad(self.x_best) @ step
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
def sqp_ceq(self, step):
|
| 426 |
+
"""
|
| 427 |
+
Evaluate the linearization of the nonlinear equality constraints.
|
| 428 |
+
|
| 429 |
+
Parameters
|
| 430 |
+
----------
|
| 431 |
+
step : `numpy.ndarray`, shape (n,)
|
| 432 |
+
Step along which the linearization of the nonlinear equality
|
| 433 |
+
constraints is evaluated.
|
| 434 |
+
|
| 435 |
+
Returns
|
| 436 |
+
-------
|
| 437 |
+
`numpy.ndarray`, shape (m_nonlinear_ub,)
|
| 438 |
+
Value of the linearization of the nonlinear equality constraints
|
| 439 |
+
along `step`.
|
| 440 |
+
"""
|
| 441 |
+
return (
|
| 442 |
+
self.models.ceq(self.x_best)
|
| 443 |
+
+ self.models.ceq_grad(self.x_best) @ step
|
| 444 |
+
)
|
| 445 |
+
|
| 446 |
+
def merit(self, x, fun_val=None, cub_val=None, ceq_val=None):
|
| 447 |
+
"""
|
| 448 |
+
Evaluate the merit function at a given point.
|
| 449 |
+
|
| 450 |
+
Parameters
|
| 451 |
+
----------
|
| 452 |
+
x : `numpy.ndarray`, shape (n,)
|
| 453 |
+
Point at which the merit function is evaluated.
|
| 454 |
+
fun_val : float, optional
|
| 455 |
+
Value of the objective function at `x`. If not provided, the
|
| 456 |
+
objective function is evaluated at `x`.
|
| 457 |
+
cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
|
| 458 |
+
Values of the nonlinear inequality constraints. If not provided,
|
| 459 |
+
the nonlinear inequality constraints are evaluated at `x`.
|
| 460 |
+
ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 461 |
+
Values of the nonlinear equality constraints. If not provided,
|
| 462 |
+
the nonlinear equality constraints are evaluated at `x`.
|
| 463 |
+
|
| 464 |
+
Returns
|
| 465 |
+
-------
|
| 466 |
+
float
|
| 467 |
+
Value of the merit function at `x`.
|
| 468 |
+
"""
|
| 469 |
+
if fun_val is None or cub_val is None or ceq_val is None:
|
| 470 |
+
fun_val, cub_val, ceq_val = self._pb(x, self.penalty)
|
| 471 |
+
m_val = fun_val
|
| 472 |
+
if self._penalty > 0.0:
|
| 473 |
+
c_val = self._pb.violation(x, cub_val=cub_val, ceq_val=ceq_val)
|
| 474 |
+
if np.count_nonzero(c_val):
|
| 475 |
+
m_val += self._penalty * np.linalg.norm(c_val)
|
| 476 |
+
return m_val
|
| 477 |
+
|
| 478 |
+
def get_constraint_linearizations(self, x):
|
| 479 |
+
"""
|
| 480 |
+
Get the linearizations of the constraints at a given point.
|
| 481 |
+
|
| 482 |
+
Parameters
|
| 483 |
+
----------
|
| 484 |
+
x : `numpy.ndarray`, shape (n,)
|
| 485 |
+
Point at which the linearizations of the constraints are evaluated.
|
| 486 |
+
|
| 487 |
+
Returns
|
| 488 |
+
-------
|
| 489 |
+
`numpy.ndarray`, shape (m_linear_ub + m_nonlinear_ub, n)
|
| 490 |
+
Left-hand side matrix of the linearized inequality constraints.
|
| 491 |
+
`numpy.ndarray`, shape (m_linear_ub + m_nonlinear_ub,)
|
| 492 |
+
Right-hand side vector of the linearized inequality constraints.
|
| 493 |
+
`numpy.ndarray`, shape (m_linear_eq + m_nonlinear_eq, n)
|
| 494 |
+
Left-hand side matrix of the linearized equality constraints.
|
| 495 |
+
`numpy.ndarray`, shape (m_linear_eq + m_nonlinear_eq,)
|
| 496 |
+
Right-hand side vector of the linearized equality constraints.
|
| 497 |
+
"""
|
| 498 |
+
aub = np.block(
|
| 499 |
+
[
|
| 500 |
+
[self._pb.linear.a_ub],
|
| 501 |
+
[self.models.cub_grad(x)],
|
| 502 |
+
]
|
| 503 |
+
)
|
| 504 |
+
bub = np.block(
|
| 505 |
+
[
|
| 506 |
+
self._pb.linear.b_ub - self._pb.linear.a_ub @ x,
|
| 507 |
+
-self.models.cub(x),
|
| 508 |
+
]
|
| 509 |
+
)
|
| 510 |
+
aeq = np.block(
|
| 511 |
+
[
|
| 512 |
+
[self._pb.linear.a_eq],
|
| 513 |
+
[self.models.ceq_grad(x)],
|
| 514 |
+
]
|
| 515 |
+
)
|
| 516 |
+
beq = np.block(
|
| 517 |
+
[
|
| 518 |
+
self._pb.linear.b_eq - self._pb.linear.a_eq @ x,
|
| 519 |
+
-self.models.ceq(x),
|
| 520 |
+
]
|
| 521 |
+
)
|
| 522 |
+
return aub, bub, aeq, beq
|
| 523 |
+
|
| 524 |
+
def get_trust_region_step(self, options):
    """
    Get the trust-region step.

    The trust-region step is computed by solving the derivative-free
    trust-region SQP subproblem using a Byrd-Omojokun composite-step
    approach. For more details, see Section 5.2.3 of [1]_.

    Parameters
    ----------
    options : dict
        Options of the solver.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Normal step.
    `numpy.ndarray`, shape (n,)
        Tangential step.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization
       Methods and Software*. PhD thesis, Department of Applied
       Mathematics, The Hong Kong Polytechnic University, Hong Kong, China,
       2022. URL: https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    # Evaluate the linearizations of the constraints.
    aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best)
    xl = self._pb.bounds.xl - self.x_best
    xu = self._pb.bounds.xu - self.x_best

    # Evaluate the normal step. Its radius is a fraction of the
    # trust-region radius, leaving room for the tangential step.
    radius = self._constants[Constants.BYRD_OMOJOKUN_FACTOR] * self.radius
    normal_step = normal_byrd_omojokun(
        aub,
        bub,
        aeq,
        beq,
        xl,
        xu,
        radius,
        options[Options.DEBUG],
        **self._constants,
    )
    if options[Options.DEBUG]:
        tol = get_arrays_tol(xl, xu)
        if (np.any(normal_step + tol < xl)
                or np.any(xu < normal_step - tol)):
            warnings.warn(
                "the normal step does not respect the bound constraint.",
                RuntimeWarning,
                2,
            )
        if np.linalg.norm(normal_step) > 1.1 * radius:
            warnings.warn(
                "the normal step does not respect the trust-region "
                "constraint.",
                RuntimeWarning,
                2,
            )

    # Evaluate the tangential step in the remaining trust region, after
    # recentering the bounds and linearized inequality constraints on
    # the normal step.
    radius = np.sqrt(self.radius**2.0 - normal_step @ normal_step)
    xl -= normal_step
    xu -= normal_step
    bub = np.maximum(bub - aub @ normal_step, 0.0)
    g_best = self.models.fun_grad(self.x_best) + self.lag_model_hess_prod(
        normal_step
    )
    if self._pb.type in ["unconstrained", "bound-constrained"]:
        tangential_step = tangential_byrd_omojokun(
            g_best,
            self.lag_model_hess_prod,
            xl,
            xu,
            radius,
            options[Options.DEBUG],
            **self._constants,
        )
    else:
        tangential_step = constrained_tangential_byrd_omojokun(
            g_best,
            self.lag_model_hess_prod,
            xl,
            xu,
            aub,
            bub,
            aeq,
            radius,
            # NOTE: use the Options enum consistently; this lookup
            # previously accessed the raw "debug" key, unlike every
            # other option access in this method.
            options[Options.DEBUG],
            **self._constants,
        )
    if options[Options.DEBUG]:
        tol = get_arrays_tol(xl, xu)
        if np.any(tangential_step + tol < xl) or np.any(
            xu < tangential_step - tol
        ):
            warnings.warn(
                "The tangential step does not respect the bound "
                "constraints.",
                RuntimeWarning,
                2,
            )
        if (
            np.linalg.norm(normal_step + tangential_step)
            > 1.1 * np.sqrt(2.0) * self.radius
        ):
            warnings.warn(
                "The trial step does not respect the trust-region "
                "constraint.",
                RuntimeWarning,
                2,
            )
    return normal_step, tangential_step
def get_geometry_step(self, k_new, options):
    """
    Get the geometry-improving step.

    Three different geometry-improving steps are computed and the best one
    is returned. For more details, see Section 5.2.7 of [1]_.

    Parameters
    ----------
    k_new : int
        Index of the interpolation point to be modified.
    options : dict
        Options of the solver.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Geometry-improving step.

    Raises
    ------
    `numpy.linalg.LinAlgError`
        If the computation of a determinant fails.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization
       Methods and Software*. PhD thesis, Department of Applied
       Mathematics, The Hong Kong Polytechnic University, Hong Kong, China,
       2022. URL: https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if options[Options.DEBUG]:
        assert (
            k_new != self.best_index
        ), "The index `k_new` must be different from the best index."

    # Build the k_new-th Lagrange polynomial: the quadratic that is 1 at
    # the k_new-th interpolation point and 0 at all the others
    # (coord_vec is the k_new-th canonical basis vector of length npt).
    coord_vec = np.squeeze(np.eye(1, self.models.npt, k_new))
    lag = Quadratic(
        self.models.interpolation,
        coord_vec,
        options[Options.DEBUG],
    )
    g_lag = lag.grad(self.x_best, self.models.interpolation)

    # Compute a simple constrained Cauchy step. The bounds are shifted so
    # that the step is taken from the best point.
    xl = self._pb.bounds.xl - self.x_best
    xu = self._pb.bounds.xu - self.x_best
    step = cauchy_geometry(
        0.0,
        g_lag,
        lambda v: lag.curv(v, self.models.interpolation),
        xl,
        xu,
        self.radius,
        options[Options.DEBUG],
    )
    sigma = self.models.determinants(self.x_best + step, k_new)

    # Compute the solution on the straight lines joining the interpolation
    # points to the k-th one, and choose it if it provides a larger value
    # of the determinant of the interpolation system in absolute value.
    # The points are recentered on the best point, and the (now zero)
    # best point is swapped into column 0 so it can be excluded below.
    xpt = (
        self.models.interpolation.xpt
        - self.models.interpolation.xpt[:, self.best_index, np.newaxis]
    )
    xpt[:, [0, self.best_index]] = xpt[:, [self.best_index, 0]]
    step_alt = spider_geometry(
        0.0,
        g_lag,
        lambda v: lag.curv(v, self.models.interpolation),
        xpt[:, 1:],
        xl,
        xu,
        self.radius,
        options[Options.DEBUG],
    )
    sigma_alt = self.models.determinants(self.x_best + step_alt, k_new)
    if abs(sigma_alt) > abs(sigma):
        step = step_alt
        sigma = sigma_alt

    # Compute a Cauchy step on the tangent space of the active constraints.
    if self._pb.type in [
        "linearly constrained",
        "nonlinearly constrained",
    ]:
        aub, bub, aeq, beq = (
            self.get_constraint_linearizations(self.x_best))
        tol_bd = get_arrays_tol(xl, xu)
        tol_ub = get_arrays_tol(bub)
        # "Free" means the corresponding bound/inequality is not active
        # (within tolerance) at the best point.
        free_xl = xl <= -tol_bd
        free_xu = xu >= tol_bd
        free_ub = bub >= tol_ub

        # Compute the Cauchy step: project the Lagrange gradient onto the
        # null space of the active constraints via the QR factorization.
        n_act, q = qr_tangential_byrd_omojokun(
            aub,
            aeq,
            free_xl,
            free_xu,
            free_ub,
        )
        g_lag_proj = q[:, n_act:] @ (q[:, n_act:].T @ g_lag)
        norm_g_lag_proj = np.linalg.norm(g_lag_proj)
        if 0 < n_act < self._pb.n and norm_g_lag_proj > TINY * self.radius:
            step_alt = (self.radius / norm_g_lag_proj) * g_lag_proj
            # Follow the direction of increasing |lag|, i.e., flip the
            # step if the curvature along it is negative.
            if lag.curv(step_alt, self.models.interpolation) < 0.0:
                step_alt = -step_alt

            # Evaluate the constraint violation at the Cauchy step.
            cbd = np.block([xl - step_alt, step_alt - xu])
            cub = aub @ step_alt - bub
            ceq = aeq @ step_alt - beq
            maxcv_val = max(
                np.max(array, initial=0.0)
                for array in [cbd, cub, np.abs(ceq)]
            )

            # Accept the new step if it is nearly feasible and do not
            # drastically worsen the determinant of the interpolation
            # system in absolute value. The tolerance scales with the
            # step's components along the active constraints.
            tol = np.max(np.abs(step_alt[~free_xl]), initial=0.0)
            tol = np.max(np.abs(step_alt[~free_xu]), initial=tol)
            tol = np.max(np.abs(aub[~free_ub, :] @ step_alt), initial=tol)
            tol = min(10.0 * tol, 1e-2 * np.linalg.norm(step_alt))
            if maxcv_val <= tol:
                sigma_alt = self.models.determinants(
                    self.x_best + step_alt, k_new
                )
                if abs(sigma_alt) >= 0.1 * abs(sigma):
                    step = np.clip(step_alt, xl, xu)

    if options[Options.DEBUG]:
        tol = get_arrays_tol(xl, xu)
        if np.any(step + tol < xl) or np.any(xu < step - tol):
            warnings.warn(
                "The geometry step does not respect the bound "
                "constraints.",
                RuntimeWarning,
                2,
            )
        if np.linalg.norm(step) > 1.1 * self.radius:
            warnings.warn(
                "The geometry step does not respect the "
                "trust-region constraint.",
                RuntimeWarning,
                2,
            )
    return step
def get_second_order_correction_step(self, step, options):
    """
    Get the second-order correction step.

    Parameters
    ----------
    step : `numpy.ndarray`, shape (n,)
        Trust-region step.
    options : dict
        Options of the solver.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Second-order correction step.
    """
    # Linearize the constraints at the best point and shift the bounds
    # so the correction step is taken from the best point.
    aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best)
    bounds = self._pb.bounds
    lower = bounds.xl - self.x_best
    upper = bounds.xu - self.x_best

    # The correction step is a normal step whose trust-region radius is
    # the length of the trial step.
    step_norm = np.linalg.norm(step)
    soc_step = normal_byrd_omojokun(
        aub,
        bub,
        aeq,
        beq,
        lower,
        upper,
        step_norm,
        options[Options.DEBUG],
        **self._constants,
    )

    # Sanity checks, performed only in debug mode.
    if options[Options.DEBUG]:
        tol = get_arrays_tol(lower, upper)
        below = np.any(soc_step + tol < lower)
        above = np.any(upper < soc_step - tol)
        if below or above:
            warnings.warn(
                "The second-order correction step does not "
                "respect the bound constraints.",
                RuntimeWarning,
                2,
            )
        if np.linalg.norm(soc_step) > 1.1 * step_norm:
            warnings.warn(
                "The second-order correction step does not "
                "respect the trust-region constraint.",
                RuntimeWarning,
                2,
            )
    return soc_step
def get_reduction_ratio(self, step, fun_val, cub_val, ceq_val):
    """
    Get the reduction ratio.

    Parameters
    ----------
    step : `numpy.ndarray`, shape (n,)
        Trust-region step.
    fun_val : float
        Objective function value at the trial point.
    cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,)
        Nonlinear inequality constraint values at the trial point.
    ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,)
        Nonlinear equality constraint values at the trial point.

    Returns
    -------
    float
        Reduction ratio, or ``-1.0`` when the predicted reduction is too
        small relative to the actual one for the ratio to be reliable.
    """
    x_new = self.x_best + step
    # Actual merit values at the current best point and at the trial point.
    merit_old = self.merit(
        self.x_best,
        self.fun_best,
        self.cub_best,
        self.ceq_best,
    )
    merit_new = self.merit(x_new, fun_val, cub_val, ceq_val)
    # Merit values predicted by the models at the same two points.
    model_old = self.merit(
        self.x_best,
        0.0,
        self.models.cub(self.x_best),
        self.models.ceq(self.x_best),
    )
    model_new = self.merit(
        x_new,
        self.sqp_fun(step),
        self.sqp_cub(step),
        self.sqp_ceq(step),
    )
    actual_reduction = merit_old - merit_new
    predicted_reduction = abs(model_old - model_new)
    if predicted_reduction > TINY * abs(actual_reduction):
        return actual_reduction / predicted_reduction
    return -1.0
def increase_penalty(self, step):
    """
    Increase the penalty parameter.

    Parameters
    ----------
    step : `numpy.ndarray`, shape (n,)
        Trust-region step.

    Returns
    -------
    bool
        Whether the best point remained the same after the update.
    """
    # Reduction in the linearized constraint violation achieved by `step`
    # (clipped at zero, since the step may worsen the violation).
    aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best)
    viol_diff = max(
        np.linalg.norm(
            np.block(
                [
                    np.maximum(0.0, -bub),
                    beq,
                ]
            )
        )
        - np.linalg.norm(
            np.block(
                [
                    np.maximum(0.0, aub @ step - bub),
                    aeq @ step - beq,
                ]
            )
        ),
        0.0,
    )
    sqp_val = self.sqp_fun(step)

    # Threshold below which the penalty must be raised: at least the norm
    # of the Lagrange multipliers, and, when the violation reduction is
    # significant, at least the ratio of the SQP objective value to it.
    threshold = np.linalg.norm(
        np.block(
            [
                self._lm_linear_ub,
                self._lm_linear_eq,
                self._lm_nonlinear_ub,
                self._lm_nonlinear_eq,
            ]
        )
    )
    if abs(viol_diff) > TINY * abs(sqp_val):
        threshold = max(threshold, sqp_val / viol_diff)
    # Changing the penalty may change which interpolation point has the
    # best merit value, so remember the current best index to report
    # whether it survived the update.
    best_index_save = self.best_index
    if (
        self._penalty
        <= self._constants[Constants.PENALTY_INCREASE_THRESHOLD]
        * threshold
    ):
        self._penalty = max(
            self._constants[Constants.PENALTY_INCREASE_FACTOR] * threshold,
            1.0,
        )
    self.set_best_index()
    return best_index_save == self.best_index
def decrease_penalty(self):
    """
    Decrease the penalty parameter.

    The penalty parameter is capped at the value computed by
    `_get_low_penalty`, and the best-point index is refreshed since the
    merit values depend on the penalty.
    """
    low_penalty = self._get_low_penalty()
    self._penalty = min(low_penalty, self._penalty)
    self.set_best_index()
def set_best_index(self):
    """
    Set the index of the best point.

    Scans all interpolation points and selects the one with the smallest
    merit value, breaking near-ties (within a relative tolerance) in
    favor of the point with the smallest constraint violation.
    """
    # Merit value and constraint violation of the incumbent best point.
    best_index = self.best_index
    m_best = self.merit(
        self.x_best,
        self.models.fun_val[best_index],
        self.models.cub_val[best_index, :],
        self.models.ceq_val[best_index, :],
    )
    r_best = self._pb.maxcv(
        self.x_best,
        self.models.cub_val[best_index, :],
        self.models.ceq_val[best_index, :],
    )
    # Tolerance for considering two merit values as essentially equal,
    # scaled by problem size and the magnitude of the best merit value.
    tol = (
        10.0
        * EPS
        * max(self.models.n, self.models.npt)
        * max(abs(m_best), 1.0)
    )
    for k in range(self.models.npt):
        if k != self.best_index:
            x_val = self.models.interpolation.point(k)
            m_val = self.merit(
                x_val,
                self.models.fun_val[k],
                self.models.cub_val[k, :],
                self.models.ceq_val[k, :],
            )
            r_val = self._pb.maxcv(
                x_val,
                self.models.cub_val[k, :],
                self.models.ceq_val[k, :],
            )
            # Accept k if strictly better, or essentially tied but less
            # infeasible than the current candidate.
            if m_val < m_best or (m_val < m_best + tol and r_val < r_best):
                best_index = k
                m_best = m_val
                r_best = r_val
    self._best_index = best_index
def get_index_to_remove(self, x_new=None):
    """
    Get the index of the interpolation point to remove.

    If `x_new` is not provided, the index returned should be used during
    the geometry-improvement phase. Otherwise, the index returned is the
    best index for included `x_new` in the interpolation set.

    Parameters
    ----------
    x_new : `numpy.ndarray`, shape (n,), optional
        New point to be included in the interpolation set.

    Returns
    -------
    int
        Index of the interpolation point to remove.
    float
        Distance between `x_best` and the removed point.

    Raises
    ------
    `numpy.linalg.LinAlgError`
        If the computation of a determinant fails.
    """
    # Squared distances from every interpolation point to the best point.
    dist_sq = np.sum(
        (
            self.models.interpolation.xpt
            - self.models.interpolation.xpt[:, self.best_index, np.newaxis]
        )
        ** 2.0,
        axis=0,
    )
    if x_new is None:
        # Geometry phase: remove the point farthest from the best point.
        sigma = 1.0
        weights = dist_sq
    else:
        # Replacement phase: weigh each candidate by the determinant
        # change caused by inserting `x_new`, discounted (cubically) for
        # points already close to the best point.
        sigma = self.models.determinants(x_new)
        weights = (
            np.maximum(
                1.0,
                dist_sq
                / max(
                    self._constants[Constants.LOW_RADIUS_FACTOR]
                    * self.radius,
                    self.resolution,
                )
                ** 2.0,
            )
            ** 3.0
        )
    weights[self.best_index] = -1.0  # do not remove the best point
    k_max = np.argmax(weights * np.abs(sigma))
    return k_max, np.sqrt(dist_sq[k_max])
def update_radius(self, step, ratio):
    """
    Update the trust-region radius.

    Parameters
    ----------
    step : `numpy.ndarray`, shape (n,)
        Trust-region step.
    ratio : float
        Reduction ratio.
    """
    step_norm = np.linalg.norm(step)
    decrease = self._constants[Constants.DECREASE_RADIUS_FACTOR]
    if ratio <= self._constants[Constants.LOW_RATIO]:
        # Unsuccessful step: shrink the trust region.
        self.radius = decrease * self.radius
    elif ratio <= self._constants[Constants.HIGH_RATIO]:
        # Moderately successful step: shrink, but keep at least the
        # length of the step just taken.
        self.radius = max(decrease * self.radius, step_norm)
    else:
        # Very successful step: allow the radius to grow, capped by the
        # increase factor.
        increase = self._constants[Constants.INCREASE_RADIUS_FACTOR]
        threshold = self._constants[Constants.INCREASE_RADIUS_THRESHOLD]
        self.radius = min(
            increase * self.radius,
            max(decrease * self.radius, threshold * step_norm),
        )
def enhance_resolution(self, options):
    """
    Enhance the resolution of the trust-region framework.

    The resolution decreases geometrically while it is large compared to
    the final value ``rhoend``, then via a geometric mean, and finally
    snaps to ``rhoend`` exactly.

    Parameters
    ----------
    options : dict
        Options of the solver.
    """
    rhoend = options[Options.RHOEND]
    large = self._constants[Constants.LARGE_RESOLUTION_THRESHOLD]
    moderate = self._constants[Constants.MODERATE_RESOLUTION_THRESHOLD]
    if self.resolution > large * rhoend:
        self.resolution = (
            self._constants[Constants.DECREASE_RESOLUTION_FACTOR]
            * self.resolution
        )
    elif self.resolution > moderate * rhoend:
        self.resolution = np.sqrt(rhoend * self.resolution)
    else:
        self.resolution = rhoend

    # Reduce the trust-region radius, keeping it no smaller than the
    # new resolution.
    self._radius = max(
        self._constants[Constants.DECREASE_RADIUS_FACTOR] * self._radius,
        self.resolution,
    )
def shift_x_base(self, options):
    """
    Shift the base point to `x_best`.

    Parameters
    ----------
    options : dict
        Options of the solver.
    """
    # Forward a copy so the models never alias the current best point.
    best_point = np.copy(self.x_best)
    self.models.shift_x_base(best_point, options)
def set_multipliers(self, x):
    """
    Set the Lagrange multipliers.

    This method computes and set the Lagrange multipliers of the linear and
    nonlinear constraints to be the QP multipliers.

    Parameters
    ----------
    x : `numpy.ndarray`, shape (n,)
        Point at which the Lagrange multipliers are computed.
    """
    # Build the constraints of the least-squares problem. Only the
    # inequality constraints deemed active at `x` receive a multiplier;
    # the others are zeroed out below.
    incl_linear_ub = self._pb.linear.a_ub @ x >= self._pb.linear.b_ub
    incl_nonlinear_ub = self.cub_best >= 0.0
    incl_xl = self._pb.bounds.xl >= x
    incl_xu = self._pb.bounds.xu <= x
    m_linear_ub = np.count_nonzero(incl_linear_ub)
    m_nonlinear_ub = np.count_nonzero(incl_nonlinear_ub)
    m_xl = np.count_nonzero(incl_xl)
    m_xu = np.count_nonzero(incl_xu)

    if (
        m_linear_ub + m_nonlinear_ub + self.m_linear_eq
        + self.m_nonlinear_eq > 0
    ):
        # Jacobian of the active constraints, stacked in the fixed order:
        # lower bounds, upper bounds, linear ub, nonlinear ub, linear eq,
        # nonlinear eq. The slicing of `res.x` below relies on this order.
        identity = np.eye(self._pb.n)
        c_jac = np.r_[
            -identity[incl_xl, :],
            identity[incl_xu, :],
            self._pb.linear.a_ub[incl_linear_ub, :],
            self.models.cub_grad(x, incl_nonlinear_ub),
            self._pb.linear.a_eq,
            self.models.ceq_grad(x),
        ]

        # Solve the least-squares problem. The multipliers of the
        # inequality constraints (the first m_xl + m_xu + m_linear_ub +
        # m_nonlinear_ub entries) are constrained to be nonnegative.
        g_best = self.models.fun_grad(x)
        xl_lm = np.full(c_jac.shape[0], -np.inf)
        xl_lm[: m_xl + m_xu + m_linear_ub + m_nonlinear_ub] = 0.0
        res = lsq_linear(
            c_jac.T,
            -g_best,
            bounds=(xl_lm, np.inf),
            method="bvls",
        )

        # Extract the Lagrange multipliers, slicing `res.x` in the same
        # order the rows of `c_jac` were stacked. Bound multipliers
        # (the first m_xl + m_xu entries) are discarded.
        self._lm_linear_ub[incl_linear_ub] = res.x[
            m_xl + m_xu:m_xl + m_xu + m_linear_ub
        ]
        self._lm_linear_ub[~incl_linear_ub] = 0.0
        self._lm_nonlinear_ub[incl_nonlinear_ub] = res.x[
            m_xl
            + m_xu
            + m_linear_ub:m_xl
            + m_xu
            + m_linear_ub
            + m_nonlinear_ub
        ]
        self._lm_nonlinear_ub[~incl_nonlinear_ub] = 0.0
        self._lm_linear_eq[:] = res.x[
            m_xl
            + m_xu
            + m_linear_ub
            + m_nonlinear_ub:m_xl
            + m_xu
            + m_linear_ub
            + m_nonlinear_ub
            + self.m_linear_eq
        ]
        self._lm_nonlinear_eq[:] = res.x[
            m_xl + m_xu + m_linear_ub + m_nonlinear_ub + self.m_linear_eq:
        ]
def _get_low_penalty(self):
    """
    Compute a low value for the penalty parameter.

    Returns
    -------
    float
        A penalty value derived from the spread of the objective function
        values relative to the spread of the constraint values over the
        interpolation set: ``0.0`` when no constraint qualifies,
        ``numpy.inf`` when the constraint spread is negligible.
    """
    # Residuals of the inequality constraints (linear and nonlinear) at
    # every interpolation point, one row per point.
    r_val_ub = np.c_[
        (
            self.models.interpolation.x_base[np.newaxis, :]
            + self.models.interpolation.xpt.T
        )
        @ self._pb.linear.a_ub.T
        - self._pb.linear.b_ub[np.newaxis, :],
        self.models.cub_val,
    ]
    # Residuals of the equality constraints, with both signs included so
    # they are treated like pairs of inequality constraints.
    r_val_eq = (
        self.models.interpolation.x_base[np.newaxis, :]
        + self.models.interpolation.xpt.T
    ) @ self._pb.linear.a_eq.T - self._pb.linear.b_eq[np.newaxis, :]
    r_val_eq = np.block(
        [
            r_val_eq,
            -r_val_eq,
            self.models.ceq_val,
            -self.models.ceq_val,
        ]
    )
    r_val = np.block([r_val_ub, r_val_eq])
    # Per-constraint extreme residuals across the interpolation set
    # (NaN-safe, as some values may be unevaluated).
    c_min = np.nanmin(r_val, axis=0)
    c_max = np.nanmax(r_val, axis=0)
    # Constraints whose residuals vary enough across the set to inform
    # the penalty value.
    indices = (
        c_min
        < self._constants[Constants.THRESHOLD_RATIO_CONSTRAINTS] * c_max
    )
    if np.any(indices):
        f_min = np.nanmin(self.models.fun_val)
        f_max = np.nanmax(self.models.fun_val)
        c_min_neg = np.minimum(0.0, c_min[indices])
        c_diff = np.min(c_max[indices] - c_min_neg)
        if c_diff > TINY * (f_max - f_min):
            # Ratio of the objective spread to the constraint spread.
            penalty = (f_max - f_min) / c_diff
        else:
            penalty = np.inf
    else:
        penalty = 0.0
    return penalty
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py
ADDED
|
@@ -0,0 +1,1506 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy.optimize import (
|
| 5 |
+
Bounds,
|
| 6 |
+
LinearConstraint,
|
| 7 |
+
NonlinearConstraint,
|
| 8 |
+
OptimizeResult,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
from .framework import TrustRegion
|
| 12 |
+
from .problem import (
|
| 13 |
+
ObjectiveFunction,
|
| 14 |
+
BoundConstraints,
|
| 15 |
+
LinearConstraints,
|
| 16 |
+
NonlinearConstraints,
|
| 17 |
+
Problem,
|
| 18 |
+
)
|
| 19 |
+
from .utils import (
|
| 20 |
+
MaxEvalError,
|
| 21 |
+
TargetSuccess,
|
| 22 |
+
CallbackSuccess,
|
| 23 |
+
FeasibleSuccess,
|
| 24 |
+
exact_1d_array,
|
| 25 |
+
)
|
| 26 |
+
from .settings import (
|
| 27 |
+
ExitStatus,
|
| 28 |
+
Options,
|
| 29 |
+
Constants,
|
| 30 |
+
DEFAULT_OPTIONS,
|
| 31 |
+
DEFAULT_CONSTANTS,
|
| 32 |
+
PRINT_OPTIONS,
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def minimize(
    fun,
    x0,
    args=(),
    bounds=None,
    constraints=(),
    callback=None,
    options=None,
    **kwargs,
):
    r"""
    Minimize a scalar function using the COBYQA method.

    The Constrained Optimization BY Quadratic Approximations (COBYQA) method is
    a derivative-free optimization method designed to solve general nonlinear
    optimization problems. A complete description of COBYQA is given in [3]_.

    Parameters
    ----------
    fun : {callable, None}
        Objective function to be minimized.

            ``fun(x, *args) -> float``

        where ``x`` is an array with shape (n,) and `args` is a tuple. If `fun`
        is ``None``, the objective function is assumed to be the zero function,
        resulting in a feasibility problem.
    x0 : array_like, shape (n,)
        Initial guess.
    args : tuple, optional
        Extra arguments passed to the objective function.
    bounds : {`scipy.optimize.Bounds`, array_like, shape (n, 2)}, optional
        Bound constraints of the problem. It can be one of the cases below.

        #. An instance of `scipy.optimize.Bounds`. For the time being, the
           argument ``keep_feasible`` is disregarded, and all the constraints
           are considered unrelaxable and will be enforced.
        #. An array with shape (n, 2). The bound constraints for ``x[i]`` are
           ``bounds[i][0] <= x[i] <= bounds[i][1]``. Set ``bounds[i][0]`` to
           :math:`-\infty` if there is no lower bound, and set ``bounds[i][1]``
           to :math:`\infty` if there is no upper bound.

        The COBYQA method always respect the bound constraints.
    constraints : {Constraint, list}, optional
        General constraints of the problem. It can be one of the cases below.

        #. An instance of `scipy.optimize.LinearConstraint`. The argument
           ``keep_feasible`` is disregarded.
        #. An instance of `scipy.optimize.NonlinearConstraint`. The arguments
           ``jac``, ``hess``, ``keep_feasible``, ``finite_diff_rel_step``, and
           ``finite_diff_jac_sparsity`` are disregarded.

        #. A list, each of whose elements are described in the cases above.

    callback : callable, optional
        A callback executed at each objective function evaluation. The method
        terminates if a ``StopIteration`` exception is raised by the callback
        function. Its signature can be one of the following:

            ``callback(intermediate_result)``

        where ``intermediate_result`` is a keyword parameter that contains an
        instance of `scipy.optimize.OptimizeResult`, with attributes ``x``
        and ``fun``, being the point at which the objective function is
        evaluated and the value of the objective function, respectively. The
        name of the parameter must be ``intermediate_result`` for the callback
        to be passed an instance of `scipy.optimize.OptimizeResult`.

        Alternatively, the callback function can have the signature:

            ``callback(xk)``

        where ``xk`` is the point at which the objective function is evaluated.
        Introspection is used to determine which of the signatures to invoke.
    options : dict, optional
        Options passed to the solver. Accepted keys are:

            disp : bool, optional
                Whether to print information about the optimization procedure.
                Default is ``False``.
            maxfev : int, optional
                Maximum number of function evaluations. Default is ``500 * n``.
            maxiter : int, optional
                Maximum number of iterations. Default is ``1000 * n``.
            target : float, optional
                Target on the objective function value. The optimization
                procedure is terminated when the objective function value of a
                feasible point is less than or equal to this target. Default is
                ``-numpy.inf``.
            feasibility_tol : float, optional
                Tolerance on the constraint violation. If the maximum
                constraint violation at a point is less than or equal to this
                tolerance, the point is considered feasible. Default is
                ``numpy.sqrt(numpy.finfo(float).eps)``.
            radius_init : float, optional
                Initial trust-region radius. Typically, this value should be in
                the order of one tenth of the greatest expected change to `x0`.
                Default is ``1.0``.
            radius_final : float, optional
                Final trust-region radius. It should indicate the accuracy
                required in the final values of the variables. Default is
                ``1e-6``.
            nb_points : int, optional
                Number of interpolation points used to build the quadratic
                models of the objective and constraint functions. Default is
                ``2 * n + 1``.
            scale : bool, optional
                Whether to scale the variables according to the bounds. Default
                is ``False``.
            filter_size : int, optional
                Maximum number of points in the filter. The filter is used to
                select the best point returned by the optimization procedure.
                Default is ``sys.maxsize``.
            store_history : bool, optional
                Whether to store the history of the function evaluations.
                Default is ``False``.
            history_size : int, optional
                Maximum number of function evaluations to store in the history.
                Default is ``sys.maxsize``.
            debug : bool, optional
                Whether to perform additional checks during the optimization
                procedure. This option should be used only for debugging
                purposes and is highly discouraged to general users. Default is
                ``False``.

        Other constants (from the keyword arguments) are described below. They
        are not intended to be changed by general users. They should only be
        changed by users with a deep understanding of the algorithm, who want
        to experiment with different settings.

    Returns
    -------
    `scipy.optimize.OptimizeResult`
        Result of the optimization procedure, with the following fields:

            message : str
                Description of the cause of the termination.
            success : bool
                Whether the optimization procedure terminated successfully.
            status : int
                Termination status of the optimization procedure.
            x : `numpy.ndarray`, shape (n,)
                Solution point.
            fun : float
                Objective function value at the solution point.
            maxcv : float
                Maximum constraint violation at the solution point.
            nfev : int
                Number of function evaluations.
            nit : int
                Number of iterations.

        If ``store_history`` is True, the result also has the following fields:

            fun_history : `numpy.ndarray`, shape (nfev,)
                History of the objective function values.
            maxcv_history : `numpy.ndarray`, shape (nfev,)
                History of the maximum constraint violations.

        A description of the termination statuses is given below.

        .. list-table::
            :widths: 25 75
            :header-rows: 1

            * - Exit status
              - Description
            * - 0
              - The lower bound for the trust-region radius has been reached.
            * - 1
              - The target objective function value has been reached.
            * - 2
              - All variables are fixed by the bound constraints.
            * - 3
              - The callback requested to stop the optimization procedure.
            * - 4
              - The feasibility problem received has been solved successfully.
            * - 5
              - The maximum number of function evaluations has been exceeded.
            * - 6
              - The maximum number of iterations has been exceeded.
            * - -1
              - The bound constraints are infeasible.
            * - -2
              - A linear algebra error occurred.

    Other Parameters
    ----------------
    decrease_radius_factor : float, optional
        Factor by which the trust-region radius is reduced when the reduction
        ratio is low or negative. Default is ``0.5``.
    increase_radius_factor : float, optional
        Factor by which the trust-region radius is increased when the reduction
        ratio is large. Default is ``numpy.sqrt(2.0)``.
    increase_radius_threshold : float, optional
        Threshold that controls the increase of the trust-region radius when
        the reduction ratio is large. Default is ``2.0``.
    decrease_radius_threshold : float, optional
        Threshold used to determine whether the trust-region radius should be
        reduced to the resolution. Default is ``1.4``.
    decrease_resolution_factor : float, optional
        Factor by which the resolution is reduced when the current value is far
        from its final value. Default is ``0.1``.
    large_resolution_threshold : float, optional
        Threshold used to determine whether the resolution is far from its
        final value. Default is ``250.0``.
    moderate_resolution_threshold : float, optional
        Threshold used to determine whether the resolution is close to its
        final value. Default is ``16.0``.
    low_ratio : float, optional
        Threshold used to determine whether the reduction ratio is low. Default
        is ``0.1``.
    high_ratio : float, optional
        Threshold used to determine whether the reduction ratio is high.
        Default is ``0.7``.
    very_low_ratio : float, optional
        Threshold used to determine whether the reduction ratio is very low.
        This is used to determine whether the models should be reset. Default
        is ``0.01``.
    penalty_increase_threshold : float, optional
        Threshold used to determine whether the penalty parameter should be
        increased. Default is ``1.5``.
    penalty_increase_factor : float, optional
        Factor by which the penalty parameter is increased. Default is ``2.0``.
    short_step_threshold : float, optional
        Factor used to determine whether the trial step is too short. Default
        is ``0.5``.
    low_radius_factor : float, optional
        Factor used to determine which interpolation point should be removed
        from the interpolation set at each iteration. Default is ``0.1``.
    byrd_omojokun_factor : float, optional
        Factor by which the trust-region radius is reduced for the computations
        of the normal step in the Byrd-Omojokun composite-step approach.
        Default is ``0.8``.
    threshold_ratio_constraints : float, optional
        Threshold used to determine which constraints should be taken into
        account when decreasing the penalty parameter. Default is ``2.0``.
    large_shift_factor : float, optional
        Factor used to determine whether the point around which the quadratic
        models are built should be updated. Default is ``10.0``.
    large_gradient_factor : float, optional
        Factor used to determine whether the models should be reset. Default is
        ``10.0``.
    resolution_factor : float, optional
        Factor by which the resolution is decreased. Default is ``2.0``.
    improve_tcg : bool, optional
        Whether to improve the steps computed by the truncated conjugate
        gradient method when the trust-region boundary is reached. Default is
        ``True``.

    References
    ----------
    .. [1] J. Nocedal and S. J. Wright. *Numerical Optimization*. Springer Ser.
       Oper. Res. Financ. Eng. Springer, New York, NY, USA, second edition,
       2006. `doi:10.1007/978-0-387-40065-5
       <https://doi.org/10.1007/978-0-387-40065-5>`_.
    .. [2] M. J. D. Powell. A direct search optimization method that models the
       objective and constraint functions by linear interpolation. In S. Gomez
       and J.-P. Hennart, editors, *Advances in Optimization and Numerical
       Analysis*, volume 275 of Math. Appl., pages 51--67. Springer, Dordrecht,
       Netherlands, 1994. `doi:10.1007/978-94-015-8330-5_4
       <https://doi.org/10.1007/978-94-015-8330-5_4>`_.
    .. [3] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
       Kong Polytechnic University, Hong Kong, China, 2022. URL:
       https://theses.lib.polyu.edu.hk/handle/200/12294.

    Examples
    --------
    To demonstrate how to use `minimize`, we first minimize the Rosenbrock
    function implemented in `scipy.optimize` in an unconstrained setting.

    .. testsetup::

        import numpy as np
        np.set_printoptions(precision=3, suppress=True)

    >>> from cobyqa import minimize
    >>> from scipy.optimize import rosen

    To solve the problem using COBYQA, run:

    >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    >>> res = minimize(rosen, x0)
    >>> res.x
    array([1., 1., 1., 1., 1.])

    To see how bound and constraints are handled using `minimize`, we solve
    Example 16.4 of [1]_, defined as

    .. math::

        \begin{aligned}
            \min_{x \in \mathbb{R}^2}   & \quad (x_1 - 1)^2 + (x_2 - 2.5)^2\\
            \text{s.t.}                 & \quad -x_1 + 2x_2 \le 2,\\
                                        & \quad x_1 + 2x_2 \le 6,\\
                                        & \quad x_1 - 2x_2 \le 2,\\
                                        & \quad x_1 \ge 0,\\
                                        & \quad x_2 \ge 0.
        \end{aligned}

    >>> import numpy as np
    >>> from scipy.optimize import Bounds, LinearConstraint

    Its objective function can be implemented as:

    >>> def fun(x):
    ...     return (x[0] - 1.0)**2 + (x[1] - 2.5)**2

    This problem can be solved using `minimize` as:

    >>> x0 = [2.0, 0.0]
    >>> bounds = Bounds([0.0, 0.0], np.inf)
    >>> constraints = LinearConstraint([
    ...     [-1.0, 2.0],
    ...     [1.0, 2.0],
    ...     [1.0, -2.0],
    ... ], -np.inf, [2.0, 6.0, 2.0])
    >>> res = minimize(fun, x0, bounds=bounds, constraints=constraints)
    >>> res.x
    array([1.4, 1.7])

    To see how nonlinear constraints are handled, we solve Problem (F) of [2]_,
    defined as

    .. math::

        \begin{aligned}
            \min_{x \in \mathbb{R}^2}   & \quad -x_1 - x_2\\
            \text{s.t.}                 & \quad x_1^2 - x_2 \le 0,\\
                                        & \quad x_1^2 + x_2^2 \le 1.
        \end{aligned}

    >>> from scipy.optimize import NonlinearConstraint

    Its objective and constraint functions can be implemented as:

    >>> def fun(x):
    ...     return -x[0] - x[1]
    >>>
    >>> def cub(x):
    ...     return [x[0]**2 - x[1], x[0]**2 + x[1]**2]

    This problem can be solved using `minimize` as:

    >>> x0 = [1.0, 1.0]
    >>> constraints = NonlinearConstraint(cub, -np.inf, [0.0, 1.0])
    >>> res = minimize(fun, x0, constraints=constraints)
    >>> res.x
    array([0.707, 0.707])

    Finally, to see how to supply linear and nonlinear constraints
    simultaneously, we solve Problem (G) of [2]_, defined as

    .. math::

        \begin{aligned}
            \min_{x \in \mathbb{R}^3}   & \quad x_3\\
            \text{s.t.}                 & \quad 5x_1 - x_2 + x_3 \ge 0,\\
                                        & \quad -5x_1 - x_2 + x_3 \ge 0,\\
                                        & \quad x_1^2 + x_2^2 + 4x_2 \le x_3.
        \end{aligned}

    Its objective and nonlinear constraint functions can be implemented as:

    >>> def fun(x):
    ...     return x[2]
    >>>
    >>> def cub(x):
    ...     return x[0]**2 + x[1]**2 + 4.0*x[1] - x[2]

    This problem can be solved using `minimize` as:

    >>> x0 = [1.0, 1.0, 1.0]
    >>> constraints = [
    ...     LinearConstraint(
    ...         [[5.0, -1.0, 1.0], [-5.0, -1.0, 1.0]],
    ...         [0.0, 0.0],
    ...         np.inf,
    ...     ),
    ...     NonlinearConstraint(cub, -np.inf, 0.0),
    ... ]
    >>> res = minimize(fun, x0, constraints=constraints)
    >>> res.x
    array([ 0., -3., -3.])
    """
    # Get basic options that are needed for the initialization.
    if options is None:
        options = {}
    else:
        options = dict(options)
    verbose = options.get(Options.VERBOSE, DEFAULT_OPTIONS[Options.VERBOSE])
    verbose = bool(verbose)
    feasibility_tol = options.get(
        Options.FEASIBILITY_TOL,
        DEFAULT_OPTIONS[Options.FEASIBILITY_TOL],
    )
    feasibility_tol = float(feasibility_tol)
    scale = options.get(Options.SCALE, DEFAULT_OPTIONS[Options.SCALE])
    scale = bool(scale)
    store_history = options.get(
        Options.STORE_HISTORY,
        DEFAULT_OPTIONS[Options.STORE_HISTORY],
    )
    store_history = bool(store_history)
    if Options.HISTORY_SIZE in options and options[Options.HISTORY_SIZE] <= 0:
        raise ValueError("The size of the history must be positive.")
    history_size = options.get(
        Options.HISTORY_SIZE,
        DEFAULT_OPTIONS[Options.HISTORY_SIZE],
    )
    history_size = int(history_size)
    if Options.FILTER_SIZE in options and options[Options.FILTER_SIZE] <= 0:
        raise ValueError("The size of the filter must be positive.")
    filter_size = options.get(
        Options.FILTER_SIZE,
        DEFAULT_OPTIONS[Options.FILTER_SIZE],
    )
    filter_size = int(filter_size)
    debug = options.get(Options.DEBUG, DEFAULT_OPTIONS[Options.DEBUG])
    debug = bool(debug)

    # Initialize the objective function.
    if not isinstance(args, tuple):
        args = (args,)
    obj = ObjectiveFunction(fun, verbose, debug, *args)

    # Initialize the bound constraints.
    if not hasattr(x0, "__len__"):
        x0 = [x0]
    n_orig = len(x0)
    bounds = BoundConstraints(_get_bounds(bounds, n_orig))

    # Initialize the constraints.
    linear_constraints, nonlinear_constraints = _get_constraints(constraints)
    linear = LinearConstraints(linear_constraints, n_orig, debug)
    nonlinear = NonlinearConstraints(nonlinear_constraints, verbose, debug)

    # Initialize the problem (and remove the fixed variables).
    pb = Problem(
        obj,
        x0,
        bounds,
        linear,
        nonlinear,
        callback,
        feasibility_tol,
        scale,
        store_history,
        history_size,
        filter_size,
        debug,
    )

    # Set the default options.
    _set_default_options(options, pb.n)
    constants = _set_default_constants(**kwargs)

    # Initialize the models and skip the computations whenever possible.
    if not pb.bounds.is_feasible:
        # The bound constraints are infeasible.
        return _build_result(
            pb,
            0.0,
            False,
            ExitStatus.INFEASIBLE_ERROR,
            0,
            options,
        )
    elif pb.n == 0:
        # All variables are fixed by the bound constraints.
        return _build_result(
            pb,
            0.0,
            True,
            ExitStatus.FIXED_SUCCESS,
            0,
            options,
        )
    if verbose:
        print("Starting the optimization procedure.")
        print(f"Initial trust-region radius: {options[Options.RHOBEG]}.")
        print(f"Final trust-region radius: {options[Options.RHOEND]}.")
        print(
            f"Maximum number of function evaluations: "
            f"{options[Options.MAX_EVAL]}."
        )
        print(f"Maximum number of iterations: {options[Options.MAX_ITER]}.")
        print()
    try:
        framework = TrustRegion(pb, options, constants)
    except TargetSuccess:
        # The target on the objective function value has been reached
        return _build_result(
            pb,
            0.0,
            True,
            ExitStatus.TARGET_SUCCESS,
            0,
            options,
        )
    except CallbackSuccess:
        # The callback raised a StopIteration exception.
        return _build_result(
            pb,
            0.0,
            True,
            ExitStatus.CALLBACK_SUCCESS,
            0,
            options,
        )
    except FeasibleSuccess:
        # The feasibility problem has been solved successfully.
        return _build_result(
            pb,
            0.0,
            True,
            ExitStatus.FEASIBLE_SUCCESS,
            0,
            options,
        )
    except MaxEvalError:
        # The maximum number of function evaluations has been exceeded.
        # BUGFIX: this handler previously returned MAX_ITER_WARNING, which
        # contradicts the docstring (status 5 is "maximum number of function
        # evaluations exceeded") and the MaxEvalError handlers in the main
        # loop below, all of which set MAX_EVAL_WARNING.
        return _build_result(
            pb,
            0.0,
            False,
            ExitStatus.MAX_EVAL_WARNING,
            0,
            options,
        )
    except np.linalg.LinAlgError:
        # The construction of the initial interpolation set failed.
        return _build_result(
            pb,
            0.0,
            False,
            ExitStatus.LINALG_ERROR,
            0,
            options,
        )

    # Start the optimization procedure.
    success = False
    n_iter = 0
    k_new = None
    n_short_steps = 0
    n_very_short_steps = 0
    n_alt_models = 0
    while True:
        # Stop the optimization procedure if the maximum number of iterations
        # has been exceeded. We do not write the main loop as a for loop
        # because we want to access the number of iterations outside the loop.
        if n_iter >= options[Options.MAX_ITER]:
            status = ExitStatus.MAX_ITER_WARNING
            break
        n_iter += 1

        # Update the point around which the quadratic models are built.
        if (
            np.linalg.norm(
                framework.x_best - framework.models.interpolation.x_base
            )
            >= constants[Constants.LARGE_SHIFT_FACTOR] * framework.radius
        ):
            framework.shift_x_base(options)

        # Evaluate the trial step.
        radius_save = framework.radius
        normal_step, tangential_step = framework.get_trust_region_step(options)
        step = normal_step + tangential_step
        s_norm = np.linalg.norm(step)

        # If the trial step is too short, we do not attempt to evaluate the
        # objective and constraint functions. Instead, we reduce the
        # trust-region radius and check whether the resolution should be
        # enhanced and whether the geometry of the interpolation set should be
        # improved. Otherwise, we entertain a classical iteration. The
        # criterion for performing an exceptional jump is taken from NEWUOA.
        if (
            s_norm
            <= constants[Constants.SHORT_STEP_THRESHOLD] * framework.resolution
        ):
            framework.radius *= constants[Constants.DECREASE_RESOLUTION_FACTOR]
            if radius_save > framework.resolution:
                n_short_steps = 0
                n_very_short_steps = 0
            else:
                n_short_steps += 1
                n_very_short_steps += 1
                if s_norm > 0.1 * framework.resolution:
                    n_very_short_steps = 0
            enhance_resolution = n_short_steps >= 5 or n_very_short_steps >= 3
            if enhance_resolution:
                n_short_steps = 0
                n_very_short_steps = 0
                improve_geometry = False
            else:
                try:
                    k_new, dist_new = framework.get_index_to_remove()
                except np.linalg.LinAlgError:
                    status = ExitStatus.LINALG_ERROR
                    break
                improve_geometry = dist_new > max(
                    framework.radius,
                    constants[Constants.RESOLUTION_FACTOR]
                    * framework.resolution,
                )
        else:
            # Increase the penalty parameter if necessary.
            same_best_point = framework.increase_penalty(step)
            if same_best_point:
                # Evaluate the objective and constraint functions.
                try:
                    fun_val, cub_val, ceq_val = _eval(
                        pb,
                        framework,
                        step,
                        options,
                    )
                except TargetSuccess:
                    status = ExitStatus.TARGET_SUCCESS
                    success = True
                    break
                except FeasibleSuccess:
                    status = ExitStatus.FEASIBLE_SUCCESS
                    success = True
                    break
                except CallbackSuccess:
                    status = ExitStatus.CALLBACK_SUCCESS
                    success = True
                    break
                except MaxEvalError:
                    status = ExitStatus.MAX_EVAL_WARNING
                    break

                # Perform a second-order correction step if necessary.
                merit_old = framework.merit(
                    framework.x_best,
                    framework.fun_best,
                    framework.cub_best,
                    framework.ceq_best,
                )
                merit_new = framework.merit(
                    framework.x_best + step, fun_val, cub_val, ceq_val
                )
                if (
                    pb.type == "nonlinearly constrained"
                    and merit_new > merit_old
                    and np.linalg.norm(normal_step)
                    > constants[Constants.BYRD_OMOJOKUN_FACTOR] ** 2.0
                    * framework.radius
                ):
                    soc_step = framework.get_second_order_correction_step(
                        step, options
                    )
                    if np.linalg.norm(soc_step) > 0.0:
                        step += soc_step

                        # Evaluate the objective and constraint functions.
                        try:
                            fun_val, cub_val, ceq_val = _eval(
                                pb,
                                framework,
                                step,
                                options,
                            )
                        except TargetSuccess:
                            status = ExitStatus.TARGET_SUCCESS
                            success = True
                            break
                        except FeasibleSuccess:
                            status = ExitStatus.FEASIBLE_SUCCESS
                            success = True
                            break
                        except CallbackSuccess:
                            status = ExitStatus.CALLBACK_SUCCESS
                            success = True
                            break
                        except MaxEvalError:
                            status = ExitStatus.MAX_EVAL_WARNING
                            break

                # Calculate the reduction ratio.
                ratio = framework.get_reduction_ratio(
                    step,
                    fun_val,
                    cub_val,
                    ceq_val,
                )

                # Choose an interpolation point to remove.
                try:
                    k_new = framework.get_index_to_remove(
                        framework.x_best + step
                    )[0]
                except np.linalg.LinAlgError:
                    status = ExitStatus.LINALG_ERROR
                    break

                # Update the interpolation set.
                try:
                    ill_conditioned = framework.models.update_interpolation(
                        k_new, framework.x_best + step, fun_val, cub_val,
                        ceq_val
                    )
                except np.linalg.LinAlgError:
                    status = ExitStatus.LINALG_ERROR
                    break
                framework.set_best_index()

                # Update the trust-region radius.
                framework.update_radius(step, ratio)

                # Attempt to replace the models by the alternative ones.
                if framework.radius <= framework.resolution:
                    if ratio >= constants[Constants.VERY_LOW_RATIO]:
                        n_alt_models = 0
                    else:
                        n_alt_models += 1
                        grad = framework.models.fun_grad(framework.x_best)
                        try:
                            grad_alt = framework.models.fun_alt_grad(
                                framework.x_best
                            )
                        except np.linalg.LinAlgError:
                            status = ExitStatus.LINALG_ERROR
                            break
                        if np.linalg.norm(grad) < constants[
                            Constants.LARGE_GRADIENT_FACTOR
                        ] * np.linalg.norm(grad_alt):
                            n_alt_models = 0
                        if n_alt_models >= 3:
                            try:
                                framework.models.reset_models()
                            except np.linalg.LinAlgError:
                                status = ExitStatus.LINALG_ERROR
                                break
                            n_alt_models = 0

                # Update the Lagrange multipliers.
                framework.set_multipliers(framework.x_best + step)

                # Check whether the resolution should be enhanced.
                try:
                    k_new, dist_new = framework.get_index_to_remove()
                except np.linalg.LinAlgError:
                    status = ExitStatus.LINALG_ERROR
                    break
                improve_geometry = (
                    ill_conditioned
                    or ratio <= constants[Constants.LOW_RATIO]
                    and dist_new
                    > max(
                        framework.radius,
                        constants[Constants.RESOLUTION_FACTOR]
                        * framework.resolution,
                    )
                )
                enhance_resolution = (
                    radius_save <= framework.resolution
                    and ratio <= constants[Constants.LOW_RATIO]
                    and not improve_geometry
                )
            else:
                # When increasing the penalty parameter, the best point so far
                # may change. In this case, we restart the iteration.
                enhance_resolution = False
                improve_geometry = False

        # Reduce the resolution if necessary.
        if enhance_resolution:
            if framework.resolution <= options[Options.RHOEND]:
                success = True
                status = ExitStatus.RADIUS_SUCCESS
                break
            framework.enhance_resolution(options)
            framework.decrease_penalty()

            if verbose:
                maxcv_val = pb.maxcv(
                    framework.x_best, framework.cub_best, framework.ceq_best
                )
                _print_step(
                    f"New trust-region radius: {framework.resolution}",
                    pb,
                    pb.build_x(framework.x_best),
                    framework.fun_best,
                    maxcv_val,
                    pb.n_eval,
                    n_iter,
                )
                print()

        # Improve the geometry of the interpolation set if necessary.
        if improve_geometry:
            try:
                step = framework.get_geometry_step(k_new, options)
            except np.linalg.LinAlgError:
                status = ExitStatus.LINALG_ERROR
                break

            # Evaluate the objective and constraint functions.
            try:
                fun_val, cub_val, ceq_val = _eval(pb, framework, step, options)
            except TargetSuccess:
                status = ExitStatus.TARGET_SUCCESS
                success = True
                break
            except FeasibleSuccess:
                status = ExitStatus.FEASIBLE_SUCCESS
                success = True
                break
            except CallbackSuccess:
                status = ExitStatus.CALLBACK_SUCCESS
                success = True
                break
            except MaxEvalError:
                status = ExitStatus.MAX_EVAL_WARNING
                break

            # Update the interpolation set.
            try:
                framework.models.update_interpolation(
                    k_new,
                    framework.x_best + step,
                    fun_val,
                    cub_val,
                    ceq_val,
                )
            except np.linalg.LinAlgError:
                status = ExitStatus.LINALG_ERROR
                break
            framework.set_best_index()

    return _build_result(
        pb,
        framework.penalty,
        success,
        status,
        n_iter,
        options,
    )
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
def _get_bounds(bounds, n):
|
| 882 |
+
"""
|
| 883 |
+
Uniformize the bounds.
|
| 884 |
+
"""
|
| 885 |
+
if bounds is None:
|
| 886 |
+
return Bounds(np.full(n, -np.inf), np.full(n, np.inf))
|
| 887 |
+
elif isinstance(bounds, Bounds):
|
| 888 |
+
if bounds.lb.shape != (n,) or bounds.ub.shape != (n,):
|
| 889 |
+
raise ValueError(f"The bounds must have {n} elements.")
|
| 890 |
+
return Bounds(bounds.lb, bounds.ub)
|
| 891 |
+
elif hasattr(bounds, "__len__"):
|
| 892 |
+
bounds = np.asarray(bounds)
|
| 893 |
+
if bounds.shape != (n, 2):
|
| 894 |
+
raise ValueError(
|
| 895 |
+
"The shape of the bounds is not compatible with "
|
| 896 |
+
"the number of variables."
|
| 897 |
+
)
|
| 898 |
+
return Bounds(bounds[:, 0], bounds[:, 1])
|
| 899 |
+
else:
|
| 900 |
+
raise TypeError(
|
| 901 |
+
"The bounds must be an instance of "
|
| 902 |
+
"scipy.optimize.Bounds or an array-like object."
|
| 903 |
+
)
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
def _get_constraints(constraints):
|
| 907 |
+
"""
|
| 908 |
+
Extract the linear and nonlinear constraints.
|
| 909 |
+
"""
|
| 910 |
+
if isinstance(constraints, dict) or not hasattr(constraints, "__len__"):
|
| 911 |
+
constraints = (constraints,)
|
| 912 |
+
|
| 913 |
+
# Extract the linear and nonlinear constraints.
|
| 914 |
+
linear_constraints = []
|
| 915 |
+
nonlinear_constraints = []
|
| 916 |
+
for constraint in constraints:
|
| 917 |
+
if isinstance(constraint, LinearConstraint):
|
| 918 |
+
lb = exact_1d_array(
|
| 919 |
+
constraint.lb,
|
| 920 |
+
"The lower bound of the linear constraints must be a vector.",
|
| 921 |
+
)
|
| 922 |
+
ub = exact_1d_array(
|
| 923 |
+
constraint.ub,
|
| 924 |
+
"The upper bound of the linear constraints must be a vector.",
|
| 925 |
+
)
|
| 926 |
+
linear_constraints.append(
|
| 927 |
+
LinearConstraint(
|
| 928 |
+
constraint.A,
|
| 929 |
+
*np.broadcast_arrays(lb, ub),
|
| 930 |
+
)
|
| 931 |
+
)
|
| 932 |
+
elif isinstance(constraint, NonlinearConstraint):
|
| 933 |
+
lb = exact_1d_array(
|
| 934 |
+
constraint.lb,
|
| 935 |
+
"The lower bound of the "
|
| 936 |
+
"nonlinear constraints must be a "
|
| 937 |
+
"vector.",
|
| 938 |
+
)
|
| 939 |
+
ub = exact_1d_array(
|
| 940 |
+
constraint.ub,
|
| 941 |
+
"The upper bound of the "
|
| 942 |
+
"nonlinear constraints must be a "
|
| 943 |
+
"vector.",
|
| 944 |
+
)
|
| 945 |
+
nonlinear_constraints.append(
|
| 946 |
+
NonlinearConstraint(
|
| 947 |
+
constraint.fun,
|
| 948 |
+
*np.broadcast_arrays(lb, ub),
|
| 949 |
+
)
|
| 950 |
+
)
|
| 951 |
+
elif isinstance(constraint, dict):
|
| 952 |
+
if "type" not in constraint or constraint["type"] not in (
|
| 953 |
+
"eq",
|
| 954 |
+
"ineq",
|
| 955 |
+
):
|
| 956 |
+
raise ValueError('The constraint type must be "eq" or "ineq".')
|
| 957 |
+
if "fun" not in constraint or not callable(constraint["fun"]):
|
| 958 |
+
raise ValueError("The constraint function must be callable.")
|
| 959 |
+
nonlinear_constraints.append(
|
| 960 |
+
{
|
| 961 |
+
"fun": constraint["fun"],
|
| 962 |
+
"type": constraint["type"],
|
| 963 |
+
"args": constraint.get("args", ()),
|
| 964 |
+
}
|
| 965 |
+
)
|
| 966 |
+
else:
|
| 967 |
+
raise TypeError(
|
| 968 |
+
"The constraints must be instances of "
|
| 969 |
+
"scipy.optimize.LinearConstraint, "
|
| 970 |
+
"scipy.optimize.NonlinearConstraint, or dict."
|
| 971 |
+
)
|
| 972 |
+
return linear_constraints, nonlinear_constraints
|
| 973 |
+
|
| 974 |
+
|
| 975 |
+
def _set_default_options(options, n):
    """
    Set the default options.

    Validate the user-supplied entries of ``options`` and fill every missing
    entry with its default, mutating ``options`` in place. ``n`` is the
    number of variables; it sizes the defaults for the number of
    interpolation points and the evaluation/iteration budgets.

    Raises
    ------
    ValueError
        If a supplied option has an invalid value.

    NOTE(review): reads index ``options`` with the enum member
    (``options[Options.RHOBEG]``) while writes use the plain string key
    (``options[Options.RHOBEG.value]``) — this presumably relies on
    ``Options`` being a str-based enum whose members hash equal to their
    values; confirm against the settings module.
    """
    # Validate each trust-region radius individually, then enforce the
    # ordering rhoend <= rhobeg, deriving whichever one is missing from the
    # one supplied (or both from the defaults).
    if Options.RHOBEG in options and options[Options.RHOBEG] <= 0.0:
        raise ValueError("The initial trust-region radius must be positive.")
    if Options.RHOEND in options and options[Options.RHOEND] < 0.0:
        raise ValueError("The final trust-region radius must be nonnegative.")
    if Options.RHOBEG in options and Options.RHOEND in options:
        if options[Options.RHOBEG] < options[Options.RHOEND]:
            raise ValueError(
                "The initial trust-region radius must be greater "
                "than or equal to the final trust-region radius."
            )
    elif Options.RHOBEG in options:
        # Only rhobeg given: clip the default rhoend so it does not exceed it.
        options[Options.RHOEND.value] = np.min(
            [
                DEFAULT_OPTIONS[Options.RHOEND],
                options[Options.RHOBEG],
            ]
        )
    elif Options.RHOEND in options:
        # Only rhoend given: raise the default rhobeg to at least rhoend.
        options[Options.RHOBEG.value] = np.max(
            [
                DEFAULT_OPTIONS[Options.RHOBEG],
                options[Options.RHOEND],
            ]
        )
    else:
        options[Options.RHOBEG.value] = DEFAULT_OPTIONS[Options.RHOBEG]
        options[Options.RHOEND.value] = DEFAULT_OPTIONS[Options.RHOEND]
    options[Options.RHOBEG.value] = float(options[Options.RHOBEG])
    options[Options.RHOEND.value] = float(options[Options.RHOEND])
    # Number of interpolation points: must be positive and at most
    # (n + 1)(n + 2) / 2 (a full quadratic model in n variables).
    if Options.NPT in options and options[Options.NPT] <= 0:
        raise ValueError("The number of interpolation points must be "
                         "positive.")
    if (
        Options.NPT in options
        and options[Options.NPT] > ((n + 1) * (n + 2)) // 2
    ):
        raise ValueError(
            f"The number of interpolation points must be at most "
            f"{((n + 1) * (n + 2)) // 2}."
        )
    options.setdefault(Options.NPT.value, DEFAULT_OPTIONS[Options.NPT](n))
    options[Options.NPT.value] = int(options[Options.NPT])
    # Evaluation budget: default is at least npt + 1 (the evaluations needed
    # to build the initial interpolation set).
    if Options.MAX_EVAL in options and options[Options.MAX_EVAL] <= 0:
        raise ValueError(
            "The maximum number of function evaluations must be positive."
        )
    options.setdefault(
        Options.MAX_EVAL.value,
        np.max(
            [
                DEFAULT_OPTIONS[Options.MAX_EVAL](n),
                options[Options.NPT] + 1,
            ]
        ),
    )
    options[Options.MAX_EVAL.value] = int(options[Options.MAX_EVAL])
    # Iteration budget.
    if Options.MAX_ITER in options and options[Options.MAX_ITER] <= 0:
        raise ValueError("The maximum number of iterations must be positive.")
    options.setdefault(
        Options.MAX_ITER.value,
        DEFAULT_OPTIONS[Options.MAX_ITER](n),
    )
    options[Options.MAX_ITER.value] = int(options[Options.MAX_ITER])
    # Remaining scalar options: fill the default, then coerce to the
    # expected type (float/bool/int).
    options.setdefault(Options.TARGET.value, DEFAULT_OPTIONS[Options.TARGET])
    options[Options.TARGET.value] = float(options[Options.TARGET])
    options.setdefault(
        Options.FEASIBILITY_TOL.value,
        DEFAULT_OPTIONS[Options.FEASIBILITY_TOL],
    )
    options[Options.FEASIBILITY_TOL.value] = float(
        options[Options.FEASIBILITY_TOL]
    )
    options.setdefault(Options.VERBOSE.value, DEFAULT_OPTIONS[Options.VERBOSE])
    options[Options.VERBOSE.value] = bool(options[Options.VERBOSE])
    options.setdefault(Options.SCALE.value, DEFAULT_OPTIONS[Options.SCALE])
    options[Options.SCALE.value] = bool(options[Options.SCALE])
    options.setdefault(
        Options.FILTER_SIZE.value,
        DEFAULT_OPTIONS[Options.FILTER_SIZE],
    )
    options[Options.FILTER_SIZE.value] = int(options[Options.FILTER_SIZE])
    options.setdefault(
        Options.STORE_HISTORY.value,
        DEFAULT_OPTIONS[Options.STORE_HISTORY],
    )
    options[Options.STORE_HISTORY.value] = bool(options[Options.STORE_HISTORY])
    options.setdefault(
        Options.HISTORY_SIZE.value,
        DEFAULT_OPTIONS[Options.HISTORY_SIZE],
    )
    options[Options.HISTORY_SIZE.value] = int(options[Options.HISTORY_SIZE])
    options.setdefault(Options.DEBUG.value, DEFAULT_OPTIONS[Options.DEBUG])
    options[Options.DEBUG.value] = bool(options[Options.DEBUG])

    # Check whether there are any unknown options (warn, don't fail).
    for key in options:
        if key not in Options.__members__.values():
            warnings.warn(f"Unknown option: {key}.", RuntimeWarning, 3)
|
| 1077 |
+
|
| 1078 |
+
|
| 1079 |
+
def _set_default_constants(**kwargs):
    """
    Set the default constants.

    Build the dictionary of algorithmic constants from the user-supplied
    keyword arguments, validating each supplied value and filling every
    missing entry with its default. Interdependent pairs (e.g.
    ``decrease_radius_threshold`` < ``increase_radius_factor``) are
    cross-validated when both are supplied, and derived from each other when
    only one is.

    Returns
    -------
    dict
        The validated constants, keyed by the string values of
        ``Constants`` members.

    Raises
    ------
    ValueError
        If a supplied constant has an invalid value.

    NOTE(review): as in ``_set_default_options``, writes use the string key
    (``Constants.X.value``) while reads use the enum member — presumably
    ``Constants`` is a str-based enum; confirm against the settings module.
    """
    constants = dict(kwargs)
    # Trust-region radius update factors.
    constants.setdefault(
        Constants.DECREASE_RADIUS_FACTOR.value,
        DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_FACTOR],
    )
    constants[Constants.DECREASE_RADIUS_FACTOR.value] = float(
        constants[Constants.DECREASE_RADIUS_FACTOR]
    )
    if (
        constants[Constants.DECREASE_RADIUS_FACTOR] <= 0.0
        or constants[Constants.DECREASE_RADIUS_FACTOR] >= 1.0
    ):
        raise ValueError(
            "The constant decrease_radius_factor must be in the interval "
            "(0, 1)."
        )
    constants.setdefault(
        Constants.INCREASE_RADIUS_THRESHOLD.value,
        DEFAULT_CONSTANTS[Constants.INCREASE_RADIUS_THRESHOLD],
    )
    constants[Constants.INCREASE_RADIUS_THRESHOLD.value] = float(
        constants[Constants.INCREASE_RADIUS_THRESHOLD]
    )
    if constants[Constants.INCREASE_RADIUS_THRESHOLD] <= 1.0:
        raise ValueError(
            "The constant increase_radius_threshold must be greater than 1."
        )
    # increase_radius_factor and decrease_radius_threshold must both exceed 1
    # and satisfy decrease_radius_threshold < increase_radius_factor; a
    # missing one is derived from the one supplied.
    if (
        Constants.INCREASE_RADIUS_FACTOR in constants
        and constants[Constants.INCREASE_RADIUS_FACTOR] <= 1.0
    ):
        raise ValueError(
            "The constant increase_radius_factor must be greater than 1."
        )
    if (
        Constants.DECREASE_RADIUS_THRESHOLD in constants
        and constants[Constants.DECREASE_RADIUS_THRESHOLD] <= 1.0
    ):
        raise ValueError(
            "The constant decrease_radius_threshold must be greater than 1."
        )
    if (
        Constants.INCREASE_RADIUS_FACTOR in constants
        and Constants.DECREASE_RADIUS_THRESHOLD in constants
    ):
        if (
            constants[Constants.DECREASE_RADIUS_THRESHOLD]
            >= constants[Constants.INCREASE_RADIUS_FACTOR]
        ):
            raise ValueError(
                "The constant decrease_radius_threshold must be "
                "less than increase_radius_factor."
            )
    elif Constants.INCREASE_RADIUS_FACTOR in constants:
        # Keep the threshold strictly below the factor (midpoint with 1).
        constants[Constants.DECREASE_RADIUS_THRESHOLD.value] = np.min(
            [
                DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_THRESHOLD],
                0.5 * (1.0 + constants[Constants.INCREASE_RADIUS_FACTOR]),
            ]
        )
    elif Constants.DECREASE_RADIUS_THRESHOLD in constants:
        # Keep the factor strictly above the threshold.
        constants[Constants.INCREASE_RADIUS_FACTOR.value] = np.max(
            [
                DEFAULT_CONSTANTS[Constants.INCREASE_RADIUS_FACTOR],
                2.0 * constants[Constants.DECREASE_RADIUS_THRESHOLD],
            ]
        )
    else:
        constants[Constants.INCREASE_RADIUS_FACTOR.value] = DEFAULT_CONSTANTS[
            Constants.INCREASE_RADIUS_FACTOR
        ]
        constants[Constants.DECREASE_RADIUS_THRESHOLD.value] = (
            DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_THRESHOLD])
    # Resolution (lower trust-region bound) update factors.
    constants.setdefault(
        Constants.DECREASE_RESOLUTION_FACTOR.value,
        DEFAULT_CONSTANTS[Constants.DECREASE_RESOLUTION_FACTOR],
    )
    constants[Constants.DECREASE_RESOLUTION_FACTOR.value] = float(
        constants[Constants.DECREASE_RESOLUTION_FACTOR]
    )
    if (
        constants[Constants.DECREASE_RESOLUTION_FACTOR] <= 0.0
        or constants[Constants.DECREASE_RESOLUTION_FACTOR] >= 1.0
    ):
        raise ValueError(
            "The constant decrease_resolution_factor must be in the interval "
            "(0, 1)."
        )
    # moderate_resolution_threshold <= large_resolution_threshold, both > 1;
    # a missing one is derived from the one supplied.
    if (
        Constants.LARGE_RESOLUTION_THRESHOLD in constants
        and constants[Constants.LARGE_RESOLUTION_THRESHOLD] <= 1.0
    ):
        raise ValueError(
            "The constant large_resolution_threshold must be greater than 1."
        )
    if (
        Constants.MODERATE_RESOLUTION_THRESHOLD in constants
        and constants[Constants.MODERATE_RESOLUTION_THRESHOLD] <= 1.0
    ):
        raise ValueError(
            "The constant moderate_resolution_threshold must be greater than "
            "1."
        )
    if (
        Constants.LARGE_RESOLUTION_THRESHOLD in constants
        and Constants.MODERATE_RESOLUTION_THRESHOLD in constants
    ):
        if (
            constants[Constants.MODERATE_RESOLUTION_THRESHOLD]
            > constants[Constants.LARGE_RESOLUTION_THRESHOLD]
        ):
            raise ValueError(
                "The constant moderate_resolution_threshold "
                "must be at most large_resolution_threshold."
            )
    elif Constants.LARGE_RESOLUTION_THRESHOLD in constants:
        constants[Constants.MODERATE_RESOLUTION_THRESHOLD.value] = np.min(
            [
                DEFAULT_CONSTANTS[Constants.MODERATE_RESOLUTION_THRESHOLD],
                constants[Constants.LARGE_RESOLUTION_THRESHOLD],
            ]
        )
    elif Constants.MODERATE_RESOLUTION_THRESHOLD in constants:
        constants[Constants.LARGE_RESOLUTION_THRESHOLD.value] = np.max(
            [
                DEFAULT_CONSTANTS[Constants.LARGE_RESOLUTION_THRESHOLD],
                constants[Constants.MODERATE_RESOLUTION_THRESHOLD],
            ]
        )
    else:
        constants[Constants.LARGE_RESOLUTION_THRESHOLD.value] = (
            DEFAULT_CONSTANTS[Constants.LARGE_RESOLUTION_THRESHOLD]
        )
        constants[Constants.MODERATE_RESOLUTION_THRESHOLD.value] = (
            DEFAULT_CONSTANTS[Constants.MODERATE_RESOLUTION_THRESHOLD]
        )
    # Trust-region ratio thresholds: low_ratio <= high_ratio, both in (0, 1);
    # a missing one is derived from the one supplied.
    if Constants.LOW_RATIO in constants and (
        constants[Constants.LOW_RATIO] <= 0.0
        or constants[Constants.LOW_RATIO] >= 1.0
    ):
        raise ValueError(
            "The constant low_ratio must be in the interval (0, 1)."
        )
    if Constants.HIGH_RATIO in constants and (
        constants[Constants.HIGH_RATIO] <= 0.0
        or constants[Constants.HIGH_RATIO] >= 1.0
    ):
        raise ValueError(
            "The constant high_ratio must be in the interval (0, 1)."
        )
    if Constants.LOW_RATIO in constants and Constants.HIGH_RATIO in constants:
        if constants[Constants.LOW_RATIO] > constants[Constants.HIGH_RATIO]:
            raise ValueError(
                "The constant low_ratio must be at most high_ratio."
            )
    elif Constants.LOW_RATIO in constants:
        constants[Constants.HIGH_RATIO.value] = np.max(
            [
                DEFAULT_CONSTANTS[Constants.HIGH_RATIO],
                constants[Constants.LOW_RATIO],
            ]
        )
    elif Constants.HIGH_RATIO in constants:
        constants[Constants.LOW_RATIO.value] = np.min(
            [
                DEFAULT_CONSTANTS[Constants.LOW_RATIO],
                constants[Constants.HIGH_RATIO],
            ]
        )
    else:
        constants[Constants.LOW_RATIO.value] = DEFAULT_CONSTANTS[
            Constants.LOW_RATIO
        ]
        constants[Constants.HIGH_RATIO.value] = DEFAULT_CONSTANTS[
            Constants.HIGH_RATIO
        ]
    constants.setdefault(
        Constants.VERY_LOW_RATIO.value,
        DEFAULT_CONSTANTS[Constants.VERY_LOW_RATIO],
    )
    constants[Constants.VERY_LOW_RATIO.value] = float(
        constants[Constants.VERY_LOW_RATIO]
    )
    if (
        constants[Constants.VERY_LOW_RATIO] <= 0.0
        or constants[Constants.VERY_LOW_RATIO] >= 1.0
    ):
        raise ValueError(
            "The constant very_low_ratio must be in the interval (0, 1)."
        )
    # Penalty update: penalty_increase_factor >= penalty_increase_threshold,
    # threshold >= 1, factor > 1; a missing one is derived from the other.
    if (
        Constants.PENALTY_INCREASE_THRESHOLD in constants
        and constants[Constants.PENALTY_INCREASE_THRESHOLD] < 1.0
    ):
        raise ValueError(
            "The constant penalty_increase_threshold must be "
            "greater than or equal to 1."
        )
    if (
        Constants.PENALTY_INCREASE_FACTOR in constants
        and constants[Constants.PENALTY_INCREASE_FACTOR] <= 1.0
    ):
        raise ValueError(
            "The constant penalty_increase_factor must be greater than 1."
        )
    if (
        Constants.PENALTY_INCREASE_THRESHOLD in constants
        and Constants.PENALTY_INCREASE_FACTOR in constants
    ):
        if (
            constants[Constants.PENALTY_INCREASE_FACTOR]
            < constants[Constants.PENALTY_INCREASE_THRESHOLD]
        ):
            raise ValueError(
                "The constant penalty_increase_factor must be "
                "greater than or equal to "
                "penalty_increase_threshold."
            )
    elif Constants.PENALTY_INCREASE_THRESHOLD in constants:
        constants[Constants.PENALTY_INCREASE_FACTOR.value] = np.max(
            [
                DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_FACTOR],
                constants[Constants.PENALTY_INCREASE_THRESHOLD],
            ]
        )
    elif Constants.PENALTY_INCREASE_FACTOR in constants:
        constants[Constants.PENALTY_INCREASE_THRESHOLD.value] = np.min(
            [
                DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_THRESHOLD],
                constants[Constants.PENALTY_INCREASE_FACTOR],
            ]
        )
    else:
        constants[Constants.PENALTY_INCREASE_THRESHOLD.value] = (
            DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_THRESHOLD]
        )
        constants[Constants.PENALTY_INCREASE_FACTOR.value] = DEFAULT_CONSTANTS[
            Constants.PENALTY_INCREASE_FACTOR
        ]
    # Independent scalar constants: fill default, coerce, validate range.
    constants.setdefault(
        Constants.SHORT_STEP_THRESHOLD.value,
        DEFAULT_CONSTANTS[Constants.SHORT_STEP_THRESHOLD],
    )
    constants[Constants.SHORT_STEP_THRESHOLD.value] = float(
        constants[Constants.SHORT_STEP_THRESHOLD]
    )
    if (
        constants[Constants.SHORT_STEP_THRESHOLD] <= 0.0
        or constants[Constants.SHORT_STEP_THRESHOLD] >= 1.0
    ):
        raise ValueError(
            "The constant short_step_threshold must be in the interval (0, 1)."
        )
    constants.setdefault(
        Constants.LOW_RADIUS_FACTOR.value,
        DEFAULT_CONSTANTS[Constants.LOW_RADIUS_FACTOR],
    )
    constants[Constants.LOW_RADIUS_FACTOR.value] = float(
        constants[Constants.LOW_RADIUS_FACTOR]
    )
    if (
        constants[Constants.LOW_RADIUS_FACTOR] <= 0.0
        or constants[Constants.LOW_RADIUS_FACTOR] >= 1.0
    ):
        raise ValueError(
            "The constant low_radius_factor must be in the interval (0, 1)."
        )
    constants.setdefault(
        Constants.BYRD_OMOJOKUN_FACTOR.value,
        DEFAULT_CONSTANTS[Constants.BYRD_OMOJOKUN_FACTOR],
    )
    constants[Constants.BYRD_OMOJOKUN_FACTOR.value] = float(
        constants[Constants.BYRD_OMOJOKUN_FACTOR]
    )
    if (
        constants[Constants.BYRD_OMOJOKUN_FACTOR] <= 0.0
        or constants[Constants.BYRD_OMOJOKUN_FACTOR] >= 1.0
    ):
        raise ValueError(
            "The constant byrd_omojokun_factor must be in the interval (0, 1)."
        )
    constants.setdefault(
        Constants.THRESHOLD_RATIO_CONSTRAINTS.value,
        DEFAULT_CONSTANTS[Constants.THRESHOLD_RATIO_CONSTRAINTS],
    )
    constants[Constants.THRESHOLD_RATIO_CONSTRAINTS.value] = float(
        constants[Constants.THRESHOLD_RATIO_CONSTRAINTS]
    )
    if constants[Constants.THRESHOLD_RATIO_CONSTRAINTS] <= 1.0:
        raise ValueError(
            "The constant threshold_ratio_constraints must be greater than 1."
        )
    constants.setdefault(
        Constants.LARGE_SHIFT_FACTOR.value,
        DEFAULT_CONSTANTS[Constants.LARGE_SHIFT_FACTOR],
    )
    constants[Constants.LARGE_SHIFT_FACTOR.value] = float(
        constants[Constants.LARGE_SHIFT_FACTOR]
    )
    if constants[Constants.LARGE_SHIFT_FACTOR] < 0.0:
        raise ValueError("The constant large_shift_factor must be "
                         "nonnegative.")
    constants.setdefault(
        Constants.LARGE_GRADIENT_FACTOR.value,
        DEFAULT_CONSTANTS[Constants.LARGE_GRADIENT_FACTOR],
    )
    constants[Constants.LARGE_GRADIENT_FACTOR.value] = float(
        constants[Constants.LARGE_GRADIENT_FACTOR]
    )
    if constants[Constants.LARGE_GRADIENT_FACTOR] <= 1.0:
        raise ValueError(
            "The constant large_gradient_factor must be greater than 1."
        )
    constants.setdefault(
        Constants.RESOLUTION_FACTOR.value,
        DEFAULT_CONSTANTS[Constants.RESOLUTION_FACTOR],
    )
    constants[Constants.RESOLUTION_FACTOR.value] = float(
        constants[Constants.RESOLUTION_FACTOR]
    )
    if constants[Constants.RESOLUTION_FACTOR] <= 1.0:
        raise ValueError(
            "The constant resolution_factor must be greater than 1."
        )
    constants.setdefault(
        Constants.IMPROVE_TCG.value,
        DEFAULT_CONSTANTS[Constants.IMPROVE_TCG],
    )
    constants[Constants.IMPROVE_TCG.value] = bool(
        constants[Constants.IMPROVE_TCG]
    )

    # Check whether there are any unknown constants among the user-supplied
    # keyword arguments (warn, don't fail).
    for key in kwargs:
        if key not in Constants.__members__.values():
            warnings.warn(f"Unknown constant: {key}.", RuntimeWarning, 3)
    return constants
|
| 1420 |
+
|
| 1421 |
+
|
| 1422 |
+
def _eval(pb, framework, step, options):
    """
    Evaluate the objective and constraint functions.

    Evaluate the problem at ``framework.x_best + step`` and return the
    objective value together with the inequality and equality constraint
    values.

    Raises
    ------
    MaxEvalError
        If the evaluation budget is already exhausted.
    TargetSuccess
        If the new point is feasible (within the tolerance) and reaches the
        target objective value.
    FeasibleSuccess
        If the problem is a feasibility problem and the new point is
        feasible within the tolerance.
    """
    # Respect the evaluation budget before spending another evaluation.
    if pb.n_eval >= options[Options.MAX_EVAL]:
        raise MaxEvalError
    x_try = framework.x_best + step
    fun_try, cub_try, ceq_try = pb(x_try, framework.penalty)
    violation = pb.maxcv(x_try, cub_try, ceq_try)
    feasible = violation <= options[Options.FEASIBILITY_TOL]
    if feasible and fun_try <= options[Options.TARGET]:
        raise TargetSuccess
    if feasible and pb.is_feasibility:
        raise FeasibleSuccess
    return fun_try, cub_try, ceq_try
|
| 1439 |
+
|
| 1440 |
+
|
| 1441 |
+
def _build_result(pb, penalty, success, status, n_iter, options):
    """
    Build the result of the optimization process.

    Assemble the `OptimizeResult` returned to the caller from the best
    point visited, the exit status, and the iteration counters, and print
    a final summary if the verbose option is set.
    """
    # Retrieve the best point visited, judged with the given penalty value.
    x_best, fun_best, maxcv_best = pb.best_eval(penalty)

    # A run only counts as successful if the reported values are finite and,
    # unless the stopping reason already certifies feasibility, the best
    # point is feasible within the tolerance.
    success = success and np.isfinite(fun_best) and np.isfinite(maxcv_best)
    if status not in [ExitStatus.TARGET_SUCCESS, ExitStatus.FEASIBLE_SUCCESS]:
        success = success and maxcv_best <= options[Options.FEASIBILITY_TOL]

    # Human-readable description of each exit status.
    messages = {
        ExitStatus.RADIUS_SUCCESS: "The lower bound for the trust-region "
        "radius has been reached",
        ExitStatus.TARGET_SUCCESS: "The target objective function value has "
        "been reached",
        ExitStatus.FIXED_SUCCESS: "All variables are fixed by the bound "
        "constraints",
        ExitStatus.CALLBACK_SUCCESS: "The callback requested to stop the "
        "optimization procedure",
        ExitStatus.FEASIBLE_SUCCESS: "The feasibility problem received has "
        "been solved successfully",
        ExitStatus.MAX_EVAL_WARNING: "The maximum number of function "
        "evaluations has been exceeded",
        ExitStatus.MAX_ITER_WARNING: "The maximum number of iterations has "
        "been exceeded",
        ExitStatus.INFEASIBLE_ERROR: "The bound constraints are infeasible",
        ExitStatus.LINALG_ERROR: "A linear algebra error occurred",
    }
    result = OptimizeResult()
    result.message = messages.get(status, "Unknown exit status")
    result.success = success
    result.status = status.value
    result.x = pb.build_x(x_best)
    result.fun = fun_best
    result.maxcv = maxcv_best
    result.nfev = pb.n_eval
    result.nit = n_iter
    if options[Options.STORE_HISTORY]:
        result.fun_history = pb.fun_history
        result.maxcv_history = pb.maxcv_history

    # Print the result if requested.
    if options[Options.VERBOSE]:
        _print_step(
            result.message,
            pb,
            result.x,
            result.fun,
            result.maxcv,
            result.nfev,
            result.nit,
        )
    return result
|
| 1492 |
+
|
| 1493 |
+
|
| 1494 |
+
def _print_step(message, pb, x, fun_val, r_val, n_eval, n_iter):
    """
    Print information about the current state of the optimization process.

    Emit a blank line, the given message, the evaluation and iteration
    counters, the least objective value (skipped for feasibility problems),
    the maximum constraint violation, and the current point formatted with
    the module's printing options.
    """
    summary = [
        "",
        f"{message}.",
        f"Number of function evaluations: {n_eval}.",
        f"Number of iterations: {n_iter}.",
    ]
    # Feasibility problems have no meaningful objective value to report.
    if not pb.is_feasibility:
        summary.append(f"Least value of {pb.fun_name}: {fun_val}.")
    summary.append(f"Maximum constraint violation: {r_val}.")
    for line in summary:
        print(line)
    with np.printoptions(**PRINT_OPTIONS):
        print(f"Corresponding point: {x}.")
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py
ADDED
|
@@ -0,0 +1,1529 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy.linalg import eigh
|
| 5 |
+
|
| 6 |
+
from .settings import Options
|
| 7 |
+
from .utils import MaxEvalError, TargetSuccess, FeasibleSuccess
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
EPS = np.finfo(float).eps
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Interpolation:
    """
    Interpolation set.

    This class stores a base point around which the models are expanded and
    the interpolation points. The coordinates of the interpolation points are
    relative to the base point.
    """

    def __init__(self, pb, options):
        """
        Initialize the interpolation set.

        Parameters
        ----------
        pb : `cobyqa.problem.Problem`
            Problem to be solved.
        options : dict
            Options of the solver. May be modified in place: ``rhobeg`` and
            ``rhoend`` are clamped when the bound box is too small to contain
            a trust region of radius ``rhobeg``.
        """
        # Reduce the initial trust-region radius if necessary, so that the
        # initial trust region fits inside the bound constraints.
        self._debug = options[Options.DEBUG]
        max_radius = 0.5 * np.min(pb.bounds.xu - pb.bounds.xl)
        if options[Options.RHOBEG] > max_radius:
            # NOTE(review): reads index `options` with the enum member while
            # writes use `.value` — presumably equivalent keys (string enum);
            # confirm against `cobyqa.settings.Options`.
            options[Options.RHOBEG.value] = max_radius
            options[Options.RHOEND.value] = np.min(
                [
                    options[Options.RHOEND],
                    max_radius,
                ]
            )

        # Set the initial point around which the models are expanded.
        # Components within half of rhobeg of a bound are snapped onto the
        # bound; components within rhobeg of a bound are pushed to exactly
        # rhobeg away from it (clipped to the opposite bound).
        self._x_base = np.copy(pb.x0)
        very_close_xl_idx = (
            self.x_base <= pb.bounds.xl + 0.5 * options[Options.RHOBEG]
        )
        self.x_base[very_close_xl_idx] = pb.bounds.xl[very_close_xl_idx]
        close_xl_idx = (
            pb.bounds.xl + 0.5 * options[Options.RHOBEG] < self.x_base
        ) & (self.x_base <= pb.bounds.xl + options[Options.RHOBEG])
        self.x_base[close_xl_idx] = np.minimum(
            pb.bounds.xl[close_xl_idx] + options[Options.RHOBEG],
            pb.bounds.xu[close_xl_idx],
        )
        very_close_xu_idx = (
            self.x_base >= pb.bounds.xu - 0.5 * options[Options.RHOBEG]
        )
        self.x_base[very_close_xu_idx] = pb.bounds.xu[very_close_xu_idx]
        close_xu_idx = (
            self.x_base < pb.bounds.xu - 0.5 * options[Options.RHOBEG]
        ) & (pb.bounds.xu - options[Options.RHOBEG] <= self.x_base)
        self.x_base[close_xu_idx] = np.maximum(
            pb.bounds.xu[close_xu_idx] - options[Options.RHOBEG],
            pb.bounds.xl[close_xu_idx],
        )

        # Set the initial interpolation set. Point 0 is the base point
        # itself (column of zeros). Points 1..n move one coordinate by
        # +/- rhobeg (sign flipped when the base sits on the upper bound);
        # points n+1..2n move the same coordinates the other way (or by
        # 2*rhobeg when the base sits on a bound); any remaining points move
        # two coordinates at once, reusing the displacements chosen above.
        self._xpt = np.zeros((pb.n, options[Options.NPT]))
        for k in range(1, options[Options.NPT]):
            if k <= pb.n:
                if very_close_xu_idx[k - 1]:
                    self.xpt[k - 1, k] = -options[Options.RHOBEG]
                else:
                    self.xpt[k - 1, k] = options[Options.RHOBEG]
            elif k <= 2 * pb.n:
                if very_close_xl_idx[k - pb.n - 1]:
                    self.xpt[k - pb.n - 1, k] = 2.0 * options[Options.RHOBEG]
                elif very_close_xu_idx[k - pb.n - 1]:
                    self.xpt[k - pb.n - 1, k] = -2.0 * options[Options.RHOBEG]
                else:
                    self.xpt[k - pb.n - 1, k] = -options[Options.RHOBEG]
            else:
                # Combine two earlier single-coordinate displacements.
                spread = (k - pb.n - 1) // pb.n
                k1 = k - (1 + spread) * pb.n - 1
                k2 = (k1 + spread) % pb.n
                self.xpt[k1, k] = self.xpt[k1, k1 + 1]
                self.xpt[k2, k] = self.xpt[k2, k2 + 1]

    @property
    def n(self):
        """
        Number of variables.

        Returns
        -------
        int
            Number of variables.
        """
        return self.xpt.shape[0]

    @property
    def npt(self):
        """
        Number of interpolation points.

        Returns
        -------
        int
            Number of interpolation points.
        """
        return self.xpt.shape[1]

    @property
    def xpt(self):
        """
        Interpolation points, stored column-wise relative to `x_base`.

        Returns
        -------
        `numpy.ndarray`, shape (n, npt)
            Interpolation points.
        """
        return self._xpt

    @xpt.setter
    def xpt(self, xpt):
        """
        Set the interpolation points.

        Parameters
        ----------
        xpt : `numpy.ndarray`, shape (n, npt)
            New interpolation points, relative to `x_base`.
        """
        if self._debug:
            assert xpt.shape == (
                self.n,
                self.npt,
            ), "The shape of `xpt` is not valid."
        self._xpt = xpt

    @property
    def x_base(self):
        """
        Base point around which the models are expanded.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Base point around which the models are expanded.
        """
        return self._x_base

    @x_base.setter
    def x_base(self, x_base):
        """
        Set the base point around which the models are expanded.

        Parameters
        ----------
        x_base : `numpy.ndarray`, shape (n,)
            New base point around which the models are expanded.
        """
        if self._debug:
            assert x_base.shape == (
                self.n,
            ), "The shape of `x_base` is not valid."
        self._x_base = x_base

    def point(self, k):
        """
        Get the `k`-th interpolation point.

        The returned point is relative to the origin (i.e., the stored
        relative coordinates are shifted by the base point).

        Parameters
        ----------
        k : int
            Index of the interpolation point.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            `k`-th interpolation point.
        """
        if self._debug:
            assert 0 <= k < self.npt, "The index `k` is not valid."
        return self.x_base + self.xpt[:, k]
# Module-level memoization of the most recent interpolation system: keyed by
# the exact interpolation-point matrix, it stores the scaled left-hand side,
# the right scaling vector, and the eigendecomposition.
_cache = {"xpt": None, "a": None, "right_scaling": None, "eigh": None}


def build_system(interpolation):
    """
    Build the left-hand side matrix of the interpolation system.

    The returned matrix stores ``W * diag(right_scaling)``, where ``W`` is
    the theoretical matrix of the interpolation system. The right scaling is
    chosen to keep the elements of the matrix well-balanced. The result (and
    its eigendecomposition) is memoized in the module-level ``_cache`` and
    reused as long as ``interpolation.xpt`` is unchanged.

    Parameters
    ----------
    interpolation : `cobyqa.models.Interpolation`
        Interpolation set.
    """
    points = interpolation.xpt
    hit = _cache["xpt"] is not None and np.array_equal(points, _cache["xpt"])
    if hit:
        # NOTE(review): the cached arrays are returned without copying, so
        # callers must not mutate them.
        return _cache["a"], _cache["right_scaling"], _cache["eigh"]

    # Scale the directions from the base point to the interpolation points
    # to avoid numerical difficulties; `initial=EPS` guards against a zero
    # norm when all points coincide with the base point.
    norm_scale = np.max(np.linalg.norm(points, axis=0), initial=EPS)
    scaled_xpt = points / norm_scale
    n, npt = scaled_xpt.shape

    size = npt + n + 1
    lhs = np.zeros((size, size))
    lhs[:npt, :npt] = 0.5 * (scaled_xpt.T @ scaled_xpt) ** 2.0
    lhs[:npt, npt] = 1.0
    lhs[npt, :npt] = 1.0
    lhs[:npt, npt + 1:] = scaled_xpt.T
    lhs[npt + 1:, :npt] = scaled_xpt

    # Right scaling diagonal, undoing the point scaling block-wise.
    col_scaling = np.empty(size)
    col_scaling[:npt] = 1.0 / norm_scale**2.0
    col_scaling[npt] = norm_scale**2.0
    col_scaling[npt + 1:] = norm_scale

    # Eigendecomposition of the symmetric system matrix.
    spectrum = eigh(lhs, check_finite=False)

    _cache["xpt"] = np.copy(points)
    _cache["a"] = np.copy(lhs)
    _cache["right_scaling"] = np.copy(col_scaling)
    _cache["eigh"] = spectrum

    return lhs, col_scaling, spectrum
class Quadratic:
    """
    Quadratic model.

    This class stores the Hessian matrix of the quadratic model using the
    implicit/explicit representation designed by Powell for NEWUOA [1]_.

    References
    ----------
    .. [1] M. J. D. Powell. The NEWUOA software for unconstrained optimization
       without derivatives. In G. Di Pillo and M. Roma, editors, *Large-Scale
       Nonlinear Optimization*, volume 83 of Nonconvex Optim. Appl., pages
       255--297. Springer, Boston, MA, USA, 2006. `doi:10.1007/0-387-30065-1_16
       <https://doi.org/10.1007/0-387-30065-1_16>`_.
    """

    def __init__(self, interpolation, values, debug):
        """
        Initialize the quadratic model.

        Parameters
        ----------
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.
        values : `numpy.ndarray`, shape (npt,)
            Values of the interpolated function at the interpolation points.
        debug : bool
            Whether to make debugging tests during the execution.

        Raises
        ------
        ValueError
            If the number of interpolation points is less than ``n + 1``.
        `numpy.linalg.LinAlgError`
            If the interpolation system is ill-defined.
        """
        self._debug = debug
        if self._debug:
            assert values.shape == (
                interpolation.npt,
            ), "The shape of `values` is not valid."
        if interpolation.npt < interpolation.n + 1:
            raise ValueError(
                f"The number of interpolation points must be at least "
                f"{interpolation.n + 1}."
            )
        # The model is const + grad^T d + 0.5 d^T H d, where the Hessian H
        # is split into an explicit part (_e_hess) and an implicit part
        # carried by the interpolation points (_i_hess).
        self._const, self._grad, self._i_hess, _ = self._get_model(
            interpolation,
            values,
        )
        self._e_hess = np.zeros((self.n, self.n))

    def __call__(self, x, interpolation):
        """
        Evaluate the quadratic model at a given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which the quadratic model is evaluated.
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.

        Returns
        -------
        float
            Value of the quadratic model at `x`.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
        x_diff = x - interpolation.x_base
        return (
            self._const
            + self._grad @ x_diff
            + 0.5
            * (
                self._i_hess @ (interpolation.xpt.T @ x_diff) ** 2.0
                + x_diff @ self._e_hess @ x_diff
            )
        )

    @property
    def n(self):
        """
        Number of variables.

        Returns
        -------
        int
            Number of variables.
        """
        return self._grad.size

    @property
    def npt(self):
        """
        Number of interpolation points used to define the quadratic model.

        Returns
        -------
        int
            Number of interpolation points used to define the quadratic model.
        """
        return self._i_hess.size

    def grad(self, x, interpolation):
        """
        Evaluate the gradient of the quadratic model at a given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which the gradient of the quadratic model is evaluated.
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Gradient of the quadratic model at `x`.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
        x_diff = x - interpolation.x_base
        return self._grad + self.hess_prod(x_diff, interpolation)

    def hess(self, interpolation):
        """
        Evaluate the Hessian matrix of the quadratic model.

        Parameters
        ----------
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.

        Returns
        -------
        `numpy.ndarray`, shape (n, n)
            Hessian matrix of the quadratic model (explicit part plus the
            implicit part expanded through the interpolation points).
        """
        return self._e_hess + interpolation.xpt @ (
            self._i_hess[:, np.newaxis] * interpolation.xpt.T
        )

    def hess_prod(self, v, interpolation):
        """
        Evaluate the right product of the Hessian matrix of the quadratic
        model with a given vector.

        Parameters
        ----------
        v : `numpy.ndarray`, shape (n,)
            Vector with which the Hessian matrix of the quadratic model is
            multiplied from the right.
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Right product of the Hessian matrix of the quadratic model with
            `v`.
        """
        if self._debug:
            assert v.shape == (self.n,), "The shape of `v` is not valid."
        return self._e_hess @ v + interpolation.xpt @ (
            self._i_hess * (interpolation.xpt.T @ v)
        )

    def curv(self, v, interpolation):
        """
        Evaluate the curvature of the quadratic model along a given direction.

        Parameters
        ----------
        v : `numpy.ndarray`, shape (n,)
            Direction along which the curvature of the quadratic model is
            evaluated.
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.

        Returns
        -------
        float
            Curvature ``v^T H v`` of the quadratic model along `v`.
        """
        if self._debug:
            assert v.shape == (self.n,), "The shape of `v` is not valid."
        return (
            v @ self._e_hess @ v
            + self._i_hess @ (interpolation.xpt.T @ v) ** 2.0
        )

    def update(self, interpolation, k_new, dir_old, values_diff):
        """
        Update the quadratic model.

        This method applies the derivative-free symmetric Broyden update to
        the quadratic model. The `k_new`-th interpolation point must be
        updated before calling this method.

        Parameters
        ----------
        interpolation : `cobyqa.models.Interpolation`
            Updated interpolation set.
        k_new : int
            Index of the updated interpolation point.
        dir_old : `numpy.ndarray`, shape (n,)
            Value of ``interpolation.xpt[:, k_new]`` before the update.
        values_diff : `numpy.ndarray`, shape (npt,)
            Differences between the values of the interpolated nonlinear
            function and the previous quadratic model at the updated
            interpolation points.

        Returns
        -------
        bool-like
            Whether the interpolation system was ill-conditioned.

        Raises
        ------
        `numpy.linalg.LinAlgError`
            If the interpolation system is ill-defined.
        """
        if self._debug:
            assert 0 <= k_new < self.npt, "The index `k_new` is not valid."
            assert dir_old.shape == (
                self.n,
            ), "The shape of `dir_old` is not valid."
            assert values_diff.shape == (
                self.npt,
            ), "The shape of `values_diff` is not valid."

        # Forward the k_new-th element of the implicit Hessian matrix to the
        # explicit Hessian matrix. This must be done because the implicit
        # Hessian matrix is related to the interpolation points, and the
        # k_new-th interpolation point is modified.
        self._e_hess += self._i_hess[k_new] * np.outer(dir_old, dir_old)
        self._i_hess[k_new] = 0.0

        # Update the quadratic model by interpolating the residuals.
        const, grad, i_hess, ill_conditioned = self._get_model(
            interpolation,
            values_diff,
        )
        self._const += const
        self._grad += grad
        self._i_hess += i_hess
        return ill_conditioned

    def shift_x_base(self, interpolation, new_x_base):
        """
        Shift the point around which the quadratic model is defined.

        Parameters
        ----------
        interpolation : `cobyqa.models.Interpolation`
            Previous interpolation set.
        new_x_base : `numpy.ndarray`, shape (n,)
            Point that will replace ``interpolation.x_base``.
        """
        if self._debug:
            assert new_x_base.shape == (
                self.n,
            ), "The shape of `new_x_base` is not valid."
        # Re-expand the model around the new base point: constant and
        # gradient are re-evaluated there, and the explicit Hessian absorbs
        # the rank-two correction induced by the shift.
        self._const = self(new_x_base, interpolation)
        self._grad = self.grad(new_x_base, interpolation)
        shift = new_x_base - interpolation.x_base
        update = np.outer(
            shift,
            (interpolation.xpt - 0.5 * shift[:, np.newaxis]) @ self._i_hess,
        )
        self._e_hess += update + update.T

    @staticmethod
    def solve_systems(interpolation, rhs):
        """
        Solve the interpolation systems.

        Parameters
        ----------
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.
        rhs : `numpy.ndarray`, shape (npt + n + 1, m)
            Right-hand side vectors of the ``m`` interpolation systems.

        Returns
        -------
        `numpy.ndarray`, shape (npt + n + 1, m)
            Solutions of the interpolation systems.
        numpy bool
            Whether the interpolation systems are ill-conditioned (i.e.,
            whether any eigenvalue was discarded as negligible).

        Raises
        ------
        `numpy.linalg.LinAlgError`
            If the interpolation systems are ill-defined.
        """
        n, npt = interpolation.xpt.shape
        assert (
            rhs.ndim == 2 and rhs.shape[0] == npt + n + 1
        ), "The shape of `rhs` is not valid."

        # Build the left-hand side matrix of the interpolation system. The
        # matrix below stores W * diag(right_scaling), where W is the
        # theoretical matrix of the interpolation system. The scaling is
        # chosen to keep the elements in the matrix well-balanced.
        a, right_scaling, eig = build_system(interpolation)

        # Build the solution. After a discussion with Mike Saunders and
        # Alexis Montoison during their visit to the Hong Kong Polytechnic
        # University in 2024, we decided to use the eigendecomposition of the
        # symmetric matrix a. This is more stable than the previously
        # employed LBL decomposition, and allows us to directly detect
        # ill-conditioning of the system and to build the least-squares
        # solution if necessary. Numerical experiments have shown that this
        # strategy improves the performance of the solver.
        rhs_scaled = rhs * right_scaling[:, np.newaxis]
        if not (np.all(np.isfinite(a)) and np.all(np.isfinite(rhs_scaled))):
            raise np.linalg.LinAlgError(
                "The interpolation system is ill-defined."
            )

        # Eigendecomposition was computed (and cached) in build_system.
        eig_values, eig_vectors = eig

        # Drop negligible eigenvalues and solve in the retained eigenspace;
        # when eigenvalues are dropped this yields a least-squares solution.
        large_eig_values = np.abs(eig_values) > EPS
        eig_vectors = eig_vectors[:, large_eig_values]
        inv_eig_values = 1.0 / eig_values[large_eig_values]
        ill_conditioned = ~np.all(large_eig_values, 0)
        left_scaled_solutions = eig_vectors @ (
            (eig_vectors.T @ rhs_scaled) * inv_eig_values[:, np.newaxis]
        )
        return (
            left_scaled_solutions * right_scaling[:, np.newaxis],
            ill_conditioned,
        )

    @staticmethod
    def _get_model(interpolation, values):
        """
        Solve the interpolation system.

        Parameters
        ----------
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.
        values : `numpy.ndarray`, shape (npt,)
            Values of the interpolated function at the interpolation points.

        Returns
        -------
        float
            Constant term of the quadratic model.
        `numpy.ndarray`, shape (n,)
            Gradient of the quadratic model at ``interpolation.x_base``.
        `numpy.ndarray`, shape (npt,)
            Implicit Hessian matrix of the quadratic model.
        numpy bool
            Whether the interpolation system is ill-conditioned.

        Raises
        ------
        `numpy.linalg.LinAlgError`
            If the interpolation system is ill-defined.
        """
        assert values.shape == (
            interpolation.npt,
        ), "The shape of `values` is not valid."
        n, npt = interpolation.xpt.shape
        # Single right-hand side: function values padded with n + 1 zeros.
        x, ill_conditioned = Quadratic.solve_systems(
            interpolation,
            np.block(
                [
                    [
                        values,
                        np.zeros(n + 1),
                    ]
                ]
            ).T,
        )
        return x[npt, 0], x[npt + 1:, 0], x[:npt, 0], ill_conditioned
class Models:
|
| 623 |
+
"""
|
| 624 |
+
Models for a nonlinear optimization problem.
|
| 625 |
+
"""
|
| 626 |
+
|
| 627 |
+
    def __init__(self, pb, options, penalty):
        """
        Initialize the models.

        Evaluates the objective and nonlinear constraint functions at every
        initial interpolation point, then builds one quadratic model for the
        objective and one per nonlinear constraint.

        Parameters
        ----------
        pb : `cobyqa.problem.Problem`
            Problem to be solved.
        options : dict
            Options of the solver.
        penalty : float
            Penalty parameter used to select the point in the filter to
            forward to the callback function.

        Raises
        ------
        `cobyqa.utils.MaxEvalError`
            If the maximum number of evaluations is reached.
        `cobyqa.utils.TargetSuccess`
            If a nearly feasible point has been found with an objective
            function value below the target.
        `cobyqa.utils.FeasibleSuccess`
            If a feasible point has been found for a feasibility problem.
        `numpy.linalg.LinAlgError`
            If the interpolation system is ill-defined.
        """
        # Set the initial interpolation set.
        self._debug = options[Options.DEBUG]
        self._interpolation = Interpolation(pb, options)

        # Evaluate the nonlinear functions at the initial interpolation
        # points. Point 0 is evaluated first so the constraint counts
        # (cub_init.size, ceq_init.size) are known before allocating.
        x_eval = self.interpolation.point(0)
        fun_init, cub_init, ceq_init = pb(x_eval, penalty)
        self._fun_val = np.full(options[Options.NPT], np.nan)
        self._cub_val = np.full((options[Options.NPT], cub_init.size), np.nan)
        self._ceq_val = np.full((options[Options.NPT], ceq_init.size), np.nan)
        for k in range(options[Options.NPT]):
            if k >= options[Options.MAX_EVAL]:
                raise MaxEvalError
            if k == 0:
                self.fun_val[k] = fun_init
                self.cub_val[k, :] = cub_init
                self.ceq_val[k, :] = ceq_init
            else:
                x_eval = self.interpolation.point(k)
                self.fun_val[k], self.cub_val[k, :], self.ceq_val[k, :] = pb(
                    x_eval,
                    penalty,
                )

            # Stop the iterations if the problem is a feasibility problem and
            # the current interpolation point is feasible.
            if (
                pb.is_feasibility
                and pb.maxcv(
                    self.interpolation.point(k),
                    self.cub_val[k, :],
                    self.ceq_val[k, :],
                )
                <= options[Options.FEASIBILITY_TOL]
            ):
                raise FeasibleSuccess

            # Stop the iterations if the current interpolation point is
            # nearly feasible and has an objective function value below the
            # target.
            if (
                self._fun_val[k] <= options[Options.TARGET]
                and pb.maxcv(
                    self.interpolation.point(k),
                    self.cub_val[k, :],
                    self.ceq_val[k, :],
                )
                <= options[Options.FEASIBILITY_TOL]
            ):
                raise TargetSuccess

        # Build the initial quadratic models (one for the objective, one per
        # nonlinear inequality and equality constraint).
        self._fun = Quadratic(
            self.interpolation,
            self._fun_val,
            options[Options.DEBUG],
        )
        self._cub = np.empty(self.m_nonlinear_ub, dtype=Quadratic)
        self._ceq = np.empty(self.m_nonlinear_eq, dtype=Quadratic)
        for i in range(self.m_nonlinear_ub):
            self._cub[i] = Quadratic(
                self.interpolation,
                self.cub_val[:, i],
                options[Options.DEBUG],
            )
        for i in range(self.m_nonlinear_eq):
            self._ceq[i] = Quadratic(
                self.interpolation,
                self.ceq_val[:, i],
                options[Options.DEBUG],
            )
        if self._debug:
            self._check_interpolation_conditions()
    @property
    def n(self):
        """
        Dimension of the problem.

        Returns
        -------
        int
            Dimension of the problem (delegated to the interpolation set).
        """
        return self.interpolation.n
    @property
    def npt(self):
        """
        Number of interpolation points.

        Returns
        -------
        int
            Number of interpolation points (delegated to the interpolation
            set).
        """
        return self.interpolation.npt
    @property
    def m_nonlinear_ub(self):
        """
        Number of nonlinear inequality constraints.

        Returns
        -------
        int
            Number of nonlinear inequality constraints (number of columns of
            `cub_val`).
        """
        return self.cub_val.shape[1]
    @property
    def m_nonlinear_eq(self):
        """
        Number of nonlinear equality constraints.

        Returns
        -------
        int
            Number of nonlinear equality constraints (number of columns of
            `ceq_val`).
        """
        return self.ceq_val.shape[1]
    @property
    def interpolation(self):
        """
        Interpolation set.

        Returns
        -------
        `cobyqa.models.Interpolation`
            Interpolation set.
        """
        return self._interpolation
    @property
    def fun_val(self):
        """
        Values of the objective function at the interpolation points.

        Returns
        -------
        `numpy.ndarray`, shape (npt,)
            Values of the objective function at the interpolation points.
        """
        return self._fun_val
@property
|
| 799 |
+
def cub_val(self):
|
| 800 |
+
"""
|
| 801 |
+
Values of the nonlinear inequality constraint functions at the
|
| 802 |
+
interpolation points.
|
| 803 |
+
|
| 804 |
+
Returns
|
| 805 |
+
-------
|
| 806 |
+
`numpy.ndarray`, shape (npt, m_nonlinear_ub)
|
| 807 |
+
Values of the nonlinear inequality constraint functions at the
|
| 808 |
+
interpolation points.
|
| 809 |
+
"""
|
| 810 |
+
return self._cub_val
|
| 811 |
+
|
| 812 |
+
@property
|
| 813 |
+
def ceq_val(self):
|
| 814 |
+
"""
|
| 815 |
+
Values of the nonlinear equality constraint functions at the
|
| 816 |
+
interpolation points.
|
| 817 |
+
|
| 818 |
+
Returns
|
| 819 |
+
-------
|
| 820 |
+
`numpy.ndarray`, shape (npt, m_nonlinear_eq)
|
| 821 |
+
Values of the nonlinear equality constraint functions at the
|
| 822 |
+
interpolation points.
|
| 823 |
+
"""
|
| 824 |
+
return self._ceq_val
|
| 825 |
+
|
| 826 |
+
def fun(self, x):
|
| 827 |
+
"""
|
| 828 |
+
Evaluate the quadratic model of the objective function at a given
|
| 829 |
+
point.
|
| 830 |
+
|
| 831 |
+
Parameters
|
| 832 |
+
----------
|
| 833 |
+
x : `numpy.ndarray`, shape (n,)
|
| 834 |
+
Point at which to evaluate the quadratic model of the objective
|
| 835 |
+
function.
|
| 836 |
+
|
| 837 |
+
Returns
|
| 838 |
+
-------
|
| 839 |
+
float
|
| 840 |
+
Value of the quadratic model of the objective function at `x`.
|
| 841 |
+
"""
|
| 842 |
+
if self._debug:
|
| 843 |
+
assert x.shape == (self.n,), "The shape of `x` is not valid."
|
| 844 |
+
return self._fun(x, self.interpolation)
|
| 845 |
+
|
| 846 |
+
def fun_grad(self, x):
|
| 847 |
+
"""
|
| 848 |
+
Evaluate the gradient of the quadratic model of the objective function
|
| 849 |
+
at a given point.
|
| 850 |
+
|
| 851 |
+
Parameters
|
| 852 |
+
----------
|
| 853 |
+
x : `numpy.ndarray`, shape (n,)
|
| 854 |
+
Point at which to evaluate the gradient of the quadratic model of
|
| 855 |
+
the objective function.
|
| 856 |
+
|
| 857 |
+
Returns
|
| 858 |
+
-------
|
| 859 |
+
`numpy.ndarray`, shape (n,)
|
| 860 |
+
Gradient of the quadratic model of the objective function at `x`.
|
| 861 |
+
"""
|
| 862 |
+
if self._debug:
|
| 863 |
+
assert x.shape == (self.n,), "The shape of `x` is not valid."
|
| 864 |
+
return self._fun.grad(x, self.interpolation)
|
| 865 |
+
|
| 866 |
+
def fun_hess(self):
|
| 867 |
+
"""
|
| 868 |
+
Evaluate the Hessian matrix of the quadratic model of the objective
|
| 869 |
+
function.
|
| 870 |
+
|
| 871 |
+
Returns
|
| 872 |
+
-------
|
| 873 |
+
`numpy.ndarray`, shape (n, n)
|
| 874 |
+
Hessian matrix of the quadratic model of the objective function.
|
| 875 |
+
"""
|
| 876 |
+
return self._fun.hess(self.interpolation)
|
| 877 |
+
|
| 878 |
+
def fun_hess_prod(self, v):
|
| 879 |
+
"""
|
| 880 |
+
Evaluate the right product of the Hessian matrix of the quadratic model
|
| 881 |
+
of the objective function with a given vector.
|
| 882 |
+
|
| 883 |
+
Parameters
|
| 884 |
+
----------
|
| 885 |
+
v : `numpy.ndarray`, shape (n,)
|
| 886 |
+
Vector with which the Hessian matrix of the quadratic model of the
|
| 887 |
+
objective function is multiplied from the right.
|
| 888 |
+
|
| 889 |
+
Returns
|
| 890 |
+
-------
|
| 891 |
+
`numpy.ndarray`, shape (n,)
|
| 892 |
+
Right product of the Hessian matrix of the quadratic model of the
|
| 893 |
+
objective function with `v`.
|
| 894 |
+
"""
|
| 895 |
+
if self._debug:
|
| 896 |
+
assert v.shape == (self.n,), "The shape of `v` is not valid."
|
| 897 |
+
return self._fun.hess_prod(v, self.interpolation)
|
| 898 |
+
|
| 899 |
+
def fun_curv(self, v):
|
| 900 |
+
"""
|
| 901 |
+
Evaluate the curvature of the quadratic model of the objective function
|
| 902 |
+
along a given direction.
|
| 903 |
+
|
| 904 |
+
Parameters
|
| 905 |
+
----------
|
| 906 |
+
v : `numpy.ndarray`, shape (n,)
|
| 907 |
+
Direction along which the curvature of the quadratic model of the
|
| 908 |
+
objective function is evaluated.
|
| 909 |
+
|
| 910 |
+
Returns
|
| 911 |
+
-------
|
| 912 |
+
float
|
| 913 |
+
Curvature of the quadratic model of the objective function along
|
| 914 |
+
`v`.
|
| 915 |
+
"""
|
| 916 |
+
if self._debug:
|
| 917 |
+
assert v.shape == (self.n,), "The shape of `v` is not valid."
|
| 918 |
+
return self._fun.curv(v, self.interpolation)
|
| 919 |
+
|
| 920 |
+
def fun_alt_grad(self, x):
|
| 921 |
+
"""
|
| 922 |
+
Evaluate the gradient of the alternative quadratic model of the
|
| 923 |
+
objective function at a given point.
|
| 924 |
+
|
| 925 |
+
Parameters
|
| 926 |
+
----------
|
| 927 |
+
x : `numpy.ndarray`, shape (n,)
|
| 928 |
+
Point at which to evaluate the gradient of the alternative
|
| 929 |
+
quadratic model of the objective function.
|
| 930 |
+
|
| 931 |
+
Returns
|
| 932 |
+
-------
|
| 933 |
+
`numpy.ndarray`, shape (n,)
|
| 934 |
+
Gradient of the alternative quadratic model of the objective
|
| 935 |
+
function at `x`.
|
| 936 |
+
|
| 937 |
+
Raises
|
| 938 |
+
------
|
| 939 |
+
`numpy.linalg.LinAlgError`
|
| 940 |
+
If the interpolation system is ill-defined.
|
| 941 |
+
"""
|
| 942 |
+
if self._debug:
|
| 943 |
+
assert x.shape == (self.n,), "The shape of `x` is not valid."
|
| 944 |
+
model = Quadratic(self.interpolation, self.fun_val, self._debug)
|
| 945 |
+
return model.grad(x, self.interpolation)
|
| 946 |
+
|
| 947 |
+
def cub(self, x, mask=None):
|
| 948 |
+
"""
|
| 949 |
+
Evaluate the quadratic models of the nonlinear inequality functions at
|
| 950 |
+
a given point.
|
| 951 |
+
|
| 952 |
+
Parameters
|
| 953 |
+
----------
|
| 954 |
+
x : `numpy.ndarray`, shape (n,)
|
| 955 |
+
Point at which to evaluate the quadratic models of the nonlinear
|
| 956 |
+
inequality functions.
|
| 957 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
|
| 958 |
+
Mask of the quadratic models to consider.
|
| 959 |
+
|
| 960 |
+
Returns
|
| 961 |
+
-------
|
| 962 |
+
`numpy.ndarray`
|
| 963 |
+
Values of the quadratic model of the nonlinear inequality
|
| 964 |
+
functions.
|
| 965 |
+
"""
|
| 966 |
+
if self._debug:
|
| 967 |
+
assert x.shape == (self.n,), "The shape of `x` is not valid."
|
| 968 |
+
assert mask is None or mask.shape == (
|
| 969 |
+
self.m_nonlinear_ub,
|
| 970 |
+
), "The shape of `mask` is not valid."
|
| 971 |
+
return np.array(
|
| 972 |
+
[model(x, self.interpolation) for model in self._get_cub(mask)]
|
| 973 |
+
)
|
| 974 |
+
|
| 975 |
+
def cub_grad(self, x, mask=None):
|
| 976 |
+
"""
|
| 977 |
+
Evaluate the gradients of the quadratic models of the nonlinear
|
| 978 |
+
inequality functions at a given point.
|
| 979 |
+
|
| 980 |
+
Parameters
|
| 981 |
+
----------
|
| 982 |
+
x : `numpy.ndarray`, shape (n,)
|
| 983 |
+
Point at which to evaluate the gradients of the quadratic models of
|
| 984 |
+
the nonlinear inequality functions.
|
| 985 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 986 |
+
Mask of the quadratic models to consider.
|
| 987 |
+
|
| 988 |
+
Returns
|
| 989 |
+
-------
|
| 990 |
+
`numpy.ndarray`
|
| 991 |
+
Gradients of the quadratic model of the nonlinear inequality
|
| 992 |
+
functions.
|
| 993 |
+
"""
|
| 994 |
+
if self._debug:
|
| 995 |
+
assert x.shape == (self.n,), "The shape of `x` is not valid."
|
| 996 |
+
assert mask is None or mask.shape == (
|
| 997 |
+
self.m_nonlinear_ub,
|
| 998 |
+
), "The shape of `mask` is not valid."
|
| 999 |
+
return np.reshape(
|
| 1000 |
+
[model.grad(x, self.interpolation)
|
| 1001 |
+
for model in self._get_cub(mask)],
|
| 1002 |
+
(-1, self.n),
|
| 1003 |
+
)
|
| 1004 |
+
|
| 1005 |
+
def cub_hess(self, mask=None):
|
| 1006 |
+
"""
|
| 1007 |
+
Evaluate the Hessian matrices of the quadratic models of the nonlinear
|
| 1008 |
+
inequality functions.
|
| 1009 |
+
|
| 1010 |
+
Parameters
|
| 1011 |
+
----------
|
| 1012 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
|
| 1013 |
+
Mask of the quadratic models to consider.
|
| 1014 |
+
|
| 1015 |
+
Returns
|
| 1016 |
+
-------
|
| 1017 |
+
`numpy.ndarray`
|
| 1018 |
+
Hessian matrices of the quadratic models of the nonlinear
|
| 1019 |
+
inequality functions.
|
| 1020 |
+
"""
|
| 1021 |
+
if self._debug:
|
| 1022 |
+
assert mask is None or mask.shape == (
|
| 1023 |
+
self.m_nonlinear_ub,
|
| 1024 |
+
), "The shape of `mask` is not valid."
|
| 1025 |
+
return np.reshape(
|
| 1026 |
+
[model.hess(self.interpolation) for model in self._get_cub(mask)],
|
| 1027 |
+
(-1, self.n, self.n),
|
| 1028 |
+
)
|
| 1029 |
+
|
| 1030 |
+
def cub_hess_prod(self, v, mask=None):
|
| 1031 |
+
"""
|
| 1032 |
+
Evaluate the right product of the Hessian matrices of the quadratic
|
| 1033 |
+
models of the nonlinear inequality functions with a given vector.
|
| 1034 |
+
|
| 1035 |
+
Parameters
|
| 1036 |
+
----------
|
| 1037 |
+
v : `numpy.ndarray`, shape (n,)
|
| 1038 |
+
Vector with which the Hessian matrices of the quadratic models of
|
| 1039 |
+
the nonlinear inequality functions are multiplied from the right.
|
| 1040 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
|
| 1041 |
+
Mask of the quadratic models to consider.
|
| 1042 |
+
|
| 1043 |
+
Returns
|
| 1044 |
+
-------
|
| 1045 |
+
`numpy.ndarray`
|
| 1046 |
+
Right products of the Hessian matrices of the quadratic models of
|
| 1047 |
+
the nonlinear inequality functions with `v`.
|
| 1048 |
+
"""
|
| 1049 |
+
if self._debug:
|
| 1050 |
+
assert v.shape == (self.n,), "The shape of `v` is not valid."
|
| 1051 |
+
assert mask is None or mask.shape == (
|
| 1052 |
+
self.m_nonlinear_ub,
|
| 1053 |
+
), "The shape of `mask` is not valid."
|
| 1054 |
+
return np.reshape(
|
| 1055 |
+
[
|
| 1056 |
+
model.hess_prod(v, self.interpolation)
|
| 1057 |
+
for model in self._get_cub(mask)
|
| 1058 |
+
],
|
| 1059 |
+
(-1, self.n),
|
| 1060 |
+
)
|
| 1061 |
+
|
| 1062 |
+
def cub_curv(self, v, mask=None):
|
| 1063 |
+
"""
|
| 1064 |
+
Evaluate the curvature of the quadratic models of the nonlinear
|
| 1065 |
+
inequality functions along a given direction.
|
| 1066 |
+
|
| 1067 |
+
Parameters
|
| 1068 |
+
----------
|
| 1069 |
+
v : `numpy.ndarray`, shape (n,)
|
| 1070 |
+
Direction along which the curvature of the quadratic models of the
|
| 1071 |
+
nonlinear inequality functions is evaluated.
|
| 1072 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
|
| 1073 |
+
Mask of the quadratic models to consider.
|
| 1074 |
+
|
| 1075 |
+
Returns
|
| 1076 |
+
-------
|
| 1077 |
+
`numpy.ndarray`
|
| 1078 |
+
Curvature of the quadratic models of the nonlinear inequality
|
| 1079 |
+
functions along `v`.
|
| 1080 |
+
"""
|
| 1081 |
+
if self._debug:
|
| 1082 |
+
assert v.shape == (self.n,), "The shape of `v` is not valid."
|
| 1083 |
+
assert mask is None or mask.shape == (
|
| 1084 |
+
self.m_nonlinear_ub,
|
| 1085 |
+
), "The shape of `mask` is not valid."
|
| 1086 |
+
return np.array(
|
| 1087 |
+
[model.curv(v, self.interpolation)
|
| 1088 |
+
for model in self._get_cub(mask)]
|
| 1089 |
+
)
|
| 1090 |
+
|
| 1091 |
+
def ceq(self, x, mask=None):
|
| 1092 |
+
"""
|
| 1093 |
+
Evaluate the quadratic models of the nonlinear equality functions at a
|
| 1094 |
+
given point.
|
| 1095 |
+
|
| 1096 |
+
Parameters
|
| 1097 |
+
----------
|
| 1098 |
+
x : `numpy.ndarray`, shape (n,)
|
| 1099 |
+
Point at which to evaluate the quadratic models of the nonlinear
|
| 1100 |
+
equality functions.
|
| 1101 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 1102 |
+
Mask of the quadratic models to consider.
|
| 1103 |
+
|
| 1104 |
+
Returns
|
| 1105 |
+
-------
|
| 1106 |
+
`numpy.ndarray`
|
| 1107 |
+
Values of the quadratic model of the nonlinear equality functions.
|
| 1108 |
+
"""
|
| 1109 |
+
if self._debug:
|
| 1110 |
+
assert x.shape == (self.n,), "The shape of `x` is not valid."
|
| 1111 |
+
assert mask is None or mask.shape == (
|
| 1112 |
+
self.m_nonlinear_eq,
|
| 1113 |
+
), "The shape of `mask` is not valid."
|
| 1114 |
+
return np.array(
|
| 1115 |
+
[model(x, self.interpolation) for model in self._get_ceq(mask)]
|
| 1116 |
+
)
|
| 1117 |
+
|
| 1118 |
+
def ceq_grad(self, x, mask=None):
|
| 1119 |
+
"""
|
| 1120 |
+
Evaluate the gradients of the quadratic models of the nonlinear
|
| 1121 |
+
equality functions at a given point.
|
| 1122 |
+
|
| 1123 |
+
Parameters
|
| 1124 |
+
----------
|
| 1125 |
+
x : `numpy.ndarray`, shape (n,)
|
| 1126 |
+
Point at which to evaluate the gradients of the quadratic models of
|
| 1127 |
+
the nonlinear equality functions.
|
| 1128 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 1129 |
+
Mask of the quadratic models to consider.
|
| 1130 |
+
|
| 1131 |
+
Returns
|
| 1132 |
+
-------
|
| 1133 |
+
`numpy.ndarray`
|
| 1134 |
+
Gradients of the quadratic model of the nonlinear equality
|
| 1135 |
+
functions.
|
| 1136 |
+
"""
|
| 1137 |
+
if self._debug:
|
| 1138 |
+
assert x.shape == (self.n,), "The shape of `x` is not valid."
|
| 1139 |
+
assert mask is None or mask.shape == (
|
| 1140 |
+
self.m_nonlinear_eq,
|
| 1141 |
+
), "The shape of `mask` is not valid."
|
| 1142 |
+
return np.reshape(
|
| 1143 |
+
[model.grad(x, self.interpolation)
|
| 1144 |
+
for model in self._get_ceq(mask)],
|
| 1145 |
+
(-1, self.n),
|
| 1146 |
+
)
|
| 1147 |
+
|
| 1148 |
+
def ceq_hess(self, mask=None):
|
| 1149 |
+
"""
|
| 1150 |
+
Evaluate the Hessian matrices of the quadratic models of the nonlinear
|
| 1151 |
+
equality functions.
|
| 1152 |
+
|
| 1153 |
+
Parameters
|
| 1154 |
+
----------
|
| 1155 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 1156 |
+
Mask of the quadratic models to consider.
|
| 1157 |
+
|
| 1158 |
+
Returns
|
| 1159 |
+
-------
|
| 1160 |
+
`numpy.ndarray`
|
| 1161 |
+
Hessian matrices of the quadratic models of the nonlinear equality
|
| 1162 |
+
functions.
|
| 1163 |
+
"""
|
| 1164 |
+
if self._debug:
|
| 1165 |
+
assert mask is None or mask.shape == (
|
| 1166 |
+
self.m_nonlinear_eq,
|
| 1167 |
+
), "The shape of `mask` is not valid."
|
| 1168 |
+
return np.reshape(
|
| 1169 |
+
[model.hess(self.interpolation) for model in self._get_ceq(mask)],
|
| 1170 |
+
(-1, self.n, self.n),
|
| 1171 |
+
)
|
| 1172 |
+
|
| 1173 |
+
def ceq_hess_prod(self, v, mask=None):
|
| 1174 |
+
"""
|
| 1175 |
+
Evaluate the right product of the Hessian matrices of the quadratic
|
| 1176 |
+
models of the nonlinear equality functions with a given vector.
|
| 1177 |
+
|
| 1178 |
+
Parameters
|
| 1179 |
+
----------
|
| 1180 |
+
v : `numpy.ndarray`, shape (n,)
|
| 1181 |
+
Vector with which the Hessian matrices of the quadratic models of
|
| 1182 |
+
the nonlinear equality functions are multiplied from the right.
|
| 1183 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 1184 |
+
Mask of the quadratic models to consider.
|
| 1185 |
+
|
| 1186 |
+
Returns
|
| 1187 |
+
-------
|
| 1188 |
+
`numpy.ndarray`
|
| 1189 |
+
Right products of the Hessian matrices of the quadratic models of
|
| 1190 |
+
the nonlinear equality functions with `v`.
|
| 1191 |
+
"""
|
| 1192 |
+
if self._debug:
|
| 1193 |
+
assert v.shape == (self.n,), "The shape of `v` is not valid."
|
| 1194 |
+
assert mask is None or mask.shape == (
|
| 1195 |
+
self.m_nonlinear_eq,
|
| 1196 |
+
), "The shape of `mask` is not valid."
|
| 1197 |
+
return np.reshape(
|
| 1198 |
+
[
|
| 1199 |
+
model.hess_prod(v, self.interpolation)
|
| 1200 |
+
for model in self._get_ceq(mask)
|
| 1201 |
+
],
|
| 1202 |
+
(-1, self.n),
|
| 1203 |
+
)
|
| 1204 |
+
|
| 1205 |
+
def ceq_curv(self, v, mask=None):
|
| 1206 |
+
"""
|
| 1207 |
+
Evaluate the curvature of the quadratic models of the nonlinear
|
| 1208 |
+
equality functions along a given direction.
|
| 1209 |
+
|
| 1210 |
+
Parameters
|
| 1211 |
+
----------
|
| 1212 |
+
v : `numpy.ndarray`, shape (n,)
|
| 1213 |
+
Direction along which the curvature of the quadratic models of the
|
| 1214 |
+
nonlinear equality functions is evaluated.
|
| 1215 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 1216 |
+
Mask of the quadratic models to consider.
|
| 1217 |
+
|
| 1218 |
+
Returns
|
| 1219 |
+
-------
|
| 1220 |
+
`numpy.ndarray`
|
| 1221 |
+
Curvature of the quadratic models of the nonlinear equality
|
| 1222 |
+
functions along `v`.
|
| 1223 |
+
"""
|
| 1224 |
+
if self._debug:
|
| 1225 |
+
assert v.shape == (self.n,), "The shape of `v` is not valid."
|
| 1226 |
+
assert mask is None or mask.shape == (
|
| 1227 |
+
self.m_nonlinear_eq,
|
| 1228 |
+
), "The shape of `mask` is not valid."
|
| 1229 |
+
return np.array(
|
| 1230 |
+
[model.curv(v, self.interpolation)
|
| 1231 |
+
for model in self._get_ceq(mask)]
|
| 1232 |
+
)
|
| 1233 |
+
|
| 1234 |
+
def reset_models(self):
|
| 1235 |
+
"""
|
| 1236 |
+
Set the quadratic models of the objective function, nonlinear
|
| 1237 |
+
inequality constraints, and nonlinear equality constraints to the
|
| 1238 |
+
alternative quadratic models.
|
| 1239 |
+
|
| 1240 |
+
Raises
|
| 1241 |
+
------
|
| 1242 |
+
`numpy.linalg.LinAlgError`
|
| 1243 |
+
If the interpolation system is ill-defined.
|
| 1244 |
+
"""
|
| 1245 |
+
self._fun = Quadratic(self.interpolation, self.fun_val, self._debug)
|
| 1246 |
+
for i in range(self.m_nonlinear_ub):
|
| 1247 |
+
self._cub[i] = Quadratic(
|
| 1248 |
+
self.interpolation,
|
| 1249 |
+
self.cub_val[:, i],
|
| 1250 |
+
self._debug,
|
| 1251 |
+
)
|
| 1252 |
+
for i in range(self.m_nonlinear_eq):
|
| 1253 |
+
self._ceq[i] = Quadratic(
|
| 1254 |
+
self.interpolation,
|
| 1255 |
+
self.ceq_val[:, i],
|
| 1256 |
+
self._debug,
|
| 1257 |
+
)
|
| 1258 |
+
if self._debug:
|
| 1259 |
+
self._check_interpolation_conditions()
|
| 1260 |
+
|
| 1261 |
+
def update_interpolation(self, k_new, x_new, fun_val, cub_val, ceq_val):
|
| 1262 |
+
"""
|
| 1263 |
+
Update the interpolation set.
|
| 1264 |
+
|
| 1265 |
+
This method updates the interpolation set by replacing the `knew`-th
|
| 1266 |
+
interpolation point with `xnew`. It also updates the function values
|
| 1267 |
+
and the quadratic models.
|
| 1268 |
+
|
| 1269 |
+
Parameters
|
| 1270 |
+
----------
|
| 1271 |
+
k_new : int
|
| 1272 |
+
Index of the updated interpolation point.
|
| 1273 |
+
x_new : `numpy.ndarray`, shape (n,)
|
| 1274 |
+
New interpolation point. Its value is interpreted as relative to
|
| 1275 |
+
the origin, not the base point.
|
| 1276 |
+
fun_val : float
|
| 1277 |
+
Value of the objective function at `x_new`.
|
| 1278 |
+
Objective function value at `x_new`.
|
| 1279 |
+
cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,)
|
| 1280 |
+
Values of the nonlinear inequality constraints at `x_new`.
|
| 1281 |
+
ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,)
|
| 1282 |
+
Values of the nonlinear equality constraints at `x_new`.
|
| 1283 |
+
|
| 1284 |
+
Raises
|
| 1285 |
+
------
|
| 1286 |
+
`numpy.linalg.LinAlgError`
|
| 1287 |
+
If the interpolation system is ill-defined.
|
| 1288 |
+
"""
|
| 1289 |
+
if self._debug:
|
| 1290 |
+
assert 0 <= k_new < self.npt, "The index `k_new` is not valid."
|
| 1291 |
+
assert x_new.shape == (self.n,), \
|
| 1292 |
+
"The shape of `x_new` is not valid."
|
| 1293 |
+
assert isinstance(fun_val, float), \
|
| 1294 |
+
"The function value is not valid."
|
| 1295 |
+
assert cub_val.shape == (
|
| 1296 |
+
self.m_nonlinear_ub,
|
| 1297 |
+
), "The shape of `cub_val` is not valid."
|
| 1298 |
+
assert ceq_val.shape == (
|
| 1299 |
+
self.m_nonlinear_eq,
|
| 1300 |
+
), "The shape of `ceq_val` is not valid."
|
| 1301 |
+
|
| 1302 |
+
# Compute the updates in the interpolation conditions.
|
| 1303 |
+
fun_diff = np.zeros(self.npt)
|
| 1304 |
+
cub_diff = np.zeros(self.cub_val.shape)
|
| 1305 |
+
ceq_diff = np.zeros(self.ceq_val.shape)
|
| 1306 |
+
fun_diff[k_new] = fun_val - self.fun(x_new)
|
| 1307 |
+
cub_diff[k_new, :] = cub_val - self.cub(x_new)
|
| 1308 |
+
ceq_diff[k_new, :] = ceq_val - self.ceq(x_new)
|
| 1309 |
+
|
| 1310 |
+
# Update the function values.
|
| 1311 |
+
self.fun_val[k_new] = fun_val
|
| 1312 |
+
self.cub_val[k_new, :] = cub_val
|
| 1313 |
+
self.ceq_val[k_new, :] = ceq_val
|
| 1314 |
+
|
| 1315 |
+
# Update the interpolation set.
|
| 1316 |
+
dir_old = np.copy(self.interpolation.xpt[:, k_new])
|
| 1317 |
+
self.interpolation.xpt[:, k_new] = x_new - self.interpolation.x_base
|
| 1318 |
+
|
| 1319 |
+
# Update the quadratic models.
|
| 1320 |
+
ill_conditioned = self._fun.update(
|
| 1321 |
+
self.interpolation,
|
| 1322 |
+
k_new,
|
| 1323 |
+
dir_old,
|
| 1324 |
+
fun_diff,
|
| 1325 |
+
)
|
| 1326 |
+
for i in range(self.m_nonlinear_ub):
|
| 1327 |
+
ill_conditioned = ill_conditioned or self._cub[i].update(
|
| 1328 |
+
self.interpolation,
|
| 1329 |
+
k_new,
|
| 1330 |
+
dir_old,
|
| 1331 |
+
cub_diff[:, i],
|
| 1332 |
+
)
|
| 1333 |
+
for i in range(self.m_nonlinear_eq):
|
| 1334 |
+
ill_conditioned = ill_conditioned or self._ceq[i].update(
|
| 1335 |
+
self.interpolation,
|
| 1336 |
+
k_new,
|
| 1337 |
+
dir_old,
|
| 1338 |
+
ceq_diff[:, i],
|
| 1339 |
+
)
|
| 1340 |
+
if self._debug:
|
| 1341 |
+
self._check_interpolation_conditions()
|
| 1342 |
+
return ill_conditioned
|
| 1343 |
+
|
| 1344 |
+
def determinants(self, x_new, k_new=None):
|
| 1345 |
+
"""
|
| 1346 |
+
Compute the normalized determinants of the new interpolation systems.
|
| 1347 |
+
|
| 1348 |
+
Parameters
|
| 1349 |
+
----------
|
| 1350 |
+
x_new : `numpy.ndarray`, shape (n,)
|
| 1351 |
+
New interpolation point. Its value is interpreted as relative to
|
| 1352 |
+
the origin, not the base point.
|
| 1353 |
+
k_new : int, optional
|
| 1354 |
+
Index of the updated interpolation point. If `k_new` is not
|
| 1355 |
+
specified, all the possible determinants are computed.
|
| 1356 |
+
|
| 1357 |
+
Returns
|
| 1358 |
+
-------
|
| 1359 |
+
{float, `numpy.ndarray`, shape (npt,)}
|
| 1360 |
+
Determinant(s) of the new interpolation system.
|
| 1361 |
+
|
| 1362 |
+
Raises
|
| 1363 |
+
------
|
| 1364 |
+
`numpy.linalg.LinAlgError`
|
| 1365 |
+
If the interpolation system is ill-defined.
|
| 1366 |
+
|
| 1367 |
+
Notes
|
| 1368 |
+
-----
|
| 1369 |
+
The determinants are normalized by the determinant of the current
|
| 1370 |
+
interpolation system. For stability reasons, the calculations are done
|
| 1371 |
+
using the formula (2.12) in [1]_.
|
| 1372 |
+
|
| 1373 |
+
References
|
| 1374 |
+
----------
|
| 1375 |
+
.. [1] M. J. D. Powell. On updating the inverse of a KKT matrix.
|
| 1376 |
+
Technical Report DAMTP 2004/NA01, Department of Applied Mathematics
|
| 1377 |
+
and Theoretical Physics, University of Cambridge, Cambridge, UK,
|
| 1378 |
+
2004.
|
| 1379 |
+
"""
|
| 1380 |
+
if self._debug:
|
| 1381 |
+
assert x_new.shape == (self.n,), \
|
| 1382 |
+
"The shape of `x_new` is not valid."
|
| 1383 |
+
assert (
|
| 1384 |
+
k_new is None or 0 <= k_new < self.npt
|
| 1385 |
+
), "The index `k_new` is not valid."
|
| 1386 |
+
|
| 1387 |
+
# Compute the values independent of k_new.
|
| 1388 |
+
shift = x_new - self.interpolation.x_base
|
| 1389 |
+
new_col = np.empty((self.npt + self.n + 1, 1))
|
| 1390 |
+
new_col[: self.npt, 0] = (
|
| 1391 |
+
0.5 * (self.interpolation.xpt.T @ shift) ** 2.0)
|
| 1392 |
+
new_col[self.npt, 0] = 1.0
|
| 1393 |
+
new_col[self.npt + 1:, 0] = shift
|
| 1394 |
+
inv_new_col = Quadratic.solve_systems(self.interpolation, new_col)[0]
|
| 1395 |
+
beta = 0.5 * (shift @ shift) ** 2.0 - new_col[:, 0] @ inv_new_col[:, 0]
|
| 1396 |
+
|
| 1397 |
+
# Compute the values that depend on k.
|
| 1398 |
+
if k_new is None:
|
| 1399 |
+
coord_vec = np.eye(self.npt + self.n + 1, self.npt)
|
| 1400 |
+
alpha = np.diag(
|
| 1401 |
+
Quadratic.solve_systems(
|
| 1402 |
+
self.interpolation,
|
| 1403 |
+
coord_vec,
|
| 1404 |
+
)[0]
|
| 1405 |
+
)
|
| 1406 |
+
tau = inv_new_col[: self.npt, 0]
|
| 1407 |
+
else:
|
| 1408 |
+
coord_vec = np.eye(self.npt + self.n + 1, 1, -k_new)
|
| 1409 |
+
alpha = Quadratic.solve_systems(
|
| 1410 |
+
self.interpolation,
|
| 1411 |
+
coord_vec,
|
| 1412 |
+
)[
|
| 1413 |
+
0
|
| 1414 |
+
][k_new, 0]
|
| 1415 |
+
tau = inv_new_col[k_new, 0]
|
| 1416 |
+
return alpha * beta + tau**2.0
|
| 1417 |
+
|
| 1418 |
+
def shift_x_base(self, new_x_base, options):
|
| 1419 |
+
"""
|
| 1420 |
+
Shift the base point without changing the interpolation set.
|
| 1421 |
+
|
| 1422 |
+
Parameters
|
| 1423 |
+
----------
|
| 1424 |
+
new_x_base : `numpy.ndarray`, shape (n,)
|
| 1425 |
+
New base point.
|
| 1426 |
+
options : dict
|
| 1427 |
+
Options of the solver.
|
| 1428 |
+
"""
|
| 1429 |
+
if self._debug:
|
| 1430 |
+
assert new_x_base.shape == (
|
| 1431 |
+
self.n,
|
| 1432 |
+
), "The shape of `new_x_base` is not valid."
|
| 1433 |
+
|
| 1434 |
+
# Update the models.
|
| 1435 |
+
self._fun.shift_x_base(self.interpolation, new_x_base)
|
| 1436 |
+
for model in self._cub:
|
| 1437 |
+
model.shift_x_base(self.interpolation, new_x_base)
|
| 1438 |
+
for model in self._ceq:
|
| 1439 |
+
model.shift_x_base(self.interpolation, new_x_base)
|
| 1440 |
+
|
| 1441 |
+
# Update the base point and the interpolation points.
|
| 1442 |
+
shift = new_x_base - self.interpolation.x_base
|
| 1443 |
+
self.interpolation.x_base += shift
|
| 1444 |
+
self.interpolation.xpt -= shift[:, np.newaxis]
|
| 1445 |
+
if options[Options.DEBUG]:
|
| 1446 |
+
self._check_interpolation_conditions()
|
| 1447 |
+
|
| 1448 |
+
def _get_cub(self, mask=None):
|
| 1449 |
+
"""
|
| 1450 |
+
Get the quadratic models of the nonlinear inequality constraints.
|
| 1451 |
+
|
| 1452 |
+
Parameters
|
| 1453 |
+
----------
|
| 1454 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
|
| 1455 |
+
Mask of the quadratic models to return.
|
| 1456 |
+
|
| 1457 |
+
Returns
|
| 1458 |
+
-------
|
| 1459 |
+
`numpy.ndarray`
|
| 1460 |
+
Quadratic models of the nonlinear inequality constraints.
|
| 1461 |
+
"""
|
| 1462 |
+
return self._cub if mask is None else self._cub[mask]
|
| 1463 |
+
|
| 1464 |
+
def _get_ceq(self, mask=None):
|
| 1465 |
+
"""
|
| 1466 |
+
Get the quadratic models of the nonlinear equality constraints.
|
| 1467 |
+
|
| 1468 |
+
Parameters
|
| 1469 |
+
----------
|
| 1470 |
+
mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
|
| 1471 |
+
Mask of the quadratic models to return.
|
| 1472 |
+
|
| 1473 |
+
Returns
|
| 1474 |
+
-------
|
| 1475 |
+
`numpy.ndarray`
|
| 1476 |
+
Quadratic models of the nonlinear equality constraints.
|
| 1477 |
+
"""
|
| 1478 |
+
return self._ceq if mask is None else self._ceq[mask]
|
| 1479 |
+
|
| 1480 |
+
def _check_interpolation_conditions(self):
|
| 1481 |
+
"""
|
| 1482 |
+
Check the interpolation conditions of all quadratic models.
|
| 1483 |
+
"""
|
| 1484 |
+
error_fun = 0.0
|
| 1485 |
+
error_cub = 0.0
|
| 1486 |
+
error_ceq = 0.0
|
| 1487 |
+
for k in range(self.npt):
|
| 1488 |
+
error_fun = np.max(
|
| 1489 |
+
[
|
| 1490 |
+
error_fun,
|
| 1491 |
+
np.abs(
|
| 1492 |
+
self.fun(self.interpolation.point(k)) - self.fun_val[k]
|
| 1493 |
+
),
|
| 1494 |
+
]
|
| 1495 |
+
)
|
| 1496 |
+
error_cub = np.max(
|
| 1497 |
+
np.abs(
|
| 1498 |
+
self.cub(self.interpolation.point(k)) - self.cub_val[k, :]
|
| 1499 |
+
),
|
| 1500 |
+
initial=error_cub,
|
| 1501 |
+
)
|
| 1502 |
+
error_ceq = np.max(
|
| 1503 |
+
np.abs(
|
| 1504 |
+
self.ceq(self.interpolation.point(k)) - self.ceq_val[k, :]
|
| 1505 |
+
),
|
| 1506 |
+
initial=error_ceq,
|
| 1507 |
+
)
|
| 1508 |
+
tol = 10.0 * np.sqrt(EPS) * max(self.n, self.npt)
|
| 1509 |
+
if error_fun > tol * np.max(np.abs(self.fun_val), initial=1.0):
|
| 1510 |
+
warnings.warn(
|
| 1511 |
+
"The interpolation conditions for the objective function are "
|
| 1512 |
+
"not satisfied.",
|
| 1513 |
+
RuntimeWarning,
|
| 1514 |
+
2,
|
| 1515 |
+
)
|
| 1516 |
+
if error_cub > tol * np.max(np.abs(self.cub_val), initial=1.0):
|
| 1517 |
+
warnings.warn(
|
| 1518 |
+
"The interpolation conditions for the inequality constraint "
|
| 1519 |
+
"function are not satisfied.",
|
| 1520 |
+
RuntimeWarning,
|
| 1521 |
+
2,
|
| 1522 |
+
)
|
| 1523 |
+
if error_ceq > tol * np.max(np.abs(self.ceq_val), initial=1.0):
|
| 1524 |
+
warnings.warn(
|
| 1525 |
+
"The interpolation conditions for the equality constraint "
|
| 1526 |
+
"function are not satisfied.",
|
| 1527 |
+
RuntimeWarning,
|
| 1528 |
+
2,
|
| 1529 |
+
)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/problem.py
ADDED
|
@@ -0,0 +1,1296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextlib import suppress
|
| 2 |
+
from inspect import signature
|
| 3 |
+
import copy
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from scipy.optimize import (
|
| 7 |
+
Bounds,
|
| 8 |
+
LinearConstraint,
|
| 9 |
+
NonlinearConstraint,
|
| 10 |
+
OptimizeResult,
|
| 11 |
+
)
|
| 12 |
+
from scipy.optimize._constraints import PreparedConstraint
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
from .settings import PRINT_OPTIONS, BARRIER
|
| 16 |
+
from .utils import CallbackSuccess, get_arrays_tol
|
| 17 |
+
from .utils import exact_1d_array
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class ObjectiveFunction:
    """
    Real-valued objective function.
    """

    def __init__(self, fun, verbose, debug, *args):
        """
        Initialize the objective function.

        Parameters
        ----------
        fun : {callable, None}
            Function to evaluate, or None.

            ``fun(x, *args) -> float``

            where ``x`` is an array with shape (n,) and `args` is a tuple.
        verbose : bool
            Whether to print the function evaluations.
        debug : bool
            Whether to make debugging tests during the execution.
        *args : tuple
            Additional arguments to be passed to the function.
        """
        if debug:
            assert fun is None or callable(fun)
            assert isinstance(verbose, bool)
            assert isinstance(debug, bool)

        self._fun = fun
        self._args = args
        self._verbose = verbose
        self._n_eval = 0

    def __call__(self, x):
        """
        Evaluate the objective function.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the objective function is evaluated.

        Returns
        -------
        float
            Function value at `x`.
        """
        x = np.array(x, dtype=float)
        # A missing objective (feasibility-only problem) evaluates to zero.
        # np.squeeze tolerates callables that return 0-d or 1-element arrays.
        value = (
            0.0
            if self._fun is None
            else float(np.squeeze(self._fun(x, *self._args)))
        )
        # The evaluation counter is incremented even for a missing objective,
        # mirroring the call count seen by the solver.
        self._n_eval += 1
        if self._verbose:
            with np.printoptions(**PRINT_OPTIONS):
                print(f"{self.name}({x}) = {value}")
        return value

    @property
    def n_eval(self):
        """
        Number of function evaluations.

        Returns
        -------
        int
            Number of function evaluations.
        """
        return self._n_eval

    @property
    def name(self):
        """
        Name of the objective function.

        Returns
        -------
        str
            Name of the objective function. An empty string when there is no
            objective, and ``"fun"`` when the callable has no ``__name__``.
        """
        if self._fun is None:
            return ""
        return getattr(self._fun, "__name__", "fun")
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class BoundConstraints:
    """
    Bound constraints ``xl <= x <= xu``.
    """

    def __init__(self, bounds):
        """
        Initialize the bound constraints.

        Parameters
        ----------
        bounds : scipy.optimize.Bounds
            Bound constraints.
        """
        self._xl = np.array(bounds.lb, float)
        self._xu = np.array(bounds.ub, float)

        # Remove the ill-defined bounds: a NaN bound carries no information,
        # so treat that side as unbounded.
        self.xl[np.isnan(self.xl)] = -np.inf
        self.xu[np.isnan(self.xu)] = np.inf

        # Bounds are feasible when each interval is nonempty and no bound
        # forces a variable to take an infinite value.
        self.is_feasible = (
            np.all(self.xl <= self.xu)
            and np.all(self.xl < np.inf)
            and np.all(self.xu > -np.inf)
        )
        # Number of finite bounds, counting lower and upper separately.
        self.m = np.count_nonzero(self.xl > -np.inf) + np.count_nonzero(
            self.xu < np.inf
        )
        # Prepared constraint used to compute violations; built from the
        # original `bounds` object (before the NaN cleanup above).
        self.pcs = PreparedConstraint(bounds, np.ones(bounds.lb.size))

    @property
    def xl(self):
        """
        Lower bound.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Lower bound.
        """
        return self._xl

    @property
    def xu(self):
        """
        Upper bound.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Upper bound.
        """
        return self._xu

    def maxcv(self, x):
        """
        Evaluate the maximum constraint violation.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the maximum constraint violation is evaluated.

        Returns
        -------
        float
            Maximum constraint violation at `x`.
        """
        x = np.asarray(x, dtype=float)
        # Reduce the vector of violations to the scalar promised by the
        # docstring, consistent with LinearConstraints.maxcv and
        # NonlinearConstraints.maxcv.
        return np.max(self.violation(x), initial=0.0)

    def violation(self, x):
        """
        Evaluate the bound-constraint violations.

        NOTE(review): when the bounds are feasible this reports zero for
        *any* `x`, even one outside the bounds — presumably callers only
        pass points already projected onto the feasible set (see `project`).
        """
        if self.is_feasible:
            return np.array([0])
        else:
            return self.pcs.violation(x)

    def project(self, x):
        """
        Project a point onto the feasible set.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point to be projected.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Projection of `x` onto the feasible set. If the bounds are
            infeasible, `x` is returned unchanged.
        """
        return np.clip(x, self.xl, self.xu) if self.is_feasible else x
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
class LinearConstraints:
    """
    Linear constraints ``a_ub @ x <= b_ub`` and ``a_eq @ x == b_eq``.
    """

    def __init__(self, constraints, n, debug):
        """
        Initialize the linear constraints.

        Parameters
        ----------
        constraints : list of LinearConstraint
            Linear constraints.
        n : int
            Number of variables.
        debug : bool
            Whether to make debugging tests during the execution.
        """
        if debug:
            assert isinstance(constraints, list)
            for constraint in constraints:
                assert isinstance(constraint, LinearConstraint)
            assert isinstance(debug, bool)

        self._a_ub = np.empty((0, n))
        self._b_ub = np.empty(0)
        self._a_eq = np.empty((0, n))
        self._b_eq = np.empty(0)
        for constraint in constraints:
            # A row with lb ~= ub (within a tolerance based on the bound
            # magnitudes) is treated as an equality constraint.
            is_equality = np.abs(
                constraint.ub - constraint.lb
            ) <= get_arrays_tol(constraint.lb, constraint.ub)
            if np.any(is_equality):
                # Equality rows use the midpoint of [lb, ub] as right-hand
                # side to absorb the small lb/ub discrepancy.
                self._a_eq = np.vstack((self.a_eq, constraint.A[is_equality]))
                self._b_eq = np.concatenate(
                    (
                        self.b_eq,
                        0.5
                        * (
                            constraint.lb[is_equality]
                            + constraint.ub[is_equality]
                        ),
                    )
                )
            if not np.all(is_equality):
                # Each two-sided inequality lb <= A @ x <= ub is split into
                # the one-sided pair A @ x <= ub and -A @ x <= -lb.
                self._a_ub = np.vstack(
                    (
                        self.a_ub,
                        constraint.A[~is_equality],
                        -constraint.A[~is_equality],
                    )
                )
                self._b_ub = np.concatenate(
                    (
                        self.b_ub,
                        constraint.ub[~is_equality],
                        -constraint.lb[~is_equality],
                    )
                )

        # Remove the ill-defined constraints. NaN coefficients are zeroed,
        # and rows whose right-hand side is NaN (or, for inequalities,
        # infinite — an infinite bound is vacuous on one side) are dropped.
        self.a_ub[np.isnan(self.a_ub)] = 0.0
        self.a_eq[np.isnan(self.a_eq)] = 0.0
        undef_ub = np.isnan(self.b_ub) | np.isinf(self.b_ub)
        undef_eq = np.isnan(self.b_eq)
        self._a_ub = self.a_ub[~undef_ub, :]
        self._b_ub = self.b_ub[~undef_ub]
        self._a_eq = self.a_eq[~undef_eq, :]
        self._b_eq = self.b_eq[~undef_eq]
        # NOTE(review): the prepared constraints are built from the original
        # constraint objects, not from the cleaned arrays above, so
        # `violation` reflects the constraints as supplied by the user.
        self.pcs = [
            PreparedConstraint(c, np.ones(n)) for c in constraints if c.A.size
        ]

    @property
    def a_ub(self):
        """
        Left-hand side matrix of the linear inequality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_ub, n)
            Left-hand side matrix of the linear inequality constraints.
        """
        return self._a_ub

    @property
    def b_ub(self):
        """
        Right-hand side vector of the linear inequality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_ub,)
            Right-hand side vector of the linear inequality constraints.
        """
        return self._b_ub

    @property
    def a_eq(self):
        """
        Left-hand side matrix of the linear equality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_eq, n)
            Left-hand side matrix of the linear equality constraints.
        """
        return self._a_eq

    @property
    def b_eq(self):
        """
        Right-hand side vector of the linear equality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_eq,)
            Right-hand side vector of the linear equality constraints.
        """
        return self._b_eq

    @property
    def m_ub(self):
        """
        Number of linear inequality constraints.

        Returns
        -------
        int
            Number of linear inequality constraints.
        """
        return self.b_ub.size

    @property
    def m_eq(self):
        """
        Number of linear equality constraints.

        Returns
        -------
        int
            Number of linear equality constraints.
        """
        return self.b_eq.size

    def maxcv(self, x):
        """
        Evaluate the maximum constraint violation.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the maximum constraint violation is evaluated.

        Returns
        -------
        float
            Maximum constraint violation at `x`.
        """
        return np.max(self.violation(x), initial=0.0)

    def violation(self, x):
        """
        Evaluate the violations of the original linear constraints at `x`,
        concatenated over all prepared constraints (empty array when there
        are no constraints).
        """
        if len(self.pcs):
            return np.concatenate([pc.violation(x) for pc in self.pcs])
        return np.array([])
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
class NonlinearConstraints:
    """
    Nonlinear constraints ``c_ub(x) <= 0`` and ``c_eq(x) == 0``.
    """

    def __init__(self, constraints, verbose, debug):
        """
        Initialize the nonlinear constraints.

        Parameters
        ----------
        constraints : list
            Nonlinear constraints.
        verbose : bool
            Whether to print the function evaluations.
        debug : bool
            Whether to make debugging tests during the execution.
        """
        if debug:
            assert isinstance(constraints, list)
            for constraint in constraints:
                assert isinstance(constraint, NonlinearConstraint)
            assert isinstance(verbose, bool)
            assert isinstance(debug, bool)

        self._constraints = constraints
        # Prepared constraints; populated lazily on the first __call__,
        # because preparation requires a concrete evaluation point.
        self.pcs = []
        self._verbose = verbose

        # map of indexes for equality and inequality constraints
        self._map_ub = None
        self._map_eq = None
        # Counts stay None until the constraints have been evaluated once.
        self._m_ub = self._m_eq = None

    def __call__(self, x):
        """
        Calculates the residual (slack) for the constraints.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the constraints are evaluated.

        Returns
        -------
        `numpy.ndarray`, shape (m_nonlinear_ub,)
            Nonlinear inequality constraint slack values.
        `numpy.ndarray`, shape (m_nonlinear_eq,)
            Nonlinear equality constraint slack values.
        """
        if not len(self._constraints):
            self._m_eq = self._m_ub = 0
            return np.array([]), np.array([])

        x = np.array(x, dtype=float)
        # first time around the constraints haven't been prepared
        if not len(self.pcs):
            self._map_ub = []
            self._map_eq = []
            self._m_eq = 0
            self._m_ub = 0

            for constraint in self._constraints:
                if not callable(constraint.jac):
                    # having a callable jac prevents constraint.fun from
                    # being evaluated (by finite differences) when preparing
                    # the constraint; the dummy jac/hess are never used for
                    # anything else.
                    c = copy.copy(constraint)
                    c.jac = lambda x0: x0
                    c.hess = lambda x0, v: 0.0
                    pc = PreparedConstraint(c, x)
                else:
                    pc = PreparedConstraint(constraint, x)
                # we're going to be using the same x value again immediately
                # after this initialisation, so mark the cached function
                # value as current to avoid a duplicate evaluation
                pc.fun.f_updated = True

                self.pcs.append(pc)
                idx = np.arange(pc.fun.m)

                # figure out equality and inequality maps
                lb, ub = pc.bounds[0], pc.bounds[1]
                arr_tol = get_arrays_tol(lb, ub)
                is_equality = np.abs(ub - lb) <= arr_tol
                self._map_eq.append(idx[is_equality])
                self._map_ub.append(idx[~is_equality])

                # these values will be corrected to their proper values later
                # (a two-sided inequality expands into up to two slack rows)
                self._m_eq += np.count_nonzero(is_equality)
                self._m_ub += np.count_nonzero(~is_equality)

        c_ub = []
        c_eq = []
        for i, pc in enumerate(self.pcs):
            val = pc.fun.fun(x)
            if self._verbose:
                with np.printoptions(**PRINT_OPTIONS):
                    with suppress(AttributeError):
                        fun_name = self._constraints[i].fun.__name__
                        print(f"{fun_name}({x}) = {val}")

            # separate violations into c_eq and c_ub
            eq_idx = self._map_eq[i]
            ub_idx = self._map_ub[i]

            ub_val = val[ub_idx]
            if len(ub_idx):
                xl = pc.bounds[0][ub_idx]
                xu = pc.bounds[1][ub_idx]

                # calculate slack within lower bound
                finite_xl = xl > -np.inf
                _v = xl[finite_xl] - ub_val[finite_xl]
                c_ub.append(_v)

                # calculate slack within upper bound
                finite_xu = xu < np.inf
                _v = ub_val[finite_xu] - xu[finite_xu]
                c_ub.append(_v)

            # equality constraints taken from midpoint between lb and ub
            eq_val = val[eq_idx]
            if len(eq_idx):
                midpoint = 0.5 * (pc.bounds[1][eq_idx] + pc.bounds[0][eq_idx])
                eq_val -= midpoint
                c_eq.append(eq_val)

        if self._m_eq:
            c_eq = np.concatenate(c_eq)
        else:
            c_eq = np.array([])

        if self._m_ub:
            c_ub = np.concatenate(c_ub)
        else:
            c_ub = np.array([])

        # Record the true slack counts now that infinite one-sided bounds
        # have been dropped.
        self._m_ub = c_ub.size
        self._m_eq = c_eq.size

        return c_ub, c_eq

    @property
    def m_ub(self):
        """
        Number of nonlinear inequality constraints.

        Returns
        -------
        int
            Number of nonlinear inequality constraints.

        Raises
        ------
        ValueError
            If the number of nonlinear inequality constraints is unknown,
            i.e., the constraints have not been evaluated yet.
        """
        if self._m_ub is None:
            raise ValueError(
                "The number of nonlinear inequality constraints is unknown."
            )
        else:
            return self._m_ub

    @property
    def m_eq(self):
        """
        Number of nonlinear equality constraints.

        Returns
        -------
        int
            Number of nonlinear equality constraints.

        Raises
        ------
        ValueError
            If the number of nonlinear equality constraints is unknown,
            i.e., the constraints have not been evaluated yet.
        """
        if self._m_eq is None:
            raise ValueError(
                "The number of nonlinear equality constraints is unknown."
            )
        else:
            return self._m_eq

    @property
    def n_eval(self):
        """
        Number of function evaluations.

        Returns
        -------
        int
            Number of function evaluations. This is the evaluation count of
            the first prepared constraint, used as a proxy for all of them.
        """
        if len(self.pcs):
            return self.pcs[0].fun.nfev
        else:
            return 0

    def maxcv(self, x, cub_val=None, ceq_val=None):
        """
        Evaluate the maximum constraint violation.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the maximum constraint violation is evaluated.
        cub_val : array_like, shape (m_nonlinear_ub,), optional
            Values of the nonlinear inequality constraints. If not provided,
            the nonlinear inequality constraints are evaluated at `x`.
        ceq_val : array_like, shape (m_nonlinear_eq,), optional
            Values of the nonlinear equality constraints. If not provided,
            the nonlinear equality constraints are evaluated at `x`.

        Returns
        -------
        float
            Maximum constraint violation at `x`.
        """
        return np.max(
            self.violation(x, cub_val=cub_val, ceq_val=ceq_val), initial=0.0
        )

    def violation(self, x, cub_val=None, ceq_val=None):
        """
        Concatenated violations of the prepared nonlinear constraints at `x`.

        NOTE(review): `cub_val` and `ceq_val` are accepted for interface
        compatibility but are currently ignored — the constraints are always
        re-evaluated. Also assumes `self.pcs` is non-empty; confirm callers
        only invoke this after a first evaluation.
        """
        return np.concatenate([pc.violation(x) for pc in self.pcs])
|
| 600 |
+
|
| 601 |
+
|
| 602 |
+
class Problem:
|
| 603 |
+
"""
|
| 604 |
+
Optimization problem.
|
| 605 |
+
"""
|
| 606 |
+
|
| 607 |
+
    def __init__(
        self,
        obj,
        x0,
        bounds,
        linear,
        nonlinear,
        callback,
        feasibility_tol,
        scale,
        store_history,
        history_size,
        filter_size,
        debug,
    ):
        """
        Initialize the nonlinear problem.

        The problem is preprocessed to remove all the variables that are fixed
        by the bound constraints.

        Parameters
        ----------
        obj : ObjectiveFunction
            Objective function.
        x0 : array_like, shape (n,)
            Initial guess.
        bounds : BoundConstraints
            Bound constraints.
        linear : LinearConstraints
            Linear constraints.
        nonlinear : NonlinearConstraints
            Nonlinear constraints.
        callback : {callable, None}
            Callback function.
        feasibility_tol : float
            Tolerance on the constraint violation.
        scale : bool
            Whether to scale the problem according to the bounds.
        store_history : bool
            Whether to store the function evaluations.
        history_size : int
            Maximum number of function evaluations to store.
        filter_size : int
            Maximum number of points in the filter.
        debug : bool
            Whether to make debugging tests during the execution.

        Raises
        ------
        TypeError
            If `callback` is neither None nor callable.
        ValueError
            If the sizes of `x0`, `bounds`, and `linear` are inconsistent.
        """
        if debug:
            assert isinstance(obj, ObjectiveFunction)
            assert isinstance(bounds, BoundConstraints)
            assert isinstance(linear, LinearConstraints)
            assert isinstance(nonlinear, NonlinearConstraints)
            assert isinstance(feasibility_tol, float)
            assert isinstance(scale, bool)
            assert isinstance(store_history, bool)
            assert isinstance(history_size, int)
            if store_history:
                assert history_size > 0
            assert isinstance(filter_size, int)
            assert filter_size > 0
            assert isinstance(debug, bool)

        self._obj = obj
        # Note: self._linear is reassigned below once the fixed variables
        # have been eliminated.
        self._linear = linear
        self._nonlinear = nonlinear
        if callback is not None:
            if not callable(callback):
                raise TypeError("The callback must be a callable function.")
        self._callback = callback

        # Check the consistency of the problem.
        x0 = exact_1d_array(x0, "The initial guess must be a vector.")
        n = x0.size
        if bounds.xl.size != n:
            raise ValueError(f"The bounds must have {n} elements.")
        if linear.a_ub.shape[1] != n:
            raise ValueError(
                f"The left-hand side matrices of the linear constraints must "
                f"have {n} columns."
            )

        # Check which variables are fixed. A variable is fixed when its
        # bounds form a (nonempty) interval of width below the tolerance.
        tol = get_arrays_tol(bounds.xl, bounds.xu)
        self._fixed_idx = (bounds.xl <= bounds.xu) & (
            np.abs(bounds.xl - bounds.xu) < tol
        )
        # Fix each such variable at the midpoint of its bounds, clipped back
        # into the interval to guard against floating-point rounding.
        self._fixed_val = 0.5 * (
            bounds.xl[self._fixed_idx] + bounds.xu[self._fixed_idx]
        )
        self._fixed_val = np.clip(
            self._fixed_val,
            bounds.xl[self._fixed_idx],
            bounds.xu[self._fixed_idx],
        )

        # Set the bound constraints, restricted to the free variables.
        self._orig_bounds = bounds
        self._bounds = BoundConstraints(
            Bounds(bounds.xl[~self._fixed_idx], bounds.xu[~self._fixed_idx])
        )

        # Set the initial guess (free variables only, projected onto the
        # bounds). This also determines self.n, used below.
        self._x0 = self._bounds.project(x0[~self._fixed_idx])

        # Set the linear constraints. The contribution of the fixed
        # variables is moved into the right-hand sides.
        b_eq = linear.b_eq - linear.a_eq[:, self._fixed_idx] @ self._fixed_val
        self._linear = LinearConstraints(
            [
                LinearConstraint(
                    linear.a_ub[:, ~self._fixed_idx],
                    -np.inf,
                    linear.b_ub
                    - linear.a_ub[:, self._fixed_idx] @ self._fixed_val,
                ),
                LinearConstraint(linear.a_eq[:, ~self._fixed_idx], b_eq, b_eq),
            ],
            self.n,
            debug,
        )

        # Scale the problem if necessary. Scaling is only possible when the
        # reduced bounds describe a nonempty, finite box.
        scale = (
            scale
            and self._bounds.is_feasible
            and np.all(np.isfinite(self._bounds.xl))
            and np.all(np.isfinite(self._bounds.xu))
        )
        if scale:
            # Affine change of variables mapping the box [xl, xu] onto
            # [-1, 1]^n: x_scaled = (x - shift) / factor.
            self._scaling_factor = 0.5 * (self._bounds.xu - self._bounds.xl)
            self._scaling_shift = 0.5 * (self._bounds.xu + self._bounds.xl)
            self._bounds = BoundConstraints(
                Bounds(-np.ones(self.n), np.ones(self.n))
            )
            # Rewrite the linear constraints in the scaled variables.
            b_eq = self._linear.b_eq - self._linear.a_eq @ self._scaling_shift
            self._linear = LinearConstraints(
                [
                    LinearConstraint(
                        self._linear.a_ub @ np.diag(self._scaling_factor),
                        -np.inf,
                        self._linear.b_ub
                        - self._linear.a_ub @ self._scaling_shift,
                    ),
                    LinearConstraint(
                        self._linear.a_eq @ np.diag(self._scaling_factor),
                        b_eq,
                        b_eq,
                    ),
                ],
                self.n,
                debug,
            )
            self._x0 = (self._x0 - self._scaling_shift) / self._scaling_factor
        else:
            # Identity scaling, so build_x can apply it unconditionally.
            self._scaling_factor = np.ones(self.n)
            self._scaling_shift = np.zeros(self.n)

        # Set the initial filter (nondominated (fun, maxcv, x) triples).
        self._feasibility_tol = feasibility_tol
        self._filter_size = filter_size
        self._fun_filter = []
        self._maxcv_filter = []
        self._x_filter = []

        # Set the initial history.
        self._store_history = store_history
        self._history_size = history_size
        self._fun_history = []
        self._maxcv_history = []
        self._x_history = []
|
| 777 |
+
|
| 778 |
+
    def __call__(self, x, penalty=0.0):
        """
        Evaluate the objective and nonlinear constraint functions.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the functions are evaluated.
        penalty : float, optional
            Penalty parameter used to select the point in the filter to forward
            to the callback function.

        Returns
        -------
        float
            Objective function value.
        `numpy.ndarray`, shape (m_nonlinear_ub,)
            Nonlinear inequality constraint function values.
        `numpy.ndarray`, shape (m_nonlinear_eq,)
            Nonlinear equality constraint function values.

        Raises
        ------
        `cobyqa.utils.CallbackSuccess`
            If the callback function raises a ``StopIteration``.
        """
        # Evaluate the objective and nonlinear constraint functions.
        # build_x (defined elsewhere in this class) reinserts the fixed
        # variables and undoes the scaling before the user functions see x.
        x = np.asarray(x, dtype=float)
        x_full = self.build_x(x)
        fun_val = self._obj(x_full)
        cub_val, ceq_val = self._nonlinear(x_full)
        maxcv_val = self.maxcv(x, cub_val, ceq_val)
        # Optionally record the evaluation, keeping only the most recent
        # history_size entries.
        if self._store_history:
            self._fun_history.append(fun_val)
            self._maxcv_history.append(maxcv_val)
            self._x_history.append(x)
            if len(self._fun_history) > self._history_size:
                self._fun_history.pop(0)
                self._maxcv_history.pop(0)
                self._x_history.pop(0)

        # Add the point to the filter if it is not dominated by any point.
        # NaN values are treated as worse than any number, so a NaN entry is
        # only kept if no finite entry dominates it.
        if np.isnan(fun_val) and np.isnan(maxcv_val):
            include_point = len(self._fun_filter) == 0
        elif np.isnan(fun_val):
            include_point = all(
                np.isnan(fun_filter)
                and maxcv_val < maxcv_filter
                or np.isnan(maxcv_filter)
                for fun_filter, maxcv_filter in zip(
                    self._fun_filter,
                    self._maxcv_filter,
                )
            )
        elif np.isnan(maxcv_val):
            include_point = all(
                np.isnan(maxcv_filter)
                and fun_val < fun_filter
                or np.isnan(fun_filter)
                for fun_filter, maxcv_filter in zip(
                    self._fun_filter,
                    self._maxcv_filter,
                )
            )
        else:
            include_point = all(
                fun_val < fun_filter or maxcv_val < maxcv_filter
                for fun_filter, maxcv_filter in zip(
                    self._fun_filter,
                    self._maxcv_filter,
                )
            )
        if include_point:
            self._fun_filter.append(fun_val)
            self._maxcv_filter.append(maxcv_val)
            self._x_filter.append(x)

            # Remove the points in the filter that are dominated by the new
            # point. We must iterate in reverse order to avoid problems when
            # removing elements from the list.
            for k in range(len(self._fun_filter) - 2, -1, -1):
                if np.isnan(fun_val):
                    remove_point = np.isnan(self._fun_filter[k])
                elif np.isnan(maxcv_val):
                    remove_point = np.isnan(self._maxcv_filter[k])
                else:
                    # Note: `and` binds tighter than `or`, so a point is
                    # removed when it contains a NaN or when the new point
                    # is at least as good in both criteria.
                    remove_point = (
                        np.isnan(self._fun_filter[k])
                        or np.isnan(self._maxcv_filter[k])
                        or fun_val <= self._fun_filter[k]
                        and maxcv_val <= self._maxcv_filter[k]
                    )
                if remove_point:
                    self._fun_filter.pop(k)
                    self._maxcv_filter.pop(k)
                    self._x_filter.pop(k)

        # Keep only the most recent points in the filter.
        if len(self._fun_filter) > self._filter_size:
            self._fun_filter.pop(0)
            self._maxcv_filter.pop(0)
            self._x_filter.pop(0)

        # Evaluate the callback function after updating the filter to ensure
        # that the current point can be returned by the method.
        if self._callback is not None:
            sig = signature(self._callback)
            try:
                # best_eval (defined elsewhere in this class) selects the
                # best point in the filter for the given penalty parameter.
                x_best, fun_best, _ = self.best_eval(penalty)
                x_best = self.build_x(x_best)
                # A callback with the single parameter `intermediate_result`
                # receives an OptimizeResult (scipy's new-style callback);
                # any other signature receives the point only.
                if set(sig.parameters) == {"intermediate_result"}:
                    intermediate_result = OptimizeResult(
                        x=x_best,
                        fun=fun_best,
                        # maxcv=maxcv_best,
                    )
                    self._callback(intermediate_result=intermediate_result)
                else:
                    self._callback(x_best)
            except StopIteration as exc:
                # StopIteration from the callback means "stop the solver
                # successfully"; translate it into the dedicated exception.
                raise CallbackSuccess from exc

        # Apply the extreme barriers and return: NaN values are replaced by
        # the barrier constant, and all values are clamped to [-B, B] so the
        # models never see NaN or infinite data.
        if np.isnan(fun_val):
            fun_val = BARRIER
        cub_val[np.isnan(cub_val)] = BARRIER
        ceq_val[np.isnan(ceq_val)] = BARRIER
        fun_val = max(min(fun_val, BARRIER), -BARRIER)
        cub_val = np.maximum(np.minimum(cub_val, BARRIER), -BARRIER)
        ceq_val = np.maximum(np.minimum(ceq_val, BARRIER), -BARRIER)
        return fun_val, cub_val, ceq_val
|
| 909 |
+
|
| 910 |
+
@property
def n(self):
    """
    Number of variables of the reduced problem.

    Returns
    -------
    int
        Number of variables.
    """
    return self._x0.size
|
| 921 |
+
|
| 922 |
+
@property
def n_orig(self):
    """
    Number of variables in the original problem (with fixed variables).

    Returns
    -------
    int
        Number of variables, counting the fixed ones.
    """
    return self._fixed_idx.size
|
| 933 |
+
|
| 934 |
+
@property
def x0(self):
    """
    Initial guess for the variables.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Initial guess.
    """
    return self._x0
|
| 945 |
+
|
| 946 |
+
@property
def n_eval(self):
    """
    Number of objective function evaluations performed so far.

    Returns
    -------
    int
        Number of function evaluations.
    """
    return self._obj.n_eval
|
| 957 |
+
|
| 958 |
+
@property
def fun_name(self):
    """
    Name of the objective function.

    Returns
    -------
    str
        Name of the objective function.
    """
    return self._obj.name
|
| 969 |
+
|
| 970 |
+
@property
def bounds(self):
    """
    Bound constraints of the problem.

    Returns
    -------
    BoundConstraints
        Bound constraints.
    """
    return self._bounds
|
| 981 |
+
|
| 982 |
+
@property
def linear(self):
    """
    Linear constraints of the problem.

    Returns
    -------
    LinearConstraints
        Linear constraints.
    """
    return self._linear
|
| 993 |
+
|
| 994 |
+
@property
def m_bounds(self):
    """
    Number of bound constraints.

    Returns
    -------
    int
        Number of bound constraints.
    """
    return self._bounds.m
|
| 1005 |
+
|
| 1006 |
+
@property
def m_linear_ub(self):
    """
    Number of linear inequality constraints.

    Returns
    -------
    int
        Number of linear inequality constraints.
    """
    return self._linear.m_ub
|
| 1017 |
+
|
| 1018 |
+
@property
def m_linear_eq(self):
    """
    Number of linear equality constraints.

    Returns
    -------
    int
        Number of linear equality constraints.
    """
    return self._linear.m_eq
|
| 1029 |
+
|
| 1030 |
+
@property
def m_nonlinear_ub(self):
    """
    Number of nonlinear inequality constraints.

    Returns
    -------
    int
        Number of nonlinear inequality constraints.

    Raises
    ------
    ValueError
        If the number of nonlinear inequality constraints is not yet
        known.
    """
    return self._nonlinear.m_ub
|
| 1046 |
+
|
| 1047 |
+
@property
def m_nonlinear_eq(self):
    """
    Number of nonlinear equality constraints.

    Returns
    -------
    int
        Number of nonlinear equality constraints.

    Raises
    ------
    ValueError
        If the number of nonlinear equality constraints is not yet known.
    """
    return self._nonlinear.m_eq
|
| 1063 |
+
|
| 1064 |
+
@property
def fun_history(self):
    """
    History of objective function evaluations.

    Returns
    -------
    `numpy.ndarray`, shape (n_eval,)
        Recorded objective function values.
    """
    return np.array(self._fun_history, dtype=float)
|
| 1075 |
+
|
| 1076 |
+
@property
def maxcv_history(self):
    """
    History of maximum constraint violations.

    Returns
    -------
    `numpy.ndarray`, shape (n_eval,)
        Recorded maximum constraint violations.
    """
    return np.array(self._maxcv_history, dtype=float)
|
| 1087 |
+
|
| 1088 |
+
@property
def type(self):
    """
    Type of the problem.

    The problem is classified as either 'unconstrained',
    'bound-constrained', 'linearly constrained', or 'nonlinearly
    constrained'.

    Returns
    -------
    str
        Type of the problem.
    """
    try:
        has_nonlinear = self.m_nonlinear_ub > 0 or self.m_nonlinear_eq > 0
    except ValueError:
        # The number of nonlinear constraints is not known. It may be zero
        # if the user provided a nonlinear inequality and/or equality
        # constraint function that returns an empty array. However, as this
        # is not known before the first call to the function, we
        # conservatively assume that the problem is nonlinearly
        # constrained.
        return "nonlinearly constrained"
    if has_nonlinear:
        return "nonlinearly constrained"
    if self.m_linear_ub > 0 or self.m_linear_eq > 0:
        return "linearly constrained"
    if self.m_bounds > 0:
        return "bound-constrained"
    return "unconstrained"
|
| 1117 |
+
|
| 1118 |
+
@property
def is_feasibility(self):
    """
    Whether the problem is a feasibility problem.

    A problem is a feasibility problem when it has no named objective
    function.

    Returns
    -------
    bool
        Whether the problem is a feasibility problem.
    """
    return self._obj.name == ""
|
| 1129 |
+
|
| 1130 |
+
def build_x(self, x):
    """
    Build the full vector of variables from the reduced vector.

    Parameters
    ----------
    x : array_like, shape (n,)
        Reduced vector of variables.

    Returns
    -------
    `numpy.ndarray`, shape (n_orig,)
        Full vector of variables, projected onto the original bounds.
    """
    x_full = np.empty(self.n_orig)
    # Restore the fixed variables, then undo the scaling of the free ones.
    x_full[self._fixed_idx] = self._fixed_val
    free_idx = ~self._fixed_idx
    x_full[free_idx] = x * self._scaling_factor + self._scaling_shift
    return self._orig_bounds.project(x_full)
|
| 1149 |
+
|
| 1150 |
+
def maxcv(self, x, cub_val=None, ceq_val=None):
    """
    Evaluate the maximum constraint violation.

    Parameters
    ----------
    x : array_like, shape (n,)
        Point at which the maximum constraint violation is evaluated.
    cub_val : array_like, shape (m_nonlinear_ub,), optional
        Precomputed values of the nonlinear inequality constraints; if
        omitted, they are evaluated at `x`.
    ceq_val : array_like, shape (m_nonlinear_eq,), optional
        Precomputed values of the nonlinear equality constraints; if
        omitted, they are evaluated at `x`.

    Returns
    -------
    float
        Maximum constraint violation at `x`.
    """
    all_violations = self.violation(x, cub_val=cub_val, ceq_val=ceq_val)
    if not np.count_nonzero(all_violations):
        return 0.0
    return np.max(all_violations, initial=0.0)
|
| 1175 |
+
|
| 1176 |
+
def violation(self, x, cub_val=None, ceq_val=None):
    """
    Evaluate the individual constraint violations at a point.

    Parameters
    ----------
    x : array_like, shape (n,)
        Point at which the constraint violations are evaluated.
    cub_val : array_like, shape (m_nonlinear_ub,), optional
        Values of the nonlinear inequality constraints, forwarded to the
        nonlinear constraints object.
    ceq_val : array_like, shape (m_nonlinear_eq,), optional
        Values of the nonlinear equality constraints, forwarded to the
        nonlinear constraints object.

    Returns
    -------
    `numpy.ndarray` or None
        Concatenated violations of the bound, linear, and nonlinear
        constraints. NOTE(review): when no constraint group contributes,
        this falls through and implicitly returns ``None``; `maxcv`
        tolerates this because ``np.count_nonzero(None)`` appears to
        evaluate to zero — confirm and consider returning an empty array
        instead.
    """
    violation = []
    # NOTE(review): bound violations are collected only when
    # ``self.bounds.is_feasible`` is false — presumably this flag reports
    # whether the bound set is trivially satisfiable; verify against
    # BoundConstraints before relying on it.
    if not self.bounds.is_feasible:
        b = self.bounds.violation(x)
        violation.append(b)

    if len(self.linear.pcs):
        lc = self.linear.violation(x)
        violation.append(lc)
    if len(self._nonlinear.pcs):
        nlc = self._nonlinear.violation(x, cub_val, ceq_val)
        violation.append(nlc)

    if len(violation):
        return np.concatenate(violation)
|
| 1191 |
+
|
| 1192 |
+
def best_eval(self, penalty):
    """
    Return the best point in the filter and the corresponding objective and
    nonlinear constraint function evaluations.

    Parameters
    ----------
    penalty : float
        Penalty parameter used to merge the objective value and the
        maximum constraint violation into a single merit value when no
        point in the filter is feasible.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Best point.
    float
        Corresponding objective function value.
    float
        Corresponding maximum constraint violation.
    """
    # If the filter is empty, i.e., if no function evaluation has been
    # performed, we evaluate the objective and nonlinear constraint
    # functions at the initial guess. N.B.: ``self(self.x0)`` invokes
    # ``__call__``, which populates the filter as a side effect.
    if len(self._fun_filter) == 0:
        self(self.x0)

    # Find the best point in the filter.
    fun_filter = np.array(self._fun_filter)
    maxcv_filter = np.array(self._maxcv_filter)
    x_filter = np.array(self._x_filter)
    finite_idx = np.isfinite(maxcv_filter)
    if np.any(finite_idx):
        # At least one point has a finite maximum constraint violation.
        feasible_idx = maxcv_filter <= self._feasibility_tol
        if np.any(feasible_idx) and not np.all(
            np.isnan(fun_filter[feasible_idx])
        ):
            # At least one point is feasible and has a well-defined
            # objective function value. We select the point with the least
            # objective function value. If there is a tie, we select the
            # point with the least maximum constraint violation. If there
            # is still a tie, we select the most recent point.
            fun_min_idx = feasible_idx & (
                fun_filter <= np.nanmin(fun_filter[feasible_idx])
            )
            if np.count_nonzero(fun_min_idx) > 1:
                fun_min_idx &= maxcv_filter <= np.min(
                    maxcv_filter[fun_min_idx]
                )
            i = np.flatnonzero(fun_min_idx)[-1]
        elif np.any(feasible_idx):
            # At least one point is feasible but no feasible point has a
            # well-defined objective function value. We select the most
            # recent feasible point.
            i = np.flatnonzero(feasible_idx)[-1]
        else:
            # No point is feasible. We first compute the merit function
            # value for each point.
            merit_filter = np.full_like(fun_filter, np.nan)
            merit_filter[finite_idx] = (
                fun_filter[finite_idx] + penalty * maxcv_filter[finite_idx]
            )
            if np.all(np.isnan(merit_filter)):
                # No point has a well-defined merit function value. In
                # other words, among the points with a well-defined maximum
                # constraint violation, none has a well-defined objective
                # function value. We select the point with the least
                # maximum constraint violation. If there is a tie, we
                # select the most recent point.
                min_maxcv_idx = maxcv_filter <= np.nanmin(maxcv_filter)
                i = np.flatnonzero(min_maxcv_idx)[-1]
            else:
                # At least one point has a well-defined merit function
                # value. We select the point with the least merit function
                # value. If there is a tie, we select the point with the
                # least maximum constraint violation. If there is still a
                # tie, we select the point with the least objective
                # function value. If there is still a tie, we select the
                # most recent point.
                merit_min_idx = merit_filter <= np.nanmin(merit_filter)
                if np.count_nonzero(merit_min_idx) > 1:
                    merit_min_idx &= maxcv_filter <= np.min(
                        maxcv_filter[merit_min_idx]
                    )

                if np.count_nonzero(merit_min_idx) > 1:
                    merit_min_idx &= fun_filter <= np.min(
                        fun_filter[merit_min_idx]
                    )
                i = np.flatnonzero(merit_min_idx)[-1]
    elif not np.all(np.isnan(fun_filter)):
        # No maximum constraint violation is well-defined but at least one
        # point has a well-defined objective function value. We select the
        # point with the least objective function value. If there is a tie,
        # we select the most recent point.
        fun_min_idx = fun_filter <= np.nanmin(fun_filter)
        i = np.flatnonzero(fun_min_idx)[-1]
    else:
        # No point has a well-defined maximum constraint violation or
        # objective function value. We select the most recent point.
        i = len(fun_filter) - 1
    return (
        self.bounds.project(x_filter[i, :]),
        fun_filter[i],
        maxcv_filter[i],
    )
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
from enum import Enum
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Exit status.
class ExitStatus(Enum):
    """
    Exit statuses of the solver.

    Members ending in ``_SUCCESS`` denote successful terminations, members
    ending in ``_WARNING`` denote terminations on resource limits, and
    members ending in ``_ERROR`` (negative values) denote failures.
    """

    RADIUS_SUCCESS = 0
    TARGET_SUCCESS = 1
    FIXED_SUCCESS = 2
    CALLBACK_SUCCESS = 3
    FEASIBLE_SUCCESS = 4
    MAX_EVAL_WARNING = 5
    MAX_ITER_WARNING = 6
    INFEASIBLE_ERROR = -1
    LINALG_ERROR = -2
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class Options(str, Enum):
    """
    Names of the solver options.

    Each member maps an internal identifier to the string key used in the
    options dictionary (e.g., ``Options.MAX_EVAL`` corresponds to the
    ``"maxfev"`` entry of `DEFAULT_OPTIONS`). Inheriting from `str` lets
    members compare equal to their string values.
    """

    DEBUG = "debug"
    FEASIBILITY_TOL = "feasibility_tol"
    FILTER_SIZE = "filter_size"
    HISTORY_SIZE = "history_size"
    MAX_EVAL = "maxfev"
    MAX_ITER = "maxiter"
    NPT = "nb_points"
    RHOBEG = "radius_init"
    RHOEND = "radius_final"
    SCALE = "scale"
    STORE_HISTORY = "store_history"
    TARGET = "target"
    VERBOSE = "disp"
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Constants(str, Enum):
    """
    Names of the tunable algorithmic constants.

    The string values are the keys of `DEFAULT_CONSTANTS`. Inheriting from
    `str` lets members compare equal to their string values.
    """

    DECREASE_RADIUS_FACTOR = "decrease_radius_factor"
    INCREASE_RADIUS_FACTOR = "increase_radius_factor"
    INCREASE_RADIUS_THRESHOLD = "increase_radius_threshold"
    DECREASE_RADIUS_THRESHOLD = "decrease_radius_threshold"
    DECREASE_RESOLUTION_FACTOR = "decrease_resolution_factor"
    LARGE_RESOLUTION_THRESHOLD = "large_resolution_threshold"
    MODERATE_RESOLUTION_THRESHOLD = "moderate_resolution_threshold"
    LOW_RATIO = "low_ratio"
    HIGH_RATIO = "high_ratio"
    VERY_LOW_RATIO = "very_low_ratio"
    PENALTY_INCREASE_THRESHOLD = "penalty_increase_threshold"
    PENALTY_INCREASE_FACTOR = "penalty_increase_factor"
    SHORT_STEP_THRESHOLD = "short_step_threshold"
    LOW_RADIUS_FACTOR = "low_radius_factor"
    BYRD_OMOJOKUN_FACTOR = "byrd_omojokun_factor"
    THRESHOLD_RATIO_CONSTRAINTS = "threshold_ratio_constraints"
    LARGE_SHIFT_FACTOR = "large_shift_factor"
    LARGE_GRADIENT_FACTOR = "large_gradient_factor"
    RESOLUTION_FACTOR = "resolution_factor"
    IMPROVE_TCG = "improve_tcg"
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# Default options. Entries stored as callables are dimension-dependent
# defaults evaluated with an argument ``n`` (presumably the number of
# variables — confirm at the call site).
DEFAULT_OPTIONS = {
    Options.DEBUG.value: False,
    Options.FEASIBILITY_TOL.value: np.sqrt(np.finfo(float).eps),
    Options.FILTER_SIZE.value: sys.maxsize,
    Options.HISTORY_SIZE.value: sys.maxsize,
    Options.MAX_EVAL.value: lambda n: 500 * n,
    Options.MAX_ITER.value: lambda n: 1000 * n,
    Options.NPT.value: lambda n: 2 * n + 1,
    Options.RHOBEG.value: 1.0,
    Options.RHOEND.value: 1e-6,
    Options.SCALE.value: False,
    Options.STORE_HISTORY.value: False,
    Options.TARGET.value: -np.inf,
    Options.VERBOSE.value: False,
}
|
| 87 |
+
|
| 88 |
+
# Default constants. Keyed by the string values of `Constants`; these are
# the tunable algorithmic parameters of the solver.
DEFAULT_CONSTANTS = {
    Constants.DECREASE_RADIUS_FACTOR.value: 0.5,
    Constants.INCREASE_RADIUS_FACTOR.value: np.sqrt(2.0),
    Constants.INCREASE_RADIUS_THRESHOLD.value: 2.0,
    Constants.DECREASE_RADIUS_THRESHOLD.value: 1.4,
    Constants.DECREASE_RESOLUTION_FACTOR.value: 0.1,
    Constants.LARGE_RESOLUTION_THRESHOLD.value: 250.0,
    Constants.MODERATE_RESOLUTION_THRESHOLD.value: 16.0,
    Constants.LOW_RATIO.value: 0.1,
    Constants.HIGH_RATIO.value: 0.7,
    Constants.VERY_LOW_RATIO.value: 0.01,
    Constants.PENALTY_INCREASE_THRESHOLD.value: 1.5,
    Constants.PENALTY_INCREASE_FACTOR.value: 2.0,
    Constants.SHORT_STEP_THRESHOLD.value: 0.5,
    Constants.LOW_RADIUS_FACTOR.value: 0.1,
    Constants.BYRD_OMOJOKUN_FACTOR.value: 0.8,
    Constants.THRESHOLD_RATIO_CONSTRAINTS.value: 2.0,
    Constants.LARGE_SHIFT_FACTOR.value: 10.0,
    Constants.LARGE_GRADIENT_FACTOR.value: 10.0,
    Constants.RESOLUTION_FACTOR.value: 2.0,
    Constants.IMPROVE_TCG.value: True,
}
|
| 111 |
+
|
| 112 |
+
# Printing options. The keys match keyword arguments accepted by
# `numpy.printoptions` / `numpy.set_printoptions` — presumably passed to one
# of them when displaying arrays; confirm at the call site.
PRINT_OPTIONS = {
    "threshold": 6,
    "edgeitems": 2,
    "linewidth": sys.maxsize,
    "formatter": {
        "float_kind": lambda x: np.format_float_scientific(
            x,
            precision=3,
            unique=False,
            pad_left=2,
        )
    },
}
|
| 126 |
+
|
| 127 |
+
# Constants.
# Large finite value used as an extreme barrier for undefined or huge
# function/constraint values. The exponent is capped at 100 and at half the
# floating-point exponent range — presumably so that products or squares of
# barrier-sized values remain finite.
BARRIER = 2.0 ** min(
    100,
    np.finfo(float).maxexp // 2,
    -np.finfo(float).minexp // 2,
)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .geometry import cauchy_geometry, spider_geometry
from .optim import (
    tangential_byrd_omojokun,
    constrained_tangential_byrd_omojokun,
    normal_byrd_omojokun,
)

# Public API of the subsolvers subpackage.
__all__ = [
    "cauchy_geometry",
    "spider_geometry",
    "tangential_byrd_omojokun",
    "constrained_tangential_byrd_omojokun",
    "normal_byrd_omojokun",
]
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (440 Bytes). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc
ADDED
|
Binary file (8.63 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc
ADDED
|
Binary file (22.3 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from ..utils import get_arrays_tol
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Smallest positive normal double. Used as a scale factor in comparisons
# (e.g. ``s_norm > TINY * delta``) so that subsequent divisions are safe.
TINY = np.finfo(float).tiny
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def cauchy_geometry(const, grad, curv, xl, xu, delta, debug):
    r"""
    Approximately maximize the absolute value of a quadratic function over
    bound constraints intersected with a trust region.

    The problem solved approximately is

    .. math::

        \max_{s \in \mathbb{R}^n} \quad \bigg\lvert c + g^{\mathsf{T}} s +
        \frac{1}{2} s^{\mathsf{T}} H s \bigg\rvert \quad \text{s.t.} \quad
        \left\{ \begin{array}{l}
            l \le s \le u,\\
            \lVert s \rVert \le \Delta,
        \end{array} \right.

    by maximizing the quadratic along the constrained Cauchy direction.

    Parameters
    ----------
    const : float
        Constant :math:`c` as shown above.
    grad : `numpy.ndarray`, shape (n,)
        Gradient :math:`g` as shown above.
    curv : callable
        Curvature of :math:`H` along any vector;
        ``curv(s) -> float`` returns :math:`s^{\mathsf{T}} H s`.
    xl : `numpy.ndarray`, shape (n,)
        Lower bounds :math:`l` as shown above.
    xu : `numpy.ndarray`, shape (n,)
        Upper bounds :math:`u` as shown above.
    delta : float
        Trust-region radius :math:`\Delta`; assumed finite and positive.
    debug : bool
        Whether to run debugging assertions during the execution.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Approximate maximizer :math:`s`.

    Notes
    -----
    This is the first alternative described in Section 6.5 of [1]_. The
    origin is assumed to be feasible with respect to the bound constraints.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization
       Methods and Software*. PhD thesis, Department of Applied
       Mathematics, The Hong Kong Polytechnic University, Hong Kong, China,
       2022. URL: https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if debug:
        assert isinstance(const, float)
        assert isinstance(grad, np.ndarray) and grad.ndim == 1
        assert inspect.signature(curv).bind(grad)
        assert isinstance(xl, np.ndarray) and xl.shape == grad.shape
        assert isinstance(xu, np.ndarray) and xu.shape == grad.shape
        assert isinstance(delta, float)
        assert isinstance(debug, bool)
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= tol)
        assert np.all(xu >= -tol)
        assert np.isfinite(delta) and delta > 0.0
    # Ensure the origin stays feasible after rounding errors in the bounds.
    xl = np.minimum(xl, 0.0)
    xu = np.maximum(xu, 0.0)

    # |q(s)| is maximized by maximizing either q(s) or -q(s) along the
    # constrained Cauchy direction, keeping whichever attains the larger
    # absolute value.
    step_pos, q_pos = _cauchy_geom(const, grad, curv, xl, xu, delta, debug)
    step_neg, q_neg = _cauchy_geom(
        -const, -grad, lambda s: -curv(s), xl, xu, delta, debug
    )
    step = step_neg if abs(q_neg) > abs(q_pos) else step_pos

    if debug:
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.linalg.norm(step) < 1.1 * delta
    return step
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def spider_geometry(const, grad, curv, xpt, xl, xu, delta, debug):
    r"""
    Maximize approximately the absolute value of a quadratic function subject
    to bound constraints in a trust region.

    This function solves approximately

    .. math::

        \max_{s \in \mathbb{R}^n} \quad \bigg\lvert c + g^{\mathsf{T}} s +
        \frac{1}{2} s^{\mathsf{T}} H s \bigg\rvert \quad \text{s.t.} \quad
        \left\{ \begin{array}{l}
            l \le s \le u,\\
            \lVert s \rVert \le \Delta,
        \end{array} \right.

    by maximizing the objective function along given straight lines.

    Parameters
    ----------
    const : float
        Constant :math:`c` as shown above.
    grad : `numpy.ndarray`, shape (n,)
        Gradient :math:`g` as shown above.
    curv : callable
        Curvature of :math:`H` along any vector.

            ``curv(s) -> float``

        returns :math:`s^{\mathsf{T}} H s`.
    xpt : `numpy.ndarray`, shape (n, npt)
        Points defining the straight lines. The straight lines considered are
        the ones passing through the origin and the points in `xpt`.
    xl : `numpy.ndarray`, shape (n,)
        Lower bounds :math:`l` as shown above.
    xu : `numpy.ndarray`, shape (n,)
        Upper bounds :math:`u` as shown above.
    delta : float
        Trust-region radius :math:`\Delta` as shown above.
    debug : bool
        Whether to make debugging tests during the execution.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Approximate solution :math:`s`.

    Notes
    -----
    This function is described as the second alternative in Section 6.5 of
    [1]_. It is assumed that the origin is feasible with respect to the bound
    constraints and that `delta` is finite and positive.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
       Kong Polytechnic University, Hong Kong, China, 2022. URL:
       https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if debug:
        assert isinstance(const, float)
        assert isinstance(grad, np.ndarray) and grad.ndim == 1
        assert inspect.signature(curv).bind(grad)
        assert (
            isinstance(xpt, np.ndarray)
            and xpt.ndim == 2
            and xpt.shape[0] == grad.size
        )
        assert isinstance(xl, np.ndarray) and xl.shape == grad.shape
        assert isinstance(xu, np.ndarray) and xu.shape == grad.shape
        assert isinstance(delta, float)
        assert isinstance(debug, bool)
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= tol)
        assert np.all(xu >= -tol)
        assert np.isfinite(delta) and delta > 0.0
    # Ensure the origin stays feasible after rounding errors in the bounds.
    xl = np.minimum(xl, 0.0)
    xu = np.maximum(xu, 0.0)

    # Iterate through the straight lines.
    step = np.zeros_like(grad)
    q_val = const
    s_norm = np.linalg.norm(xpt, axis=0)

    # Set alpha_xl to the step size for the lower-bound constraint and
    # alpha_xu to the step size for the upper-bound constraint.

    # xl.shape = (N,)
    # xpt.shape = (N, M)
    # i_xl_pos.shape = (M, N)
    i_xl_pos = (xl > -np.inf) & (xpt.T > -TINY * xl)
    i_xl_neg = (xl > -np.inf) & (xpt.T < TINY * xl)
    i_xu_pos = (xu < np.inf) & (xpt.T > TINY * xu)
    i_xu_neg = (xu < np.inf) & (xpt.T < -TINY * xu)

    # NOTE(review): the boolean-mask indexing below flattens the (M, N)
    # ratio table into a single 1-D array, so each reduction is taken over
    # the selected entries of *all* lines at once and the scalar result is
    # broadcast to every line k, rather than being computed per line.
    # Confirm this is intentional; the final np.clip keeps the returned
    # step within the bounds regardless.
    # NOTE(review): np.max with initial=np.inf (alpha_xl_neg, alpha_xu_pos)
    # always yields +inf; a per-line minimum ratio would require np.min —
    # verify against the reference COBYQA implementation.

    # (M, N)
    alpha_xl_pos = np.atleast_2d(
        np.broadcast_to(xl, i_xl_pos.shape)[i_xl_pos] / xpt.T[i_xl_pos]
    )
    # (M,)
    alpha_xl_pos = np.max(alpha_xl_pos, axis=1, initial=-np.inf)
    # make sure it's (M,)
    alpha_xl_pos = np.broadcast_to(np.atleast_1d(alpha_xl_pos), xpt.shape[1])

    alpha_xl_neg = np.atleast_2d(
        np.broadcast_to(xl, i_xl_neg.shape)[i_xl_neg] / xpt.T[i_xl_neg]
    )
    alpha_xl_neg = np.max(alpha_xl_neg, axis=1, initial=np.inf)
    alpha_xl_neg = np.broadcast_to(np.atleast_1d(alpha_xl_neg), xpt.shape[1])

    alpha_xu_neg = np.atleast_2d(
        np.broadcast_to(xu, i_xu_neg.shape)[i_xu_neg] / xpt.T[i_xu_neg]
    )
    alpha_xu_neg = np.max(alpha_xu_neg, axis=1, initial=-np.inf)
    alpha_xu_neg = np.broadcast_to(np.atleast_1d(alpha_xu_neg), xpt.shape[1])

    alpha_xu_pos = np.atleast_2d(
        np.broadcast_to(xu, i_xu_pos.shape)[i_xu_pos] / xpt.T[i_xu_pos]
    )
    alpha_xu_pos = np.max(alpha_xu_pos, axis=1, initial=np.inf)
    alpha_xu_pos = np.broadcast_to(np.atleast_1d(alpha_xu_pos), xpt.shape[1])

    for k in range(xpt.shape[1]):
        # Set alpha_tr to the step size for the trust-region constraint.
        if s_norm[k] > TINY * delta:
            alpha_tr = max(delta / s_norm[k], 0.0)
        else:
            # The current straight line is basically zero.
            continue

        alpha_bd_pos = max(min(alpha_xu_pos[k], alpha_xl_neg[k]), 0.0)
        alpha_bd_neg = min(max(alpha_xl_pos[k], alpha_xu_neg[k]), 0.0)

        # Set alpha_quad_pos and alpha_quad_neg to the step size to the extrema
        # of the quadratic function along the positive and negative directions.
        grad_step = grad @ xpt[:, k]
        curv_step = curv(xpt[:, k])
        if (
            grad_step >= 0.0
            and curv_step < -TINY * grad_step
            or grad_step <= 0.0
            and curv_step > -TINY * grad_step
        ):
            alpha_quad_pos = max(-grad_step / curv_step, 0.0)
        else:
            alpha_quad_pos = np.inf
        if (
            grad_step >= 0.0
            and curv_step > TINY * grad_step
            or grad_step <= 0.0
            and curv_step < TINY * grad_step
        ):
            alpha_quad_neg = min(-grad_step / curv_step, 0.0)
        else:
            alpha_quad_neg = -np.inf

        # Select the step that provides the largest value of the objective
        # function if it improves the current best. The best positive step is
        # either the one that reaches the constraints or the one that reaches
        # the extremum of the objective function along the current direction
        # (only possible if the resulting step is feasible). We test both, and
        # we perform similar calculations along the negative step.
        # N.B.: we select the largest possible step among all the ones that
        # maximize the objective function. This is to avoid returning the zero
        # step in some extreme cases.
        alpha_pos = min(alpha_tr, alpha_bd_pos)
        alpha_neg = max(-alpha_tr, alpha_bd_neg)
        q_val_pos = (
            const + alpha_pos * grad_step + 0.5 * alpha_pos**2.0 * curv_step
        )
        q_val_neg = (
            const + alpha_neg * grad_step + 0.5 * alpha_neg**2.0 * curv_step
        )
        if alpha_quad_pos < alpha_pos:
            q_val_quad_pos = (
                const
                + alpha_quad_pos * grad_step
                + 0.5 * alpha_quad_pos**2.0 * curv_step
            )
            if abs(q_val_quad_pos) > abs(q_val_pos):
                alpha_pos = alpha_quad_pos
                q_val_pos = q_val_quad_pos
        if alpha_quad_neg > alpha_neg:
            q_val_quad_neg = (
                const
                + alpha_quad_neg * grad_step
                + 0.5 * alpha_quad_neg**2.0 * curv_step
            )
            if abs(q_val_quad_neg) > abs(q_val_neg):
                alpha_neg = alpha_quad_neg
                q_val_neg = q_val_quad_neg
        if abs(q_val_pos) >= abs(q_val_neg) and abs(q_val_pos) > abs(q_val):
            step = np.clip(alpha_pos * xpt[:, k], xl, xu)
            q_val = q_val_pos
        elif abs(q_val_neg) > abs(q_val_pos) and abs(q_val_neg) > abs(q_val):
            step = np.clip(alpha_neg * xpt[:, k], xl, xu)
            q_val = q_val_neg

    if debug:
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.linalg.norm(step) < 1.1 * delta
    return step
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def _cauchy_geom(const, grad, curv, xl, xu, delta, debug):
    """
    Same as `bound_constrained_cauchy_step` without the absolute value.

    The arguments describe the quadratic q(s) = const + grad @ s
    + 0.5 * s @ H s, with ``curv(s)`` returning the curvature term
    ``s @ H s``; `xl` and `xu` are the bounds and `delta` the trust-region
    radius. Returns the chosen step and the value of the quadratic there.
    """
    tiny = np.finfo(float).tiny

    # Build the initial Cauchy direction: every component whose bound lies
    # uphill of the origin is moved all the way to that bound.
    at_lb = (xl < 0.0) & (grad > 0.0)
    at_ub = (xu > 0.0) & (grad < 0.0)
    direction = np.zeros_like(grad)
    direction[at_lb] = xl[at_lb]
    direction[at_ub] = xu[at_ub]

    # If this direction leaves the trust region, repeatedly rescale the
    # still-free components along the gradient, pinning any component that
    # crosses its bound, until the direction becomes feasible.
    if np.linalg.norm(direction) > delta:
        free = at_lb | at_ub
        while True:
            g_norm = np.linalg.norm(grad[free])
            radius_left = np.sqrt(
                delta**2.0 - direction[~free] @ direction[~free]
            )
            if g_norm > tiny * abs(radius_left):
                scale = max(radius_left / g_norm, 0.0)
            else:
                break
            direction[free] = scale * grad[free]

            # Pin the components that now violate their bounds, if any.
            hit_lb = free & (direction < xl)
            hit_ub = free & (direction > xu)
            if not np.any(hit_lb) and not np.any(hit_ub):
                # The rescaled direction is feasible; stop the loop.
                break
            direction[hit_lb] = xl[hit_lb]
            direction[hit_ub] = xu[hit_ub]
            free &= ~(hit_lb | hit_ub)

    # Maximize the quadratic along the Cauchy direction.
    slope = grad @ direction
    if slope >= 0.0:
        # Largest multiple of the direction inside the trust region.
        dir_norm = np.linalg.norm(direction)
        if dir_norm > tiny * delta:
            alpha_tr = max(delta / dir_norm, 0.0)
        else:
            # The Cauchy direction is essentially zero.
            alpha_tr = 0.0

        # Multiple of the direction at the maximizer of the quadratic
        # (finite only when the curvature is sufficiently negative).
        curv_dir = curv(direction)
        if curv_dir < -tiny * slope:
            alpha_quad = max(-slope / curv_dir, 0.0)
        else:
            alpha_quad = np.inf

        # Largest multiple of the direction satisfying the bounds.
        i_lb = (xl > -np.inf) & (direction < tiny * xl)
        i_ub = (xu < np.inf) & (direction > tiny * xu)
        alpha_bd = min(
            np.min(xl[i_lb] / direction[i_lb], initial=np.inf),
            np.min(xu[i_ub] / direction[i_ub], initial=np.inf),
        )

        # Assemble the step and the corresponding quadratic value.
        alpha = min(alpha_tr, alpha_quad, alpha_bd)
        step = np.clip(alpha * direction, xl, xu)
        q_val = const + alpha * slope + 0.5 * alpha**2.0 * curv_dir
    else:
        # Unreachable in exact arithmetic; prevents this function from
        # returning a step that decreases the objective function.
        step = np.zeros_like(grad)
        q_val = const

    if debug:
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.linalg.norm(step) < 1.1 * delta
    return step, q_val
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/optim.py
ADDED
|
@@ -0,0 +1,1203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy.linalg import qr
|
| 5 |
+
|
| 6 |
+
from ..utils import get_arrays_tol
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
TINY = np.finfo(float).tiny
|
| 10 |
+
EPS = np.finfo(float).eps
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def tangential_byrd_omojokun(grad, hess_prod, xl, xu, delta, debug, **kwargs):
    r"""
    Minimize approximately a quadratic function subject to bound constraints in
    a trust region.

    This function solves approximately

    .. math::

        \min_{s \in \mathbb{R}^n} \quad g^{\mathsf{T}} s + \frac{1}{2}
        s^{\mathsf{T}} H s \quad \text{s.t.} \quad
        \left\{ \begin{array}{l}
            l \le s \le u\\
            \lVert s \rVert \le \Delta,
        \end{array} \right.

    using an active-set variation of the truncated conjugate gradient method.

    Parameters
    ----------
    grad : `numpy.ndarray`, shape (n,)
        Gradient :math:`g` as shown above.
    hess_prod : callable
        Product of the Hessian matrix :math:`H` with any vector.

            ``hess_prod(s) -> `numpy.ndarray`, shape (n,)``

        returns the product :math:`H s`.
    xl : `numpy.ndarray`, shape (n,)
        Lower bounds :math:`l` as shown above.
    xu : `numpy.ndarray`, shape (n,)
        Upper bounds :math:`u` as shown above.
    delta : float
        Trust-region radius :math:`\Delta` as shown above.
    debug : bool
        Whether to make debugging tests during the execution.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Approximate solution :math:`s`.

    Other Parameters
    ----------------
    improve_tcg : bool, optional
        If True, a solution generated by the truncated conjugate gradient
        method that is on the boundary of the trust region is improved by
        moving around the trust-region boundary on the two-dimensional space
        spanned by the solution and the gradient of the quadratic function at
        the solution (default is True).

    Notes
    -----
    This function implements Algorithm 6.2 of [1]_. It is assumed that the
    origin is feasible with respect to the bound constraints and that `delta`
    is finite and positive.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
       Kong Polytechnic University, Hong Kong, China, 2022. URL:
       https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if debug:
        assert isinstance(grad, np.ndarray) and grad.ndim == 1
        assert inspect.signature(hess_prod).bind(grad)
        assert isinstance(xl, np.ndarray) and xl.shape == grad.shape
        assert isinstance(xu, np.ndarray) and xu.shape == grad.shape
        assert isinstance(delta, float)
        assert isinstance(debug, bool)
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= tol)
        assert np.all(xu >= -tol)
        assert np.isfinite(delta) and delta > 0.0
    # Clip the bounds so that the origin is feasible even when rounding
    # errors made a bound slightly cross zero.
    xl = np.minimum(xl, 0.0)
    xu = np.maximum(xu, 0.0)

    # Copy the arrays that may be modified by the code below.
    n = grad.size
    grad = np.copy(grad)
    grad_orig = np.copy(grad)

    # Calculate the initial active set. A component is free unless it sits
    # on a bound that the steepest-descent direction would immediately cross.
    free_bd = ((xl < 0.0) | (grad < 0.0)) & ((xu > 0.0) | (grad > 0.0))

    # Set the initial iterate and the initial search direction.
    step = np.zeros_like(grad)
    sd = np.zeros_like(step)
    sd[free_bd] = -grad[free_bd]

    # k counts consecutive conjugate gradient iterations since the last
    # active-set restart; at most one CG iteration per free dimension.
    k = 0
    reduct = 0.0
    boundary_reached = False
    while k < np.count_nonzero(free_bd):
        # Stop the computations if sd is not a descent direction.
        grad_sd = grad @ sd
        if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)):
            break

        # Set alpha_tr to the step size for the trust-region constraint.
        # NOTE(review): _alpha_tr is a module-level helper defined elsewhere
        # in this file; it presumably raises ZeroDivisionError when the step
        # size cannot be computed reliably -- confirm against its definition.
        try:
            alpha_tr = _alpha_tr(step, sd, delta)
        except ZeroDivisionError:
            break

        # Stop the computations if a step along sd is expected to give a
        # relatively small reduction in the objective function.
        if -alpha_tr * grad_sd <= 1e-8 * reduct:
            break

        # Set alpha_quad to the step size for the minimization problem.
        hess_sd = hess_prod(sd)
        curv_sd = sd @ hess_sd
        if curv_sd > TINY * abs(grad_sd):
            alpha_quad = max(-grad_sd / curv_sd, 0.0)
        else:
            # Nonpositive (or negligible) curvature: the quadratic has no
            # finite minimizer along sd.
            alpha_quad = np.inf

        # Stop the computations if the reduction in the objective function
        # provided by an unconstrained step is small.
        alpha = min(alpha_tr, alpha_quad)
        if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct:
            break

        # Set alpha_bd to the step size for the bound constraints.
        i_xl = (xl > -np.inf) & (sd < -TINY * np.abs(xl - step))
        i_xu = (xu < np.inf) & (sd > TINY * np.abs(xu - step))
        all_alpha_xl = np.full_like(step, np.inf)
        all_alpha_xu = np.full_like(step, np.inf)
        all_alpha_xl[i_xl] = np.maximum(
            (xl[i_xl] - step[i_xl]) / sd[i_xl],
            0.0,
        )
        all_alpha_xu[i_xu] = np.maximum(
            (xu[i_xu] - step[i_xu]) / sd[i_xu],
            0.0,
        )
        alpha_xl = np.min(all_alpha_xl)
        alpha_xu = np.min(all_alpha_xu)
        alpha_bd = min(alpha_xl, alpha_xu)

        # Update the iterate. The clip guards against rounding errors
        # pushing a free component marginally outside its bounds.
        alpha = min(alpha, alpha_bd)
        if alpha > 0.0:
            step[free_bd] = np.clip(
                step[free_bd] + alpha * sd[free_bd],
                xl[free_bd],
                xu[free_bd],
            )
            grad += alpha * hess_sd
            reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd)

        if alpha < min(alpha_tr, alpha_bd):
            # The current iteration is a conjugate gradient iteration. Update
            # the search direction so that it is conjugate (with respect to H)
            # to all the previous search directions.
            beta = (grad[free_bd] @ hess_sd[free_bd]) / curv_sd
            sd[free_bd] = beta * sd[free_bd] - grad[free_bd]
            sd[~free_bd] = 0.0
            k += 1
        elif alpha < alpha_tr:
            # The iterate is restricted by a bound constraint. Add this bound
            # constraint to the active set, and restart the calculations.
            if alpha_xl <= alpha:
                i_new = np.argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
            else:
                i_new = np.argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
            free_bd[i_new] = False
            sd[free_bd] = -grad[free_bd]
            sd[~free_bd] = 0.0
            k = 0
        else:
            # The current iterate is on the trust-region boundary. Add all the
            # active bounds to the working set to prepare for the improvement
            # of the solution, and stop the iterations.
            # NOTE(review): _argmin is a module-level helper defined elsewhere
            # in this file (not np.argmin as used above) -- confirm whether it
            # returns all indices attaining the minimum, as the comment about
            # adding "all the active bounds" suggests.
            if alpha_xl <= alpha:
                i_new = _argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_bd[i_new] = False
            if alpha_xu <= alpha:
                i_new = _argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_bd[i_new] = False
            boundary_reached = True
            break

    # Attempt to improve the solution on the trust-region boundary.
    if kwargs.get("improve_tcg", True) and boundary_reached:
        # Remember the TCG solution and its objective value, so that the
        # improvement phase can be undone if it fails to help.
        step_base = np.copy(step)
        step_comparator = grad_orig @ step_base + 0.5 * step_base @ hess_prod(
            step_base
        )

        while np.count_nonzero(free_bd) > 0:
            # Check whether a substantial reduction in the objective function
            # is possible, and set the search direction.
            step_sq = step[free_bd] @ step[free_bd]
            grad_sq = grad[free_bd] @ grad[free_bd]
            grad_step = grad[free_bd] @ step[free_bd]
            # By Cauchy-Schwarz, step_sq * grad_sq - grad_step ** 2 >= 0;
            # the max() guards against rounding errors.
            grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0))
            sd[free_bd] = grad_step * step[free_bd] - step_sq * grad[free_bd]
            sd[~free_bd] = 0.0
            if grad_sd >= -1e-8 * reduct or np.any(
                grad_sd >= -TINY * np.abs(sd[free_bd])
            ):
                break
            sd[free_bd] /= -grad_sd

            # Calculate an upper bound for the tangent of half the angle theta
            # of this alternative iteration. The step will be updated as:
            # step = cos(theta) * step + sin(theta) * sd.
            temp_xl = np.zeros(n)
            temp_xu = np.zeros(n)
            temp_xl[free_bd] = (
                step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xl[free_bd] ** 2.0
            )
            temp_xu[free_bd] = (
                step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xu[free_bd] ** 2.0
            )
            temp_xl[temp_xl > 0.0] = (
                np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0]
            )
            temp_xu[temp_xu > 0.0] = (
                np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0]
            )
            dist_xl = np.maximum(step - xl, 0.0)
            dist_xu = np.maximum(xu - step, 0.0)
            i_xl = temp_xl > TINY * dist_xl
            i_xu = temp_xu > TINY * dist_xu
            all_t_xl = np.ones(n)
            all_t_xu = np.ones(n)
            all_t_xl[i_xl] = np.minimum(
                all_t_xl[i_xl],
                dist_xl[i_xl] / temp_xl[i_xl],
            )
            all_t_xu[i_xu] = np.minimum(
                all_t_xu[i_xu],
                dist_xu[i_xu] / temp_xu[i_xu],
            )
            t_xl = np.min(all_t_xl)
            t_xu = np.min(all_t_xu)
            t_bd = min(t_xl, t_xu)

            # Calculate some curvature information.
            hess_step = hess_prod(step)
            hess_sd = hess_prod(sd)
            curv_step = step @ hess_step
            curv_sd = sd @ hess_sd
            curv_step_sd = step @ hess_sd

            # For a range of equally spaced values of tan(0.5 * theta),
            # calculate the reduction in the objective function that would be
            # obtained by accepting the corresponding angle. The number of
            # samples shrinks with t_bd (between 3 and 20 samples).
            n_samples = 20
            n_samples = int((n_samples - 3) * t_bd + 3)
            t_samples = np.linspace(t_bd / n_samples, t_bd, n_samples)
            sin_values = 2.0 * t_samples / (1.0 + t_samples**2.0)
            all_reduct = sin_values * (
                grad_step * t_samples
                - grad_sd
                - t_samples * curv_step
                + sin_values
                * (t_samples * curv_step_sd - 0.5 * (curv_sd - curv_step))
            )
            if np.all(all_reduct <= 0.0):
                # No reduction in the objective function is obtained.
                break

            # Accept the angle that provides the largest reduction in the
            # objective function, and update the iterate.
            i_max = np.argmax(all_reduct)
            cos_value = (1.0 - t_samples[i_max] ** 2.0) / (
                1.0 + t_samples[i_max] ** 2.0
            )
            step[free_bd] = (
                cos_value * step[free_bd] + sin_values[i_max] * sd[free_bd]
            )
            grad += (cos_value - 1.0) * hess_step + sin_values[i_max] * hess_sd
            reduct += all_reduct[i_max]

            # If the above angle is restricted by bound constraints, add them
            # to the working set, and restart the alternative iteration.
            # Otherwise, the calculations are terminated.
            if t_bd < 1.0 and i_max == n_samples - 1:
                if t_xl <= t_bd:
                    i_new = _argmin(all_t_xl)
                    step[i_new] = xl[i_new]
                    free_bd[i_new] = False
                if t_xu <= t_bd:
                    i_new = _argmin(all_t_xu)
                    step[i_new] = xu[i_new]
                    free_bd[i_new] = False
            else:
                break

        # Ensure that the alternative iteration improves the objective
        # function; otherwise, fall back to the plain TCG solution.
        if grad_orig @ step + 0.5 * step @ hess_prod(step) > step_comparator:
            step = step_base

    if debug:
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.linalg.norm(step) < 1.1 * delta
    return step
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def constrained_tangential_byrd_omojokun(
    grad,
    hess_prod,
    xl,
    xu,
    aub,
    bub,
    aeq,
    delta,
    debug,
    **kwargs,
):
    r"""
    Minimize approximately a quadratic function subject to bound and linear
    constraints in a trust region.

    This function solves approximately

    .. math::

        \min_{s \in \mathbb{R}^n} \quad g^{\mathsf{T}} s + \frac{1}{2}
        s^{\mathsf{T}} H s \quad \text{s.t.} \quad
        \left\{ \begin{array}{l}
            l \le s \le u,\\
            A_{\scriptscriptstyle I} s \le b_{\scriptscriptstyle I},\\
            A_{\scriptscriptstyle E} s = 0,\\
            \lVert s \rVert \le \Delta,
        \end{array} \right.

    using an active-set variation of the truncated conjugate gradient method.

    Parameters
    ----------
    grad : `numpy.ndarray`, shape (n,)
        Gradient :math:`g` as shown above.
    hess_prod : callable
        Product of the Hessian matrix :math:`H` with any vector.

            ``hess_prod(s) -> `numpy.ndarray`, shape (n,)``

        returns the product :math:`H s`.
    xl : `numpy.ndarray`, shape (n,)
        Lower bounds :math:`l` as shown above.
    xu : `numpy.ndarray`, shape (n,)
        Upper bounds :math:`u` as shown above.
    aub : `numpy.ndarray`, shape (m_linear_ub, n)
        Coefficient matrix :math:`A_{\scriptscriptstyle I}` as shown above.
    bub : `numpy.ndarray`, shape (m_linear_ub,)
        Right-hand side :math:`b_{\scriptscriptstyle I}` as shown above.
    aeq : `numpy.ndarray`, shape (m_linear_eq, n)
        Coefficient matrix :math:`A_{\scriptscriptstyle E}` as shown above.
    delta : float
        Trust-region radius :math:`\Delta` as shown above.
    debug : bool
        Whether to make debugging tests during the execution.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Approximate solution :math:`s`.

    Other Parameters
    ----------------
    improve_tcg : bool, optional
        If True, a solution generated by the truncated conjugate gradient
        method that is on the boundary of the trust region is improved by
        moving around the trust-region boundary on the two-dimensional space
        spanned by the solution and the gradient of the quadratic function at
        the solution (default is True).

    Notes
    -----
    This function implements Algorithm 6.3 of [1]_. It is assumed that the
    origin is feasible with respect to the bound and linear constraints, and
    that `delta` is finite and positive.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
       Kong Polytechnic University, Hong Kong, China, 2022. URL:
       https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if debug:
        assert isinstance(grad, np.ndarray) and grad.ndim == 1
        assert inspect.signature(hess_prod).bind(grad)
        assert isinstance(xl, np.ndarray) and xl.shape == grad.shape
        assert isinstance(xu, np.ndarray) and xu.shape == grad.shape
        assert (
            isinstance(aub, np.ndarray)
            and aub.ndim == 2
            and aub.shape[1] == grad.size
        )
        assert (
            isinstance(bub, np.ndarray)
            and bub.ndim == 1
            and bub.size == aub.shape[0]
        )
        assert (
            isinstance(aeq, np.ndarray)
            and aeq.ndim == 2
            and aeq.shape[1] == grad.size
        )
        assert isinstance(delta, float)
        assert isinstance(debug, bool)
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= tol)
        assert np.all(xu >= -tol)
        assert np.all(bub >= -tol)
        assert np.isfinite(delta) and delta > 0.0
    # The origin is assumed feasible; clip the data so that it is exactly so,
    # guarding against computer rounding errors in the caller.
    xl = np.minimum(xl, 0.0)
    xu = np.maximum(xu, 0.0)
    bub = np.maximum(bub, 0.0)

    # Copy the arrays that may be modified by the code below.
    n = grad.size
    grad = np.copy(grad)
    grad_orig = np.copy(grad)

    # Calculate the initial active set.
    free_xl = (xl < 0.0) | (grad < 0.0)
    free_xu = (xu > 0.0) | (grad > 0.0)
    free_ub = (bub > 0.0) | (aub @ grad > 0.0)
    n_act, q = qr_tangential_byrd_omojokun(aub, aeq, free_xl, free_xu, free_ub)

    # Set the initial iterate and the initial search direction.
    step = np.zeros_like(grad)
    sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad)
    resid = np.copy(bub)

    k = 0
    reduct = 0.0
    boundary_reached = False
    while k < n - n_act:
        # Stop the computations if sd is not a descent direction.
        grad_sd = grad @ sd
        if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)):
            break

        # Set alpha_tr to the step size for the trust-region constraint.
        try:
            alpha_tr = _alpha_tr(step, sd, delta)
        except ZeroDivisionError:
            break

        # Stop the computations if a step along sd is expected to give a
        # relatively small reduction in the objective function.
        if -alpha_tr * grad_sd <= 1e-8 * reduct:
            break

        # Set alpha_quad to the step size for the minimization problem.
        hess_sd = hess_prod(sd)
        curv_sd = sd @ hess_sd
        if curv_sd > TINY * abs(grad_sd):
            alpha_quad = max(-grad_sd / curv_sd, 0.0)
        else:
            alpha_quad = np.inf

        # Stop the computations if the reduction in the objective function
        # provided by an unconstrained step is small.
        alpha = min(alpha_tr, alpha_quad)
        if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct:
            break

        # Set alpha_bd to the step size for the bound constraints.
        i_xl = free_xl & (xl > -np.inf) & (sd < -TINY * np.abs(xl - step))
        i_xu = free_xu & (xu < np.inf) & (sd > TINY * np.abs(xu - step))
        all_alpha_xl = np.full_like(step, np.inf)
        all_alpha_xu = np.full_like(step, np.inf)
        all_alpha_xl[i_xl] = np.maximum(
            (xl[i_xl] - step[i_xl]) / sd[i_xl],
            0.0,
        )
        all_alpha_xu[i_xu] = np.maximum(
            (xu[i_xu] - step[i_xu]) / sd[i_xu],
            0.0,
        )
        alpha_xl = np.min(all_alpha_xl)
        alpha_xu = np.min(all_alpha_xu)
        alpha_bd = min(alpha_xl, alpha_xu)

        # Set alpha_ub to the step size for the linear constraints.
        aub_sd = aub @ sd
        i_ub = free_ub & (aub_sd > TINY * np.abs(resid))
        all_alpha_ub = np.full_like(bub, np.inf)
        all_alpha_ub[i_ub] = resid[i_ub] / aub_sd[i_ub]
        alpha_ub = np.min(all_alpha_ub, initial=np.inf)

        # Update the iterate.
        alpha = min(alpha, alpha_bd, alpha_ub)
        if alpha > 0.0:
            step = np.clip(step + alpha * sd, xl, xu)
            grad += alpha * hess_sd
            resid = np.maximum(0.0, resid - alpha * aub_sd)
            reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd)

        if alpha < min(alpha_tr, alpha_bd, alpha_ub):
            # The current iteration is a conjugate gradient iteration. Update
            # the search direction so that it is conjugate (with respect to H)
            # to all the previous search directions.
            grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad)
            beta = (grad_proj @ hess_sd) / curv_sd
            sd = beta * sd - grad_proj
            k += 1
        elif alpha < alpha_tr:
            # The iterate is restricted by a bound/linear constraint. Add this
            # constraint to the active set, and restart the calculations.
            if alpha_xl <= alpha:
                i_new = np.argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_xl[i_new] = False
            elif alpha_xu <= alpha:
                i_new = np.argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_xu[i_new] = False
            else:
                i_new = np.argmin(all_alpha_ub)
                free_ub[i_new] = False
            n_act, q = qr_tangential_byrd_omojokun(
                aub,
                aeq,
                free_xl,
                free_xu,
                free_ub,
            )
            sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad)
            k = 0
        else:
            # The current iterate is on the trust-region boundary. Add all the
            # active bound/linear constraints to the working set to prepare for
            # the improvement of the solution, and stop the iterations.
            if alpha_xl <= alpha:
                i_new = _argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_xl[i_new] = False
            if alpha_xu <= alpha:
                i_new = _argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_xu[i_new] = False
            if alpha_ub <= alpha:
                i_new = _argmin(all_alpha_ub)
                free_ub[i_new] = False
            n_act, q = qr_tangential_byrd_omojokun(
                aub,
                aeq,
                free_xl,
                free_xu,
                free_ub,
            )
            boundary_reached = True
            break

    # Attempt to improve the solution on the trust-region boundary.
    if kwargs.get("improve_tcg", True) and boundary_reached and n_act < n:
        step_base = np.copy(step)
        while n_act < n:
            # Check whether a substantial reduction in the objective function
            # is possible, and set the search direction.
            step_proj = q[:, n_act:] @ (q[:, n_act:].T @ step)
            grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad)
            step_sq = step_proj @ step_proj
            grad_sq = grad_proj @ grad_proj
            grad_step = grad_proj @ step_proj
            grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0))
            sd = q[:, n_act:] @ (
                q[:, n_act:].T @ (grad_step * step - step_sq * grad)
            )
            if grad_sd >= -1e-8 * reduct or np.any(
                grad_sd >= -TINY * np.abs(sd)
            ):
                break
            sd /= -grad_sd

            # Calculate an upper bound for the tangent of half the angle theta
            # of this alternative iteration for the bound constraints. The step
            # will be updated as:
            # step += (cos(theta) - 1) * step_proj + sin(theta) * sd.
            temp_xl = np.zeros(n)
            temp_xu = np.zeros(n)
            dist_xl = np.maximum(step - xl, 0.0)
            dist_xu = np.maximum(xu - step, 0.0)
            temp_xl[free_xl] = sd[free_xl] ** 2.0 - dist_xl[free_xl] * (
                dist_xl[free_xl] - 2.0 * step_proj[free_xl]
            )
            temp_xu[free_xu] = sd[free_xu] ** 2.0 - dist_xu[free_xu] * (
                dist_xu[free_xu] + 2.0 * step_proj[free_xu]
            )
            temp_xl[temp_xl > 0.0] = (
                np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0]
            )
            temp_xu[temp_xu > 0.0] = (
                np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0]
            )
            i_xl = temp_xl > TINY * dist_xl
            i_xu = temp_xu > TINY * dist_xu
            all_t_xl = np.ones(n)
            all_t_xu = np.ones(n)
            all_t_xl[i_xl] = np.minimum(
                all_t_xl[i_xl],
                dist_xl[i_xl] / temp_xl[i_xl],
            )
            all_t_xu[i_xu] = np.minimum(
                all_t_xu[i_xu],
                dist_xu[i_xu] / temp_xu[i_xu],
            )
            t_xl = np.min(all_t_xl)
            t_xu = np.min(all_t_xu)
            t_bd = min(t_xl, t_xu)

            # Calculate an upper bound for the tangent of half the angle theta
            # of this alternative iteration for the linear constraints.
            temp_ub = np.zeros_like(resid)
            aub_step = aub @ step_proj
            aub_sd = aub @ sd
            temp_ub[free_ub] = aub_sd[free_ub] ** 2.0 - resid[free_ub] * (
                resid[free_ub] + 2.0 * aub_step[free_ub]
            )
            temp_ub[temp_ub > 0.0] = (
                np.sqrt(temp_ub[temp_ub > 0.0]) + aub_sd[temp_ub > 0.0]
            )
            i_ub = temp_ub > TINY * resid
            all_t_ub = np.ones_like(resid)
            all_t_ub[i_ub] = np.minimum(
                all_t_ub[i_ub],
                resid[i_ub] / temp_ub[i_ub],
            )
            t_ub = np.min(all_t_ub, initial=1.0)
            t_min = min(t_bd, t_ub)

            # Calculate some curvature information.
            hess_step = hess_prod(step_proj)
            hess_sd = hess_prod(sd)
            curv_step = step_proj @ hess_step
            curv_sd = sd @ hess_sd
            curv_step_sd = step_proj @ hess_sd

            # For a range of equally spaced values of tan(0.5 * theta),
            # calculate the reduction in the objective function that would be
            # obtained by accepting the corresponding angle.
            n_samples = 20
            n_samples = int((n_samples - 3) * t_min + 3)
            t_samples = np.linspace(t_min / n_samples, t_min, n_samples)
            sin_values = 2.0 * t_samples / (1.0 + t_samples**2.0)
            all_reduct = sin_values * (
                grad_step * t_samples
                - grad_sd
                - sin_values
                * (
                    0.5 * t_samples**2.0 * curv_step
                    - 2.0 * t_samples * curv_step_sd
                    + 0.5 * curv_sd
                )
            )
            if np.all(all_reduct <= 0.0):
                # No reduction in the objective function is obtained.
                break

            # Accept the angle that provides the largest reduction in the
            # objective function, and update the iterate.
            i_max = np.argmax(all_reduct)
            cos_value = (1.0 - t_samples[i_max] ** 2.0) / (
                1.0 + t_samples[i_max] ** 2.0
            )
            step = np.clip(
                step + (cos_value - 1.0) * step_proj + sin_values[i_max] * sd,
                xl,
                xu,
            )
            grad += (cos_value - 1.0) * hess_step + sin_values[i_max] * hess_sd
            resid = np.maximum(
                0.0,
                resid
                - (cos_value - 1.0) * aub_step
                - sin_values[i_max] * aub_sd,
            )
            reduct += all_reduct[i_max]

            # If the above angle is restricted by bound constraints, add them
            # to the working set, and restart the alternative iteration.
            # Otherwise, the calculations are terminated.
            if t_min < 1.0 and i_max == n_samples - 1:
                if t_xl <= t_min:
                    i_new = _argmin(all_t_xl)
                    step[i_new] = xl[i_new]
                    free_xl[i_new] = False
                if t_xu <= t_min:
                    i_new = _argmin(all_t_xu)
                    step[i_new] = xu[i_new]
                    # Bug fix: the UPPER bound becomes active here, so it is
                    # free_xu (not free_xl) that must be cleared. Clearing
                    # free_xl left the active upper bound in the free set,
                    # which drove t_xu (and hence t_min) to ~0 on the next
                    # pass and stalled the improvement loop. This mirrors the
                    # corresponding branch of the main TCG loop above.
                    free_xu[i_new] = False
                if t_ub <= t_min:
                    i_new = _argmin(all_t_ub)
                    free_ub[i_new] = False
                n_act, q = qr_tangential_byrd_omojokun(
                    aub,
                    aeq,
                    free_xl,
                    free_xu,
                    free_ub,
                )
            else:
                break

        # Ensure that the alternative iteration improves the objective
        # function.
        if grad_orig @ step + 0.5 * step @ hess_prod(
            step
        ) > grad_orig @ step_base + 0.5 * step_base @ hess_prod(step_base):
            step = step_base

    if debug:
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.all(aub @ step <= bub + tol)
        assert np.all(np.abs(aeq @ step) <= tol)
        assert np.linalg.norm(step) < 1.1 * delta
    return step
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
def normal_byrd_omojokun(aub, bub, aeq, beq, xl, xu, delta, debug, **kwargs):
    r"""
    Minimize approximately a linear constraint violation subject to bound
    constraints in a trust region.

    This function solves approximately

    .. math::

        \min_{s \in \mathbb{R}^n} \quad \frac{1}{2} \big( \lVert \max \{
        A_{\scriptscriptstyle I} s - b_{\scriptscriptstyle I}, 0 \} \rVert^2 +
        \lVert A_{\scriptscriptstyle E} s - b_{\scriptscriptstyle E} \rVert^2
        \big) \quad \text{s.t.}
        \quad
        \left\{ \begin{array}{l}
            l \le s \le u,\\
            \lVert s \rVert \le \Delta,
        \end{array} \right.

    using a variation of the truncated conjugate gradient method.

    Parameters
    ----------
    aub : `numpy.ndarray`, shape (m_linear_ub, n)
        Matrix :math:`A_{\scriptscriptstyle I}` as shown above.
    bub : `numpy.ndarray`, shape (m_linear_ub,)
        Vector :math:`b_{\scriptscriptstyle I}` as shown above.
    aeq : `numpy.ndarray`, shape (m_linear_eq, n)
        Matrix :math:`A_{\scriptscriptstyle E}` as shown above.
    beq : `numpy.ndarray`, shape (m_linear_eq,)
        Vector :math:`b_{\scriptscriptstyle E}` as shown above.
    xl : `numpy.ndarray`, shape (n,)
        Lower bounds :math:`l` as shown above.
    xu : `numpy.ndarray`, shape (n,)
        Upper bounds :math:`u` as shown above.
    delta : float
        Trust-region radius :math:`\Delta` as shown above.
    debug : bool
        Whether to make debugging tests during the execution.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Approximate solution :math:`s`.

    Other Parameters
    ----------------
    improve_tcg : bool, optional
        If True, a solution generated by the truncated conjugate gradient
        method that is on the boundary of the trust region is improved by
        moving around the trust-region boundary on the two-dimensional space
        spanned by the solution and the gradient of the quadratic function at
        the solution (default is True).

    Notes
    -----
    This function implements Algorithm 6.4 of [1]_. It is assumed that the
    origin is feasible with respect to the bound constraints and that `delta`
    is finite and positive.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
       Kong Polytechnic University, Hong Kong, China, 2022. URL:
       https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if debug:
        assert isinstance(aub, np.ndarray) and aub.ndim == 2
        assert (
            isinstance(bub, np.ndarray)
            and bub.ndim == 1
            and bub.size == aub.shape[0]
        )
        assert (
            isinstance(aeq, np.ndarray)
            and aeq.ndim == 2
            and aeq.shape[1] == aub.shape[1]
        )
        assert (
            isinstance(beq, np.ndarray)
            and beq.ndim == 1
            and beq.size == aeq.shape[0]
        )
        assert isinstance(xl, np.ndarray) and xl.shape == (aub.shape[1],)
        assert isinstance(xu, np.ndarray) and xu.shape == (aub.shape[1],)
        assert isinstance(delta, float)
        assert isinstance(debug, bool)
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= tol)
        assert np.all(xu >= -tol)
        assert np.isfinite(delta) and delta > 0.0
    # The origin is assumed feasible for the bounds; clip so that it is
    # exactly so, guarding against rounding errors in the caller.
    xl = np.minimum(xl, 0.0)
    xu = np.maximum(xu, 0.0)

    # Calculate the initial active set.
    # The problem is reformulated over an extended vector of size
    # n + m_linear_ub: the first n entries are the original variables and the
    # trailing m_linear_ub entries are slack variables for the inequality
    # constraints (see the grad[:n] / grad[n:] splits below).
    m_linear_ub, n = aub.shape
    grad = np.r_[aeq.T @ -beq, np.maximum(0.0, -bub)]
    free_xl = (xl < 0.0) | (grad[:n] < 0.0)
    free_xu = (xu > 0.0) | (grad[:n] > 0.0)
    free_slack = bub < 0.0
    free_ub = (bub > 0.0) | (aub @ grad[:n] - grad[n:] > 0.0)
    n_act, q = qr_normal_byrd_omojokun(
        aub,
        free_xl,
        free_xu,
        free_slack,
        free_ub,
    )

    # Calculate an upper bound on the norm of the slack variables. It is not
    # used in the original algorithm, but it may prevent undesired behaviors
    # engendered by computer rounding errors.
    delta_slack = np.sqrt(beq @ beq + grad[n:] @ grad[n:])

    # Set the initial iterate and the initial search direction.
    # Only the first n components of the extended vector are stored in step;
    # the slack part of the iterate is carried implicitly by grad[n:].
    step = np.zeros(n)
    sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad)
    resid = bub + grad[n:]

    k = 0
    reduct = 0.0
    boundary_reached = False
    while k < n + m_linear_ub - n_act:
        # Stop the computations if sd is not a descent direction.
        grad_sd = grad @ sd
        if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)):
            break

        # Set alpha_tr to the step size for the trust-region constraint.
        try:
            alpha_tr = _alpha_tr(step, sd[:n], delta)
        except ZeroDivisionError:
            alpha_tr = np.inf

        # Prevent undesired behaviors engendered by computer rounding errors by
        # considering the trust-region constraint on the slack variables.
        try:
            alpha_tr = min(alpha_tr, _alpha_tr(grad[n:], sd[n:], delta_slack))
        except ZeroDivisionError:
            pass

        # Stop the computations if a step along sd is expected to give a
        # relatively small reduction in the objective function.
        if -alpha_tr * grad_sd <= 1e-8 * reduct:
            break

        # Set alpha_quad to the step size for the minimization problem.
        # The Hessian of the reformulated objective acts as aeq.T @ aeq on the
        # variable part and as the identity on the slack part.
        hess_sd = np.r_[aeq.T @ (aeq @ sd[:n]), sd[n:]]
        curv_sd = sd @ hess_sd
        if curv_sd > TINY * abs(grad_sd):
            alpha_quad = max(-grad_sd / curv_sd, 0.0)
        else:
            alpha_quad = np.inf

        # Stop the computations if the reduction in the objective function
        # provided by an unconstrained step is small.
        alpha = min(alpha_tr, alpha_quad)
        if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct:
            break

        # Set alpha_bd to the step size for the bound constraints.
        i_xl = free_xl & (xl > -np.inf) & (sd[:n] < -TINY * np.abs(xl - step))
        i_xu = free_xu & (xu < np.inf) & (sd[:n] > TINY * np.abs(xu - step))
        i_slack = free_slack & (sd[n:] < -TINY * np.abs(grad[n:]))
        all_alpha_xl = np.full_like(step, np.inf)
        all_alpha_xu = np.full_like(step, np.inf)
        all_alpha_slack = np.full_like(bub, np.inf)
        all_alpha_xl[i_xl] = np.maximum(
            (xl[i_xl] - step[i_xl]) / sd[:n][i_xl],
            0.0,
        )
        all_alpha_xu[i_xu] = np.maximum(
            (xu[i_xu] - step[i_xu]) / sd[:n][i_xu],
            0.0,
        )
        all_alpha_slack[i_slack] = np.maximum(
            -grad[n:][i_slack] / sd[n:][i_slack],
            0.0,
        )
        alpha_xl = np.min(all_alpha_xl)
        alpha_xu = np.min(all_alpha_xu)
        alpha_slack = np.min(all_alpha_slack, initial=np.inf)
        alpha_bd = min(alpha_xl, alpha_xu, alpha_slack)

        # Set alpha_ub to the step size for the linear constraints.
        aub_sd = aub @ sd[:n] - sd[n:]
        i_ub = free_ub & (aub_sd > TINY * np.abs(resid))
        all_alpha_ub = np.full_like(bub, np.inf)
        all_alpha_ub[i_ub] = resid[i_ub] / aub_sd[i_ub]
        alpha_ub = np.min(all_alpha_ub, initial=np.inf)

        # Update the iterate.
        alpha = min(alpha, alpha_bd, alpha_ub)
        if alpha > 0.0:
            step = np.clip(step + alpha * sd[:n], xl, xu)
            grad += alpha * hess_sd
            resid = np.maximum(0.0, resid - alpha * aub_sd)
            reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd)

        if alpha < min(alpha_tr, alpha_bd, alpha_ub):
            # The current iteration is a conjugate gradient iteration. Update
            # the search direction so that it is conjugate (with respect to H)
            # to all the previous search directions.
            grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad)
            beta = (grad_proj @ hess_sd) / curv_sd
            sd = beta * sd - grad_proj
            k += 1
        elif alpha < alpha_tr:
            # The iterate is restricted by a bound/linear constraint. Add this
            # constraint to the active set, and restart the calculations.
            if alpha_xl <= alpha:
                i_new = np.argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_xl[i_new] = False
            elif alpha_xu <= alpha:
                i_new = np.argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_xu[i_new] = False
            elif alpha_slack <= alpha:
                i_new = np.argmin(all_alpha_slack)
                free_slack[i_new] = False
            else:
                i_new = np.argmin(all_alpha_ub)
                free_ub[i_new] = False
            n_act, q = qr_normal_byrd_omojokun(
                aub, free_xl, free_xu, free_slack, free_ub
            )
            sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad)
            k = 0
        else:
            # The current iterate is on the trust-region boundary. Add all the
            # active bound constraints to the working set to prepare for the
            # improvement of the solution, and stop the iterations.
            if alpha_xl <= alpha:
                i_new = _argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_xl[i_new] = False
            if alpha_xu <= alpha:
                i_new = _argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_xu[i_new] = False
            boundary_reached = True
            break

    # Attempt to improve the solution on the trust-region boundary.
    if kwargs.get("improve_tcg", True) and boundary_reached:
        step_base = np.copy(step)
        # From here on the slack variables are dropped: grad is recomputed as
        # the gradient of the original least-squares violation at step, and a
        # single mask free_bd tracks the variables free of both bounds.
        free_bd = free_xl & free_xu
        grad = aub.T @ np.maximum(aub @ step - bub, 0.0) + aeq.T @ (
            aeq @ step - beq
        )
        sd = np.zeros(n)
        while np.count_nonzero(free_bd) > 0:
            # Check whether a substantial reduction in the objective function
            # is possible, and set the search direction.
            step_sq = step[free_bd] @ step[free_bd]
            grad_sq = grad[free_bd] @ grad[free_bd]
            grad_step = grad[free_bd] @ step[free_bd]
            grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0))
            sd[free_bd] = grad_step * step[free_bd] - step_sq * grad[free_bd]
            sd[~free_bd] = 0.0
            if grad_sd >= -1e-8 * reduct or np.any(
                grad_sd >= -TINY * np.abs(sd[free_bd])
            ):
                break
            sd[free_bd] /= -grad_sd

            # Calculate an upper bound for the tangent of half the angle theta
            # of this alternative iteration. The step will be updated as:
            # step = cos(theta) * step + sin(theta) * sd.
            temp_xl = np.zeros(n)
            temp_xu = np.zeros(n)
            temp_xl[free_bd] = (
                step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xl[free_bd] ** 2.0
            )
            temp_xu[free_bd] = (
                step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xu[free_bd] ** 2.0
            )
            temp_xl[temp_xl > 0.0] = (
                np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0]
            )
            temp_xu[temp_xu > 0.0] = (
                np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0]
            )
            dist_xl = np.maximum(step - xl, 0.0)
            dist_xu = np.maximum(xu - step, 0.0)
            i_xl = temp_xl > TINY * dist_xl
            i_xu = temp_xu > TINY * dist_xu
            all_t_xl = np.ones(n)
            all_t_xu = np.ones(n)
            all_t_xl[i_xl] = np.minimum(
                all_t_xl[i_xl],
                dist_xl[i_xl] / temp_xl[i_xl],
            )
            all_t_xu[i_xu] = np.minimum(
                all_t_xu[i_xu],
                dist_xu[i_xu] / temp_xu[i_xu],
            )
            t_xl = np.min(all_t_xl)
            t_xu = np.min(all_t_xu)
            t_bd = min(t_xl, t_xu)

            # For a range of equally spaced values of tan(0.5 * theta),
            # calculate the reduction in the objective function that would be
            # obtained by accepting the corresponding angle.
            n_samples = 20
            n_samples = int((n_samples - 3) * t_bd + 3)
            t_samples = np.linspace(t_bd / n_samples, t_bd, n_samples)
            resid_ub = np.maximum(aub @ step - bub, 0.0)
            resid_eq = aeq @ step - beq
            step_proj = np.copy(step)
            step_proj[~free_bd] = 0.0
            all_reduct = np.empty(n_samples)
            for i in range(n_samples):
                sin_value = 2.0 * t_samples[i] / (1.0 + t_samples[i] ** 2.0)
                step_alt = np.clip(
                    step + sin_value * (sd - t_samples[i] * step_proj),
                    xl,
                    xu,
                )
                resid_ub_alt = np.maximum(aub @ step_alt - bub, 0.0)
                resid_eq_alt = aeq @ step_alt - beq
                all_reduct[i] = 0.5 * (
                    resid_ub @ resid_ub
                    + resid_eq @ resid_eq
                    - resid_ub_alt @ resid_ub_alt
                    - resid_eq_alt @ resid_eq_alt
                )
            if np.all(all_reduct <= 0.0):
                # No reduction in the objective function is obtained.
                break

            # Accept the angle that provides the largest reduction in the
            # objective function, and update the iterate.
            i_max = np.argmax(all_reduct)
            cos_value = (1.0 - t_samples[i_max] ** 2.0) / (
                1.0 + t_samples[i_max] ** 2.0
            )
            sin_value = (2.0 * t_samples[i_max]
                         / (1.0 + t_samples[i_max] ** 2.0))
            step[free_bd] = cos_value * step[free_bd] + sin_value * sd[free_bd]
            grad = aub.T @ np.maximum(aub @ step - bub, 0.0) + aeq.T @ (
                aeq @ step - beq
            )
            reduct += all_reduct[i_max]

            # If the above angle is restricted by bound constraints, add them
            # to the working set, and restart the alternative iteration.
            # Otherwise, the calculations are terminated.
            if t_bd < 1.0 and i_max == n_samples - 1:
                if t_xl <= t_bd:
                    i_new = _argmin(all_t_xl)
                    step[i_new] = xl[i_new]
                    free_bd[i_new] = False
                if t_xu <= t_bd:
                    i_new = _argmin(all_t_xu)
                    step[i_new] = xu[i_new]
                    free_bd[i_new] = False
            else:
                break

        # Ensure that the alternative iteration improves the objective
        # function.
        resid_ub = np.maximum(aub @ step - bub, 0.0)
        resid_ub_base = np.maximum(aub @ step_base - bub, 0.0)
        resid_eq = aeq @ step - beq
        resid_eq_base = aeq @ step_base - beq
        if (
            resid_ub @ resid_ub + resid_eq @ resid_eq
            > resid_ub_base @ resid_ub_base + resid_eq_base @ resid_eq_base
        ):
            step = step_base

    if debug:
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.linalg.norm(step) < 1.1 * delta
    return step
|
| 1121 |
+
|
| 1122 |
+
|
| 1123 |
+
def qr_tangential_byrd_omojokun(aub, aeq, free_xl, free_xu, free_ub):
    """
    Factorize the active-constraint gradients of the tangential subproblem.

    The rows stacked below are the gradients of the linear equality
    constraints, the active linear inequality constraints, and the active
    lower/upper bound constraints. The QR factorization (with column
    pivoting) of their transpose yields an orthogonal basis ``q`` and an
    estimate ``n_act`` of the number of linearly independent active
    constraints, so that ``q[:, n_act:]`` spans the tangential null space.
    """
    n = free_xl.size
    eye_n = np.eye(n)
    # Gradients of the currently active constraints, one per row.
    act_grad = np.vstack(
        [
            aeq,
            aub[~free_ub, :],
            -eye_n[~free_xl, :],
            eye_n[~free_xu, :],
        ]
    )
    q, r, _ = qr(act_grad.T, pivoting=True)
    # Numerical rank estimate: count diagonal entries of R that are not
    # negligible relative to the norms of the corresponding columns.
    k = np.min(r.shape)
    col_norms = np.linalg.norm(r[:k, :k], axis=0)
    n_act = np.count_nonzero(np.abs(np.diag(r)) >= 10.0 * EPS * n * col_norms)
    return n_act, q
|
| 1145 |
+
|
| 1146 |
+
|
| 1147 |
+
def qr_normal_byrd_omojokun(aub, free_xl, free_xu, free_slack, free_ub):
    """
    Factorize the active-constraint gradients of the normal subproblem.

    The subproblem variables are ``(x, slack)`` with ``slack`` of size
    ``m_linear_ub``, so each gradient row has ``n + m_linear_ub`` entries.
    Returns the estimated number of independent active constraints and the
    orthogonal factor ``q`` of the pivoted QR of the stacked gradients'
    transpose.
    """
    m_linear_ub, n = aub.shape
    eye_n = np.eye(n)
    eye_m = np.eye(m_linear_ub)
    # One row group per constraint type, each spanning the (x, slack) space.
    rows_ub = np.hstack([aub[~free_ub, :], -eye_m[~free_ub, :]])
    rows_slack = np.hstack(
        [
            np.zeros((m_linear_ub - np.count_nonzero(free_slack), n)),
            -eye_m[~free_slack, :],
        ]
    )
    rows_xl = np.hstack(
        [
            -eye_n[~free_xl, :],
            np.zeros((n - np.count_nonzero(free_xl), m_linear_ub)),
        ]
    )
    rows_xu = np.hstack(
        [
            eye_n[~free_xu, :],
            np.zeros((n - np.count_nonzero(free_xu), m_linear_ub)),
        ]
    )
    act_grad = np.vstack([rows_ub, rows_slack, rows_xl, rows_xu])
    q, r, _ = qr(act_grad.T, pivoting=True)
    # Numerical rank of the stacked gradients, as in the tangential case.
    k = np.min(r.shape)
    col_norms = np.linalg.norm(r[:k, :k], axis=0)
    n_act = np.count_nonzero(
        np.abs(np.diag(r)) >= 10.0 * EPS * (n + m_linear_ub) * col_norms
    )
    return n_act, q
|
| 1182 |
+
|
| 1183 |
+
|
| 1184 |
+
def _alpha_tr(step, sd, delta):
    """
    Return the largest ``alpha >= 0`` keeping ``step + alpha * sd`` inside
    the trust region of radius ``delta``, i.e. the nonnegative root of
    ``||step + alpha * sd||**2 = delta**2``.

    Raises
    ------
    ZeroDivisionError
        If neither algebraic form of the root can be evaluated safely.
    """
    proj = step @ sd
    sd_norm_sq = sd @ sd
    # Squared slack between the current step and the trust-region boundary.
    boundary_gap = delta**2.0 - step @ step
    root_term = np.sqrt(max(proj**2.0 + sd_norm_sq * boundary_gap, 0.0))
    # Pick whichever equivalent formula for the quadratic root avoids
    # cancellation / division by a tiny denominator.
    if proj <= 0.0 and sd_norm_sq > TINY * abs(root_term - proj):
        return max((root_term - proj) / sd_norm_sq, 0.0)
    if abs(root_term + proj) > TINY * boundary_gap:
        return max(boundary_gap / (root_term + proj), 0.0)
    raise ZeroDivisionError
|
| 1196 |
+
|
| 1197 |
+
|
| 1198 |
+
def _argmax(x):
|
| 1199 |
+
return np.flatnonzero(x >= np.max(x))
|
| 1200 |
+
|
| 1201 |
+
|
| 1202 |
+
def _argmin(x):
|
| 1203 |
+
return np.flatnonzero(x <= np.min(x))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Public API of the ``cobyqa.utils`` subpackage: termination exceptions,
# numerical helper functions, and the environment/version reporter.
from .exceptions import (
    MaxEvalError,
    TargetSuccess,
    CallbackSuccess,
    FeasibleSuccess,
)
from .math import get_arrays_tol, exact_1d_array
from .versions import show_versions

# Names exported by ``from cobyqa.utils import *``.
__all__ = [
    "MaxEvalError",
    "TargetSuccess",
    "CallbackSuccess",
    "FeasibleSuccess",
    "get_arrays_tol",
    "exact_1d_array",
    "show_versions",
]
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (476 Bytes). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc
ADDED
|
Binary file (1.04 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc
ADDED
|
Binary file (2.18 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc
ADDED
|
Binary file (1.81 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class MaxEvalError(Exception):
    """
    Raised when the budget of function evaluations has been exhausted.
    """
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TargetSuccess(Exception):
    """
    Raised when the target objective value has been attained.
    """
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class CallbackSuccess(StopIteration):
    """
    Raised when the user-supplied callback signals ``StopIteration``.
    """
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class FeasibleSuccess(Exception):
    """
    Raised when a feasible point of a feasible problem has been found.
    """
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
EPS = np.finfo(float).eps


def get_arrays_tol(*arrays):
    """
    Compute a relative comparison tolerance for a collection of arrays.

    Parameters
    ----------
    *arrays : tuple
        Arrays (`numpy.ndarray`) to compute the tolerance for.

    Returns
    -------
    float
        Tolerance scaled by the largest array size and the largest finite
        magnitude appearing in the arrays.

    Raises
    ------
    ValueError
        If no array is provided.
    """
    if not arrays:
        raise ValueError("At least one array must be provided.")
    largest_size = max(array.size for array in arrays)
    # Ignore non-finite entries when measuring data magnitude; empty or
    # all-non-finite arrays contribute the neutral weight 1.0.
    largest_magnitude = max(
        np.max(np.abs(array[np.isfinite(array)]), initial=1.0)
        for array in arrays
    )
    return 10.0 * EPS * max(largest_size, 1.0) * largest_magnitude
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def exact_1d_array(x, message):
    """
    Coerce ``x`` to a 1-dimensional float array.

    Parameters
    ----------
    x : array_like
        Value to coerce. It is squeezed first, so singleton dimensions
        (e.g. a row vector) are accepted.
    message : str
        Text of the ``ValueError`` raised on failure.

    Returns
    -------
    `numpy.ndarray`
        The coerced 1-dimensional array of floats.
    """
    array = np.atleast_1d(np.squeeze(x)).astype(float)
    if array.ndim == 1:
        return array
    raise ValueError(message)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def exact_2d_array(x, message):
    """
    Coerce ``x`` to a 2-dimensional float array.

    Parameters
    ----------
    x : array_like
        Value to coerce; scalars and 1-D inputs are promoted to 2-D.
    message : str
        Text of the ``ValueError`` raised on failure.

    Returns
    -------
    `numpy.ndarray`
        The coerced 2-dimensional array of floats.
    """
    array = np.atleast_2d(x).astype(float)
    if array.ndim == 2:
        return array
    raise ValueError(message)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import platform
|
| 3 |
+
import sys
|
| 4 |
+
from importlib.metadata import PackageNotFoundError, version
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def _get_sys_info():
|
| 8 |
+
"""
|
| 9 |
+
Get useful system information.
|
| 10 |
+
|
| 11 |
+
Returns
|
| 12 |
+
-------
|
| 13 |
+
dict
|
| 14 |
+
Useful system information.
|
| 15 |
+
"""
|
| 16 |
+
return {
|
| 17 |
+
"python": sys.version.replace(os.linesep, " "),
|
| 18 |
+
"executable": sys.executable,
|
| 19 |
+
"machine": platform.platform(),
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _get_deps_info():
|
| 24 |
+
"""
|
| 25 |
+
Get the versions of the dependencies.
|
| 26 |
+
|
| 27 |
+
Returns
|
| 28 |
+
-------
|
| 29 |
+
dict
|
| 30 |
+
Versions of the dependencies.
|
| 31 |
+
"""
|
| 32 |
+
deps = ["cobyqa", "numpy", "scipy", "setuptools", "pip"]
|
| 33 |
+
deps_info = {}
|
| 34 |
+
for module in deps:
|
| 35 |
+
try:
|
| 36 |
+
deps_info[module] = version(module)
|
| 37 |
+
except PackageNotFoundError:
|
| 38 |
+
deps_info[module] = None
|
| 39 |
+
return deps_info
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def show_versions():
    """
    Print useful system and dependency information.

    When reporting issues, please include this information.
    """
    sys_info = _get_sys_info()
    deps_info = _get_deps_info()
    # Right-align keys so that the colons line up within each section.
    sys_width = max(map(len, sys_info.keys())) + 1
    deps_width = max(map(len, deps_info.keys())) + 1

    print("System settings")
    print("---------------")
    print("\n".join(f"{k:>{sys_width}}: {v}" for k, v in sys_info.items()))

    print()
    print("Python dependencies")
    print("-------------------")
    print("\n".join(f"{k:>{deps_width}}: {v}" for k, v in deps_info.items()))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py
ADDED
|
File without changes
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc
ADDED
|
Binary file (6.99 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc
ADDED
|
Binary file (6.56 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Test for assert_deallocated context manager and gc utilities
|
| 2 |
+
"""
|
| 3 |
+
import gc
|
| 4 |
+
from threading import Lock
|
| 5 |
+
|
| 6 |
+
from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated,
|
| 7 |
+
ReferenceError, IS_PYPY)
|
| 8 |
+
|
| 9 |
+
from numpy.testing import assert_equal
|
| 10 |
+
|
| 11 |
+
import pytest
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.fixture
|
| 15 |
+
def gc_lock():
|
| 16 |
+
return Lock()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def test_set_gc_state(gc_lock):
|
| 20 |
+
with gc_lock:
|
| 21 |
+
gc_status = gc.isenabled()
|
| 22 |
+
try:
|
| 23 |
+
for state in (True, False):
|
| 24 |
+
gc.enable()
|
| 25 |
+
set_gc_state(state)
|
| 26 |
+
assert_equal(gc.isenabled(), state)
|
| 27 |
+
gc.disable()
|
| 28 |
+
set_gc_state(state)
|
| 29 |
+
assert_equal(gc.isenabled(), state)
|
| 30 |
+
finally:
|
| 31 |
+
if gc_status:
|
| 32 |
+
gc.enable()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def test_gc_state(gc_lock):
|
| 36 |
+
# Test gc_state context manager
|
| 37 |
+
with gc_lock:
|
| 38 |
+
gc_status = gc.isenabled()
|
| 39 |
+
try:
|
| 40 |
+
for pre_state in (True, False):
|
| 41 |
+
set_gc_state(pre_state)
|
| 42 |
+
for with_state in (True, False):
|
| 43 |
+
# Check the gc state is with_state in with block
|
| 44 |
+
with gc_state(with_state):
|
| 45 |
+
assert_equal(gc.isenabled(), with_state)
|
| 46 |
+
# And returns to previous state outside block
|
| 47 |
+
assert_equal(gc.isenabled(), pre_state)
|
| 48 |
+
# Even if the gc state is set explicitly within the block
|
| 49 |
+
with gc_state(with_state):
|
| 50 |
+
assert_equal(gc.isenabled(), with_state)
|
| 51 |
+
set_gc_state(not with_state)
|
| 52 |
+
assert_equal(gc.isenabled(), pre_state)
|
| 53 |
+
finally:
|
| 54 |
+
if gc_status:
|
| 55 |
+
gc.enable()
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
|
| 59 |
+
def test_assert_deallocated(gc_lock):
|
| 60 |
+
# Ordinary use
|
| 61 |
+
class C:
|
| 62 |
+
def __init__(self, arg0, arg1, name='myname'):
|
| 63 |
+
self.name = name
|
| 64 |
+
with gc_lock:
|
| 65 |
+
for gc_current in (True, False):
|
| 66 |
+
with gc_state(gc_current):
|
| 67 |
+
# We are deleting from with-block context, so that's OK
|
| 68 |
+
with assert_deallocated(C, 0, 2, 'another name') as c:
|
| 69 |
+
assert_equal(c.name, 'another name')
|
| 70 |
+
del c
|
| 71 |
+
# Or not using the thing in with-block context, also OK
|
| 72 |
+
with assert_deallocated(C, 0, 2, name='third name'):
|
| 73 |
+
pass
|
| 74 |
+
assert_equal(gc.isenabled(), gc_current)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
|
| 78 |
+
def test_assert_deallocated_nodel():
|
| 79 |
+
class C:
|
| 80 |
+
pass
|
| 81 |
+
with pytest.raises(ReferenceError):
|
| 82 |
+
# Need to delete after using if in with-block context
|
| 83 |
+
# Note: assert_deallocated(C) needs to be assigned for the test
|
| 84 |
+
# to function correctly. It is assigned to _, but _ itself is
|
| 85 |
+
# not referenced in the body of the with, it is only there for
|
| 86 |
+
# the refcount.
|
| 87 |
+
with assert_deallocated(C) as _:
|
| 88 |
+
pass
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
|
| 92 |
+
def test_assert_deallocated_circular():
|
| 93 |
+
class C:
|
| 94 |
+
def __init__(self):
|
| 95 |
+
self._circular = self
|
| 96 |
+
with pytest.raises(ReferenceError):
|
| 97 |
+
# Circular reference, no automatic garbage collection
|
| 98 |
+
with assert_deallocated(C) as c:
|
| 99 |
+
del c
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
|
| 103 |
+
def test_assert_deallocated_circular2():
|
| 104 |
+
class C:
|
| 105 |
+
def __init__(self):
|
| 106 |
+
self._circular = self
|
| 107 |
+
with pytest.raises(ReferenceError):
|
| 108 |
+
# Still circular reference, no automatic garbage collection
|
| 109 |
+
with assert_deallocated(C):
|
| 110 |
+
pass
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Tests for the vendored PEP 440 version-comparison implementation.
from pytest import raises as assert_raises
from scipy._lib._pep440 import Version, parse


def test_main_versions():
    # Release versions compare numerically, component by component.
    assert Version('1.8.0') == Version('1.8.0')
    for ver in ['1.9.0', '2.0.0', '1.8.1']:
        assert Version('1.8.0') < Version(ver)

    for ver in ['1.7.0', '1.7.1', '0.9.9']:
        assert Version('1.8.0') > Version(ver)


def test_version_1_point_10():
    # regression test for gh-2998.
    assert Version('1.9.0') < Version('1.10.0')
    assert Version('1.11.0') < Version('1.11.1')
    assert Version('1.11.0') == Version('1.11.0')
    assert Version('1.99.11') < Version('1.99.12')


def test_alpha_beta_rc():
    # Pre-release ordering: a < b < rc < final release.
    assert Version('1.8.0rc1') == Version('1.8.0rc1')
    for ver in ['1.8.0', '1.8.0rc2']:
        assert Version('1.8.0rc1') < Version(ver)

    for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
        assert Version('1.8.0rc1') > Version(ver)

    assert Version('1.8.0b1') > Version('1.8.0a2')


def test_dev_version():
    # ``.dev`` builds sort before the corresponding release and pre-releases.
    assert Version('1.9.0.dev+Unknown') < Version('1.9.0')
    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev+ffffffff', '1.9.0.dev1']:
        assert Version('1.9.0.dev+f16acvda') < Version(ver)

    assert Version('1.9.0.dev+f16acvda') == Version('1.9.0.dev+f16acvda')


def test_dev_a_b_rc_mixed():
    assert Version('1.9.0a2.dev+f16acvda') == Version('1.9.0a2.dev+f16acvda')
    assert Version('1.9.0a2.dev+6acvda54') < Version('1.9.0a2')


def test_dev0_version():
    # Same ordering rules when the dev segment is explicitly numbered ``dev0``.
    assert Version('1.9.0.dev0+Unknown') < Version('1.9.0')
    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
        assert Version('1.9.0.dev0+f16acvda') < Version(ver)

    assert Version('1.9.0.dev0+f16acvda') == Version('1.9.0.dev0+f16acvda')


def test_dev0_a_b_rc_mixed():
    assert Version('1.9.0a2.dev0+f16acvda') == Version('1.9.0a2.dev0+f16acvda')
    assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2')


def test_raises():
    # Malformed identifiers are rejected by the strict ``Version`` parser.
    for ver in ['1,9.0', '1.7.x']:
        assert_raises(ValueError, Version, ver)

def test_legacy_version():
    # Non-PEP-440 version identifiers always compare less. For NumPy this only
    # occurs on dev builds prior to 1.10.0 which are unsupported anyway.
    assert parse('invalid') < Version('0.0.0')
    assert parse('1.9.0-f16acvda') < Version('1.0.0')
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
from scipy._lib._testutils import _parse_size, _get_mem_available
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def test__parse_size():
|
| 7 |
+
expected = {
|
| 8 |
+
'12': 12e6,
|
| 9 |
+
'12 b': 12,
|
| 10 |
+
'12k': 12e3,
|
| 11 |
+
' 12 M ': 12e6,
|
| 12 |
+
' 12 G ': 12e9,
|
| 13 |
+
' 12Tb ': 12e12,
|
| 14 |
+
'12 Mib ': 12 * 1024.0**2,
|
| 15 |
+
'12Tib': 12 * 1024.0**4,
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
for inp, outp in sorted(expected.items()):
|
| 19 |
+
if outp is None:
|
| 20 |
+
with pytest.raises(ValueError):
|
| 21 |
+
_parse_size(inp)
|
| 22 |
+
else:
|
| 23 |
+
assert _parse_size(inp) == outp
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def test__mem_available():
|
| 27 |
+
# May return None on non-Linux platforms
|
| 28 |
+
available = _get_mem_available()
|
| 29 |
+
if sys.platform.startswith('linux'):
|
| 30 |
+
assert available >= 0
|
| 31 |
+
else:
|
| 32 |
+
assert available is None or available >= 0
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import time
|
| 3 |
+
import traceback
|
| 4 |
+
|
| 5 |
+
from numpy.testing import assert_
|
| 6 |
+
from pytest import raises as assert_raises
|
| 7 |
+
|
| 8 |
+
from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def test_parallel_threads():
|
| 12 |
+
# Check that ReentrancyLock serializes work in parallel threads.
|
| 13 |
+
#
|
| 14 |
+
# The test is not fully deterministic, and may succeed falsely if
|
| 15 |
+
# the timings go wrong.
|
| 16 |
+
|
| 17 |
+
lock = ReentrancyLock("failure")
|
| 18 |
+
|
| 19 |
+
failflag = [False]
|
| 20 |
+
exceptions_raised = []
|
| 21 |
+
|
| 22 |
+
def worker(k):
|
| 23 |
+
try:
|
| 24 |
+
with lock:
|
| 25 |
+
assert_(not failflag[0])
|
| 26 |
+
failflag[0] = True
|
| 27 |
+
time.sleep(0.1 * k)
|
| 28 |
+
assert_(failflag[0])
|
| 29 |
+
failflag[0] = False
|
| 30 |
+
except Exception:
|
| 31 |
+
exceptions_raised.append(traceback.format_exc(2))
|
| 32 |
+
|
| 33 |
+
threads = [threading.Thread(target=lambda k=k: worker(k))
|
| 34 |
+
for k in range(3)]
|
| 35 |
+
for t in threads:
|
| 36 |
+
t.start()
|
| 37 |
+
for t in threads:
|
| 38 |
+
t.join()
|
| 39 |
+
|
| 40 |
+
exceptions_raised = "\n".join(exceptions_raised)
|
| 41 |
+
assert_(not exceptions_raised, exceptions_raised)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def test_reentering():
|
| 45 |
+
# Check that ReentrancyLock prevents re-entering from the same thread.
|
| 46 |
+
|
| 47 |
+
@non_reentrant()
|
| 48 |
+
def func(x):
|
| 49 |
+
return func(x)
|
| 50 |
+
|
| 51 |
+
assert_raises(ReentrancyError, func, 0)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py
ADDED
|
@@ -0,0 +1,657 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from multiprocessing import Pool
from multiprocessing.pool import Pool as PWL
import re
import math
from fractions import Fraction

import numpy as np
from numpy.testing import assert_equal, assert_
import pytest
from pytest import raises as assert_raises
import hypothesis.extra.numpy as npst
from hypothesis import given, strategies, reproduce_failure  # noqa: F401
from scipy.conftest import array_api_compatible, skip_xp_invalid_arg

from scipy._lib._array_api import (xp_assert_equal, xp_assert_close, is_numpy,
                                   xp_copy, is_array_api_strict)
from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper,
                              getfullargspec_no_self, FullArgSpec,
                              rng_integers, _validate_int, _rename_parameter,
                              _contains_nan, _rng_html_rewrite, _lazywhere)
from scipy import cluster, interpolate, linalg, optimize, sparse, spatial, stats

# Shorthand for the marker used across the array-API-aware tests.
skip_xp_backends = pytest.mark.skip_xp_backends


@pytest.mark.slow
def test__aligned_zeros():
    # _aligned_zeros must honor shape, dtype, memory order, and alignment.
    niter = 10

    def check(shape, dtype, order, align):
        err_msg = repr((shape, dtype, order, align))
        x = _aligned_zeros(shape, dtype, order, align=align)
        if align is None:
            # align=None falls back to the dtype's natural alignment.
            align = np.dtype(dtype).alignment
        assert_equal(x.__array_interface__['data'][0] % align, 0)
        if hasattr(shape, '__len__'):
            assert_equal(x.shape, shape, err_msg)
        else:
            assert_equal(x.shape, (shape,), err_msg)
        assert_equal(x.dtype, dtype)
        if order == "C":
            assert_(x.flags.c_contiguous, err_msg)
        elif order == "F":
            if x.size > 0:
                # Size-0 arrays get invalid flags on NumPy 1.5
                assert_(x.flags.f_contiguous, err_msg)
        elif order is None:
            assert_(x.flags.c_contiguous, err_msg)
        else:
            raise ValueError()

    # try various alignments
    for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
        for n in [0, 1, 3, 11]:
            for order in ["C", "F", None]:
                for dtype in [np.uint8, np.float64]:
                    for shape in [n, (1, 2, 3, n)]:
                        for j in range(niter):
                            check(shape, dtype, order, align)


def test_check_random_state():
    # If seed is None, return the RandomState singleton used by np.random.
    # If seed is an int, return a new RandomState instance seeded with seed.
    # If seed is already a RandomState instance, return it.
    # Otherwise raise ValueError.
    rsi = check_random_state(1)
    assert_equal(type(rsi), np.random.RandomState)
    rsi = check_random_state(rsi)
    assert_equal(type(rsi), np.random.RandomState)
    rsi = check_random_state(None)
    assert_equal(type(rsi), np.random.RandomState)
    assert_raises(ValueError, check_random_state, 'a')
    rg = np.random.Generator(np.random.PCG64())
    rsi = check_random_state(rg)
    assert_equal(type(rsi), np.random.Generator)


def test_getfullargspec_no_self():
    # Bound-method introspection must strip ``self`` from the spec.
    p = MapWrapper(1)
    argspec = getfullargspec_no_self(p.__init__)
    assert_equal(argspec, FullArgSpec(['pool'], None, None, (1,), [],
                                      None, {}))
    argspec = getfullargspec_no_self(p.__call__)
    assert_equal(argspec, FullArgSpec(['func', 'iterable'], None, None, None,
                                      [], None, {}))

    class _rv_generic:
        def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs):
            return None

    rv_obj = _rv_generic()
    argspec = getfullargspec_no_self(rv_obj._rvs)
    assert_equal(argspec, FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs',
                                      (2, 3), ['size'], {'size': None}, {}))


def test_mapwrapper_serial():
    # MapWrapper(1) must behave like the builtin ``map`` with no pool.
    in_arg = np.arange(10.)
    out_arg = np.sin(in_arg)

    p = MapWrapper(1)
    assert_(p._mapfunc is map)
    assert_(p.pool is None)
    assert_(p._own_pool is False)
    out = list(p(np.sin, in_arg))
    assert_equal(out, out_arg)

    with assert_raises(RuntimeError):
        p = MapWrapper(0)


def test_pool():
    # Sanity check that multiprocessing works in this environment at all.
    with Pool(2) as p:
        p.map(math.sin, [1, 2, 3, 4])


def test_mapwrapper_parallel():
    in_arg = np.arange(10.)
    out_arg = np.sin(in_arg)

    with MapWrapper(2) as p:
        out = p(np.sin, in_arg)
        assert_equal(list(out), out_arg)

    assert_(p._own_pool is True)
    assert_(isinstance(p.pool, PWL))
    assert_(p._mapfunc is not None)

    # the context manager should've closed the internal pool
    # check that it has by asking it to calculate again.
    with assert_raises(Exception) as excinfo:
        p(np.sin, in_arg)

    assert_(excinfo.type is ValueError)

    # can also set a PoolWrapper up with a map-like callable instance
    with Pool(2) as p:
        q = MapWrapper(p.map)

        assert_(q._own_pool is False)
        q.close()

        # closing the PoolWrapper shouldn't close the internal pool
        # because it didn't create it
        out = p.map(np.sin, in_arg)
        assert_equal(list(out), out_arg)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def test_rng_integers():
|
| 151 |
+
rng = np.random.RandomState()
|
| 152 |
+
|
| 153 |
+
# test that numbers are inclusive of high point
|
| 154 |
+
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
|
| 155 |
+
assert np.max(arr) == 5
|
| 156 |
+
assert np.min(arr) == 2
|
| 157 |
+
assert arr.shape == (100, )
|
| 158 |
+
|
| 159 |
+
# test that numbers are inclusive of high point
|
| 160 |
+
arr = rng_integers(rng, low=5, size=100, endpoint=True)
|
| 161 |
+
assert np.max(arr) == 5
|
| 162 |
+
assert np.min(arr) == 0
|
| 163 |
+
assert arr.shape == (100, )
|
| 164 |
+
|
| 165 |
+
# test that numbers are exclusive of high point
|
| 166 |
+
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
|
| 167 |
+
assert np.max(arr) == 4
|
| 168 |
+
assert np.min(arr) == 2
|
| 169 |
+
assert arr.shape == (100, )
|
| 170 |
+
|
| 171 |
+
# test that numbers are exclusive of high point
|
| 172 |
+
arr = rng_integers(rng, low=5, size=100, endpoint=False)
|
| 173 |
+
assert np.max(arr) == 4
|
| 174 |
+
assert np.min(arr) == 0
|
| 175 |
+
assert arr.shape == (100, )
|
| 176 |
+
|
| 177 |
+
# now try with np.random.Generator
|
| 178 |
+
try:
|
| 179 |
+
rng = np.random.default_rng()
|
| 180 |
+
except AttributeError:
|
| 181 |
+
return
|
| 182 |
+
|
| 183 |
+
# test that numbers are inclusive of high point
|
| 184 |
+
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
|
| 185 |
+
assert np.max(arr) == 5
|
| 186 |
+
assert np.min(arr) == 2
|
| 187 |
+
assert arr.shape == (100, )
|
| 188 |
+
|
| 189 |
+
# test that numbers are inclusive of high point
|
| 190 |
+
arr = rng_integers(rng, low=5, size=100, endpoint=True)
|
| 191 |
+
assert np.max(arr) == 5
|
| 192 |
+
assert np.min(arr) == 0
|
| 193 |
+
assert arr.shape == (100, )
|
| 194 |
+
|
| 195 |
+
# test that numbers are exclusive of high point
|
| 196 |
+
arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
|
| 197 |
+
assert np.max(arr) == 4
|
| 198 |
+
assert np.min(arr) == 2
|
| 199 |
+
assert arr.shape == (100, )
|
| 200 |
+
|
| 201 |
+
# test that numbers are exclusive of high point
|
| 202 |
+
arr = rng_integers(rng, low=5, size=100, endpoint=False)
|
| 203 |
+
assert np.max(arr) == 4
|
| 204 |
+
assert np.min(arr) == 0
|
| 205 |
+
assert arr.shape == (100, )
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class TestValidateInt:
|
| 209 |
+
|
| 210 |
+
@pytest.mark.parametrize('n', [4, np.uint8(4), np.int16(4), np.array(4)])
|
| 211 |
+
def test_validate_int(self, n):
|
| 212 |
+
n = _validate_int(n, 'n')
|
| 213 |
+
assert n == 4
|
| 214 |
+
|
| 215 |
+
@pytest.mark.parametrize('n', [4.0, np.array([4]), Fraction(4, 1)])
|
| 216 |
+
def test_validate_int_bad(self, n):
|
| 217 |
+
with pytest.raises(TypeError, match='n must be an integer'):
|
| 218 |
+
_validate_int(n, 'n')
|
| 219 |
+
|
| 220 |
+
def test_validate_int_below_min(self):
|
| 221 |
+
with pytest.raises(ValueError, match='n must be an integer not '
|
| 222 |
+
'less than 0'):
|
| 223 |
+
_validate_int(-1, 'n', 0)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
class TestRenameParameter:
|
| 227 |
+
# check that wrapper `_rename_parameter` for backward-compatible
|
| 228 |
+
# keyword renaming works correctly
|
| 229 |
+
|
| 230 |
+
# Example method/function that still accepts keyword `old`
|
| 231 |
+
@_rename_parameter("old", "new")
|
| 232 |
+
def old_keyword_still_accepted(self, new):
|
| 233 |
+
return new
|
| 234 |
+
|
| 235 |
+
# Example method/function for which keyword `old` is deprecated
|
| 236 |
+
@_rename_parameter("old", "new", dep_version="1.9.0")
|
| 237 |
+
def old_keyword_deprecated(self, new):
|
| 238 |
+
return new
|
| 239 |
+
|
| 240 |
+
def test_old_keyword_still_accepted(self):
|
| 241 |
+
# positional argument and both keyword work identically
|
| 242 |
+
res1 = self.old_keyword_still_accepted(10)
|
| 243 |
+
res2 = self.old_keyword_still_accepted(new=10)
|
| 244 |
+
res3 = self.old_keyword_still_accepted(old=10)
|
| 245 |
+
assert res1 == res2 == res3 == 10
|
| 246 |
+
|
| 247 |
+
# unexpected keyword raises an error
|
| 248 |
+
message = re.escape("old_keyword_still_accepted() got an unexpected")
|
| 249 |
+
with pytest.raises(TypeError, match=message):
|
| 250 |
+
self.old_keyword_still_accepted(unexpected=10)
|
| 251 |
+
|
| 252 |
+
# multiple values for the same parameter raises an error
|
| 253 |
+
message = re.escape("old_keyword_still_accepted() got multiple")
|
| 254 |
+
with pytest.raises(TypeError, match=message):
|
| 255 |
+
self.old_keyword_still_accepted(10, new=10)
|
| 256 |
+
with pytest.raises(TypeError, match=message):
|
| 257 |
+
self.old_keyword_still_accepted(10, old=10)
|
| 258 |
+
with pytest.raises(TypeError, match=message):
|
| 259 |
+
self.old_keyword_still_accepted(new=10, old=10)
|
| 260 |
+
|
| 261 |
+
@pytest.fixture
|
| 262 |
+
def kwarg_lock(self):
|
| 263 |
+
from threading import Lock
|
| 264 |
+
return Lock()
|
| 265 |
+
|
| 266 |
+
def test_old_keyword_deprecated(self, kwarg_lock):
|
| 267 |
+
# positional argument and both keyword work identically,
|
| 268 |
+
# but use of old keyword results in DeprecationWarning
|
| 269 |
+
dep_msg = "Use of keyword argument `old` is deprecated"
|
| 270 |
+
res1 = self.old_keyword_deprecated(10)
|
| 271 |
+
res2 = self.old_keyword_deprecated(new=10)
|
| 272 |
+
# pytest warning filter is not thread-safe, enforce serialization
|
| 273 |
+
with kwarg_lock:
|
| 274 |
+
with pytest.warns(DeprecationWarning, match=dep_msg):
|
| 275 |
+
res3 = self.old_keyword_deprecated(old=10)
|
| 276 |
+
assert res1 == res2 == res3 == 10
|
| 277 |
+
|
| 278 |
+
# unexpected keyword raises an error
|
| 279 |
+
message = re.escape("old_keyword_deprecated() got an unexpected")
|
| 280 |
+
with pytest.raises(TypeError, match=message):
|
| 281 |
+
self.old_keyword_deprecated(unexpected=10)
|
| 282 |
+
|
| 283 |
+
# multiple values for the same parameter raises an error and,
|
| 284 |
+
# if old keyword is used, results in DeprecationWarning
|
| 285 |
+
message = re.escape("old_keyword_deprecated() got multiple")
|
| 286 |
+
with pytest.raises(TypeError, match=message):
|
| 287 |
+
self.old_keyword_deprecated(10, new=10)
|
| 288 |
+
with kwarg_lock:
|
| 289 |
+
with pytest.raises(TypeError, match=message), \
|
| 290 |
+
pytest.warns(DeprecationWarning, match=dep_msg):
|
| 291 |
+
# breakpoint()
|
| 292 |
+
self.old_keyword_deprecated(10, old=10)
|
| 293 |
+
with kwarg_lock:
|
| 294 |
+
with pytest.raises(TypeError, match=message), \
|
| 295 |
+
pytest.warns(DeprecationWarning, match=dep_msg):
|
| 296 |
+
self.old_keyword_deprecated(new=10, old=10)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
class TestContainsNaNTest:
|
| 300 |
+
|
| 301 |
+
def test_policy(self):
|
| 302 |
+
data = np.array([1, 2, 3, np.nan])
|
| 303 |
+
|
| 304 |
+
contains_nan, nan_policy = _contains_nan(data, nan_policy="propagate")
|
| 305 |
+
assert contains_nan
|
| 306 |
+
assert nan_policy == "propagate"
|
| 307 |
+
|
| 308 |
+
contains_nan, nan_policy = _contains_nan(data, nan_policy="omit")
|
| 309 |
+
assert contains_nan
|
| 310 |
+
assert nan_policy == "omit"
|
| 311 |
+
|
| 312 |
+
msg = "The input contains nan values"
|
| 313 |
+
with pytest.raises(ValueError, match=msg):
|
| 314 |
+
_contains_nan(data, nan_policy="raise")
|
| 315 |
+
|
| 316 |
+
msg = "nan_policy must be one of"
|
| 317 |
+
with pytest.raises(ValueError, match=msg):
|
| 318 |
+
_contains_nan(data, nan_policy="nan")
|
| 319 |
+
|
| 320 |
+
def test_contains_nan(self):
|
| 321 |
+
data1 = np.array([1, 2, 3])
|
| 322 |
+
assert not _contains_nan(data1)[0]
|
| 323 |
+
|
| 324 |
+
data2 = np.array([1, 2, 3, np.nan])
|
| 325 |
+
assert _contains_nan(data2)[0]
|
| 326 |
+
|
| 327 |
+
data3 = np.array([np.nan, 2, 3, np.nan])
|
| 328 |
+
assert _contains_nan(data3)[0]
|
| 329 |
+
|
| 330 |
+
data4 = np.array([[1, 2], [3, 4]])
|
| 331 |
+
assert not _contains_nan(data4)[0]
|
| 332 |
+
|
| 333 |
+
data5 = np.array([[1, 2], [3, np.nan]])
|
| 334 |
+
assert _contains_nan(data5)[0]
|
| 335 |
+
|
| 336 |
+
@skip_xp_invalid_arg
|
| 337 |
+
def test_contains_nan_with_strings(self):
|
| 338 |
+
data1 = np.array([1, 2, "3", np.nan]) # converted to string "nan"
|
| 339 |
+
assert not _contains_nan(data1)[0]
|
| 340 |
+
|
| 341 |
+
data2 = np.array([1, 2, "3", np.nan], dtype='object')
|
| 342 |
+
assert _contains_nan(data2)[0]
|
| 343 |
+
|
| 344 |
+
data3 = np.array([["1", 2], [3, np.nan]]) # converted to string "nan"
|
| 345 |
+
assert not _contains_nan(data3)[0]
|
| 346 |
+
|
| 347 |
+
data4 = np.array([["1", 2], [3, np.nan]], dtype='object')
|
| 348 |
+
assert _contains_nan(data4)[0]
|
| 349 |
+
|
| 350 |
+
@skip_xp_backends('jax.numpy',
|
| 351 |
+
reason="JAX arrays do not support item assignment")
|
| 352 |
+
@pytest.mark.usefixtures("skip_xp_backends")
|
| 353 |
+
@array_api_compatible
|
| 354 |
+
@pytest.mark.parametrize("nan_policy", ['propagate', 'omit', 'raise'])
|
| 355 |
+
def test_array_api(self, xp, nan_policy):
|
| 356 |
+
rng = np.random.default_rng(932347235892482)
|
| 357 |
+
x0 = rng.random(size=(2, 3, 4))
|
| 358 |
+
x = xp.asarray(x0)
|
| 359 |
+
x_nan = xp_copy(x, xp=xp)
|
| 360 |
+
x_nan[1, 2, 1] = np.nan
|
| 361 |
+
|
| 362 |
+
contains_nan, nan_policy_out = _contains_nan(x, nan_policy=nan_policy)
|
| 363 |
+
assert not contains_nan
|
| 364 |
+
assert nan_policy_out == nan_policy
|
| 365 |
+
|
| 366 |
+
if nan_policy == 'raise':
|
| 367 |
+
message = 'The input contains...'
|
| 368 |
+
with pytest.raises(ValueError, match=message):
|
| 369 |
+
_contains_nan(x_nan, nan_policy=nan_policy)
|
| 370 |
+
elif nan_policy == 'omit' and not is_numpy(xp):
|
| 371 |
+
message = "`nan_policy='omit' is incompatible..."
|
| 372 |
+
with pytest.raises(ValueError, match=message):
|
| 373 |
+
_contains_nan(x_nan, nan_policy=nan_policy)
|
| 374 |
+
elif nan_policy == 'propagate':
|
| 375 |
+
contains_nan, nan_policy_out = _contains_nan(
|
| 376 |
+
x_nan, nan_policy=nan_policy)
|
| 377 |
+
assert contains_nan
|
| 378 |
+
assert nan_policy_out == nan_policy
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def test__rng_html_rewrite():
|
| 382 |
+
def mock_str():
|
| 383 |
+
lines = [
|
| 384 |
+
'np.random.default_rng(8989843)',
|
| 385 |
+
'np.random.default_rng(seed)',
|
| 386 |
+
'np.random.default_rng(0x9a71b21474694f919882289dc1559ca)',
|
| 387 |
+
' bob ',
|
| 388 |
+
]
|
| 389 |
+
return lines
|
| 390 |
+
|
| 391 |
+
res = _rng_html_rewrite(mock_str)()
|
| 392 |
+
ref = [
|
| 393 |
+
'np.random.default_rng()',
|
| 394 |
+
'np.random.default_rng(seed)',
|
| 395 |
+
'np.random.default_rng()',
|
| 396 |
+
' bob ',
|
| 397 |
+
]
|
| 398 |
+
|
| 399 |
+
assert res == ref
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
class TestTransitionToRNG:
|
| 403 |
+
def kmeans(self, **kwargs):
|
| 404 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 405 |
+
return cluster.vq.kmeans2(rng.random(size=(20, 3)), 3, **kwargs)
|
| 406 |
+
|
| 407 |
+
def kmeans2(self, **kwargs):
|
| 408 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 409 |
+
return cluster.vq.kmeans2(rng.random(size=(20, 3)), 3, **kwargs)
|
| 410 |
+
|
| 411 |
+
def barycentric(self, **kwargs):
|
| 412 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 413 |
+
x1, x2, y1 = rng.random((3, 10))
|
| 414 |
+
f = interpolate.BarycentricInterpolator(x1, y1, **kwargs)
|
| 415 |
+
return f(x2)
|
| 416 |
+
|
| 417 |
+
def clarkson_woodruff_transform(self, **kwargs):
|
| 418 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 419 |
+
return linalg.clarkson_woodruff_transform(rng.random((10, 10)), 3, **kwargs)
|
| 420 |
+
|
| 421 |
+
def basinhopping(self, **kwargs):
|
| 422 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 423 |
+
return optimize.basinhopping(optimize.rosen, rng.random(3), **kwargs).x
|
| 424 |
+
|
| 425 |
+
def opt(self, fun, **kwargs):
|
| 426 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 427 |
+
bounds = optimize.Bounds(-rng.random(3) * 10, rng.random(3) * 10)
|
| 428 |
+
return fun(optimize.rosen, bounds, **kwargs).x
|
| 429 |
+
|
| 430 |
+
def differential_evolution(self, **kwargs):
|
| 431 |
+
return self.opt(optimize.differential_evolution, **kwargs)
|
| 432 |
+
|
| 433 |
+
def dual_annealing(self, **kwargs):
|
| 434 |
+
return self.opt(optimize.dual_annealing, **kwargs)
|
| 435 |
+
|
| 436 |
+
def check_grad(self, **kwargs):
|
| 437 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 438 |
+
x = rng.random(3)
|
| 439 |
+
return optimize.check_grad(optimize.rosen, optimize.rosen_der, x,
|
| 440 |
+
direction='random', **kwargs)
|
| 441 |
+
|
| 442 |
+
def random_array(self, **kwargs):
|
| 443 |
+
return sparse.random_array((10, 10), density=1.0, **kwargs).toarray()
|
| 444 |
+
|
| 445 |
+
def random(self, **kwargs):
|
| 446 |
+
return sparse.random(10, 10, density=1.0, **kwargs).toarray()
|
| 447 |
+
|
| 448 |
+
def rand(self, **kwargs):
|
| 449 |
+
return sparse.rand(10, 10, density=1.0, **kwargs).toarray()
|
| 450 |
+
|
| 451 |
+
def svds(self, **kwargs):
|
| 452 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 453 |
+
A = rng.random((10, 10))
|
| 454 |
+
return sparse.linalg.svds(A, **kwargs)
|
| 455 |
+
|
| 456 |
+
def random_rotation(self, **kwargs):
|
| 457 |
+
return spatial.transform.Rotation.random(3, **kwargs).as_matrix()
|
| 458 |
+
|
| 459 |
+
def goodness_of_fit(self, **kwargs):
|
| 460 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 461 |
+
data = rng.random(100)
|
| 462 |
+
return stats.goodness_of_fit(stats.laplace, data, **kwargs).pvalue
|
| 463 |
+
|
| 464 |
+
def permutation_test(self, **kwargs):
|
| 465 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 466 |
+
data = tuple(rng.random((2, 100)))
|
| 467 |
+
def statistic(x, y, axis): return np.mean(x, axis=axis) - np.mean(y, axis=axis)
|
| 468 |
+
return stats.permutation_test(data, statistic, **kwargs).pvalue
|
| 469 |
+
|
| 470 |
+
def bootstrap(self, **kwargs):
|
| 471 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 472 |
+
data = (rng.random(100),)
|
| 473 |
+
return stats.bootstrap(data, np.mean, **kwargs).confidence_interval
|
| 474 |
+
|
| 475 |
+
def dunnett(self, **kwargs):
|
| 476 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 477 |
+
x, y, control = rng.random((3, 100))
|
| 478 |
+
return stats.dunnett(x, y, control=control, **kwargs).pvalue
|
| 479 |
+
|
| 480 |
+
def sobol_indices(self, **kwargs):
|
| 481 |
+
def f_ishigami(x): return (np.sin(x[0]) + 7 * np.sin(x[1]) ** 2
|
| 482 |
+
+ 0.1 * (x[2] ** 4) * np.sin(x[0]))
|
| 483 |
+
dists = [stats.uniform(loc=-np.pi, scale=2 * np.pi),
|
| 484 |
+
stats.uniform(loc=-np.pi, scale=2 * np.pi),
|
| 485 |
+
stats.uniform(loc=-np.pi, scale=2 * np.pi)]
|
| 486 |
+
res = stats.sobol_indices(func=f_ishigami, n=1024, dists=dists, **kwargs)
|
| 487 |
+
return res.first_order
|
| 488 |
+
|
| 489 |
+
def qmc_engine(self, engine, **kwargs):
|
| 490 |
+
qrng = engine(d=1, **kwargs)
|
| 491 |
+
return qrng.random(4)
|
| 492 |
+
|
| 493 |
+
def halton(self, **kwargs):
|
| 494 |
+
return self.qmc_engine(stats.qmc.Halton, **kwargs)
|
| 495 |
+
|
| 496 |
+
def sobol(self, **kwargs):
|
| 497 |
+
return self.qmc_engine(stats.qmc.Sobol, **kwargs)
|
| 498 |
+
|
| 499 |
+
def latin_hypercube(self, **kwargs):
|
| 500 |
+
return self.qmc_engine(stats.qmc.LatinHypercube, **kwargs)
|
| 501 |
+
|
| 502 |
+
def poisson_disk(self, **kwargs):
|
| 503 |
+
return self.qmc_engine(stats.qmc.PoissonDisk, **kwargs)
|
| 504 |
+
|
| 505 |
+
def multivariate_normal_qmc(self, **kwargs):
|
| 506 |
+
X = stats.qmc.MultivariateNormalQMC([0], **kwargs)
|
| 507 |
+
return X.random(4)
|
| 508 |
+
|
| 509 |
+
def multinomial_qmc(self, **kwargs):
|
| 510 |
+
X = stats.qmc.MultinomialQMC([0.5, 0.5], 4, **kwargs)
|
| 511 |
+
return X.random(4)
|
| 512 |
+
|
| 513 |
+
def permutation_method(self, **kwargs):
|
| 514 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 515 |
+
data = tuple(rng.random((2, 100)))
|
| 516 |
+
method = stats.PermutationMethod(**kwargs)
|
| 517 |
+
return stats.pearsonr(*data, method=method).pvalue
|
| 518 |
+
|
| 519 |
+
def bootstrap_method(self, **kwargs):
|
| 520 |
+
rng = np.random.default_rng(3458934594269824562)
|
| 521 |
+
data = tuple(rng.random((2, 100)))
|
| 522 |
+
res = stats.pearsonr(*data)
|
| 523 |
+
method = stats.BootstrapMethod(**kwargs)
|
| 524 |
+
return res.confidence_interval(method=method)
|
| 525 |
+
|
| 526 |
+
@pytest.mark.fail_slow(10)
|
| 527 |
+
@pytest.mark.slow
|
| 528 |
+
@pytest.mark.parametrize("method, arg_name", [
|
| 529 |
+
(kmeans, "seed"),
|
| 530 |
+
(kmeans2, "seed"),
|
| 531 |
+
(barycentric, "random_state"),
|
| 532 |
+
(clarkson_woodruff_transform, "seed"),
|
| 533 |
+
(basinhopping, "seed"),
|
| 534 |
+
(differential_evolution, "seed"),
|
| 535 |
+
(dual_annealing, "seed"),
|
| 536 |
+
(check_grad, "seed"),
|
| 537 |
+
(random_array, 'random_state'),
|
| 538 |
+
(random, 'random_state'),
|
| 539 |
+
(rand, 'random_state'),
|
| 540 |
+
(svds, "random_state"),
|
| 541 |
+
(random_rotation, "random_state"),
|
| 542 |
+
(goodness_of_fit, "random_state"),
|
| 543 |
+
(permutation_test, "random_state"),
|
| 544 |
+
(bootstrap, "random_state"),
|
| 545 |
+
(permutation_method, "random_state"),
|
| 546 |
+
(bootstrap_method, "random_state"),
|
| 547 |
+
(dunnett, "random_state"),
|
| 548 |
+
(sobol_indices, "random_state"),
|
| 549 |
+
(halton, "seed"),
|
| 550 |
+
(sobol, "seed"),
|
| 551 |
+
(latin_hypercube, "seed"),
|
| 552 |
+
(poisson_disk, "seed"),
|
| 553 |
+
(multivariate_normal_qmc, "seed"),
|
| 554 |
+
(multinomial_qmc, "seed"),
|
| 555 |
+
])
|
| 556 |
+
def test_rng_deterministic(self, method, arg_name):
|
| 557 |
+
np.random.seed(None)
|
| 558 |
+
seed = 2949672964
|
| 559 |
+
|
| 560 |
+
rng = np.random.default_rng(seed)
|
| 561 |
+
message = "got multiple values for argument now known as `rng`"
|
| 562 |
+
with pytest.raises(TypeError, match=message):
|
| 563 |
+
method(self, **{'rng': rng, arg_name: seed})
|
| 564 |
+
|
| 565 |
+
rng = np.random.default_rng(seed)
|
| 566 |
+
res1 = method(self, rng=rng)
|
| 567 |
+
res2 = method(self, rng=seed)
|
| 568 |
+
assert_equal(res2, res1)
|
| 569 |
+
|
| 570 |
+
if method.__name__ in {"dunnett", "sobol_indices"}:
|
| 571 |
+
# the two kwargs have essentially the same behavior for these functions
|
| 572 |
+
res3 = method(self, **{arg_name: seed})
|
| 573 |
+
assert_equal(res3, res1)
|
| 574 |
+
return
|
| 575 |
+
|
| 576 |
+
rng = np.random.RandomState(seed)
|
| 577 |
+
res1 = method(self, **{arg_name: rng})
|
| 578 |
+
res2 = method(self, **{arg_name: seed})
|
| 579 |
+
|
| 580 |
+
if method.__name__ in {"halton", "sobol", "latin_hypercube", "poisson_disk",
|
| 581 |
+
"multivariate_normal_qmc", "multinomial_qmc"}:
|
| 582 |
+
# For these, passing `random_state=RandomState(seed)` is not the same as
|
| 583 |
+
# passing integer `seed`.
|
| 584 |
+
res1b = method(self, **{arg_name: np.random.RandomState(seed)})
|
| 585 |
+
assert_equal(res1b, res1)
|
| 586 |
+
res2b = method(self, **{arg_name: seed})
|
| 587 |
+
assert_equal(res2b, res2)
|
| 588 |
+
return
|
| 589 |
+
|
| 590 |
+
np.random.seed(seed)
|
| 591 |
+
res3 = method(self, **{arg_name: None})
|
| 592 |
+
assert_equal(res2, res1)
|
| 593 |
+
assert_equal(res3, res1)
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
class TestLazywhere:
|
| 597 |
+
n_arrays = strategies.integers(min_value=1, max_value=3)
|
| 598 |
+
rng_seed = strategies.integers(min_value=1000000000, max_value=9999999999)
|
| 599 |
+
dtype = strategies.sampled_from((np.float32, np.float64))
|
| 600 |
+
p = strategies.floats(min_value=0, max_value=1)
|
| 601 |
+
data = strategies.data()
|
| 602 |
+
|
| 603 |
+
@pytest.mark.fail_slow(10)
|
| 604 |
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning') # overflows, etc.
|
| 605 |
+
@skip_xp_backends('jax.numpy',
|
| 606 |
+
reason="JAX arrays do not support item assignment")
|
| 607 |
+
@pytest.mark.usefixtures("skip_xp_backends")
|
| 608 |
+
@array_api_compatible
|
| 609 |
+
@given(n_arrays=n_arrays, rng_seed=rng_seed, dtype=dtype, p=p, data=data)
|
| 610 |
+
@pytest.mark.thread_unsafe
|
| 611 |
+
def test_basic(self, n_arrays, rng_seed, dtype, p, data, xp):
|
| 612 |
+
mbs = npst.mutually_broadcastable_shapes(num_shapes=n_arrays+1,
|
| 613 |
+
min_side=0)
|
| 614 |
+
input_shapes, result_shape = data.draw(mbs)
|
| 615 |
+
cond_shape, *shapes = input_shapes
|
| 616 |
+
elements = {'allow_subnormal': False} # cupy/cupy#8382
|
| 617 |
+
fillvalue = xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=tuple(),
|
| 618 |
+
elements=elements)))
|
| 619 |
+
float_fillvalue = float(fillvalue)
|
| 620 |
+
arrays = [xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=shape)))
|
| 621 |
+
for shape in shapes]
|
| 622 |
+
|
| 623 |
+
def f(*args):
|
| 624 |
+
return sum(arg for arg in args)
|
| 625 |
+
|
| 626 |
+
def f2(*args):
|
| 627 |
+
return sum(arg for arg in args) / 2
|
| 628 |
+
|
| 629 |
+
rng = np.random.default_rng(rng_seed)
|
| 630 |
+
cond = xp.asarray(rng.random(size=cond_shape) > p)
|
| 631 |
+
|
| 632 |
+
res1 = _lazywhere(cond, arrays, f, fillvalue)
|
| 633 |
+
res2 = _lazywhere(cond, arrays, f, f2=f2)
|
| 634 |
+
if not is_array_api_strict(xp):
|
| 635 |
+
res3 = _lazywhere(cond, arrays, f, float_fillvalue)
|
| 636 |
+
|
| 637 |
+
# Ensure arrays are at least 1d to follow sane type promotion rules.
|
| 638 |
+
# This can be removed when minimum supported NumPy is 2.0
|
| 639 |
+
if xp == np:
|
| 640 |
+
cond, fillvalue, *arrays = np.atleast_1d(cond, fillvalue, *arrays)
|
| 641 |
+
|
| 642 |
+
ref1 = xp.where(cond, f(*arrays), fillvalue)
|
| 643 |
+
ref2 = xp.where(cond, f(*arrays), f2(*arrays))
|
| 644 |
+
if not is_array_api_strict(xp):
|
| 645 |
+
# Array API standard doesn't currently define behavior when fillvalue is a
|
| 646 |
+
# Python scalar. When it does, test can be run with array_api_strict, too.
|
| 647 |
+
ref3 = xp.where(cond, f(*arrays), float_fillvalue)
|
| 648 |
+
|
| 649 |
+
if xp == np: # because we ensured arrays are at least 1d
|
| 650 |
+
ref1 = ref1.reshape(result_shape)
|
| 651 |
+
ref2 = ref2.reshape(result_shape)
|
| 652 |
+
ref3 = ref3.reshape(result_shape)
|
| 653 |
+
|
| 654 |
+
xp_assert_close(res1, ref1, rtol=2e-16)
|
| 655 |
+
xp_assert_equal(res2, ref2)
|
| 656 |
+
if not is_array_api_strict(xp):
|
| 657 |
+
xp_assert_equal(res3, ref3)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from scipy.conftest import array_api_compatible
|
| 5 |
+
from scipy._lib._array_api import (
|
| 6 |
+
_GLOBAL_CONFIG, array_namespace, _asarray, xp_copy, xp_assert_equal, is_numpy,
|
| 7 |
+
np_compat,
|
| 8 |
+
)
|
| 9 |
+
from scipy._lib._array_api_no_0d import xp_assert_equal as xp_assert_equal_no_0d
|
| 10 |
+
|
| 11 |
+
skip_xp_backends = pytest.mark.skip_xp_backends
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.mark.skipif(not _GLOBAL_CONFIG["SCIPY_ARRAY_API"],
|
| 15 |
+
reason="Array API test; set environment variable SCIPY_ARRAY_API=1 to run it")
|
| 16 |
+
class TestArrayAPI:
|
| 17 |
+
|
| 18 |
+
def test_array_namespace(self):
|
| 19 |
+
x, y = np.array([0, 1, 2]), np.array([0, 1, 2])
|
| 20 |
+
xp = array_namespace(x, y)
|
| 21 |
+
assert 'array_api_compat.numpy' in xp.__name__
|
| 22 |
+
|
| 23 |
+
_GLOBAL_CONFIG["SCIPY_ARRAY_API"] = False
|
| 24 |
+
xp = array_namespace(x, y)
|
| 25 |
+
assert 'array_api_compat.numpy' in xp.__name__
|
| 26 |
+
_GLOBAL_CONFIG["SCIPY_ARRAY_API"] = True
|
| 27 |
+
|
| 28 |
+
@array_api_compatible
|
| 29 |
+
def test_asarray(self, xp):
|
| 30 |
+
x, y = _asarray([0, 1, 2], xp=xp), _asarray(np.arange(3), xp=xp)
|
| 31 |
+
ref = xp.asarray([0, 1, 2])
|
| 32 |
+
xp_assert_equal(x, ref)
|
| 33 |
+
xp_assert_equal(y, ref)
|
| 34 |
+
|
| 35 |
+
@pytest.mark.filterwarnings("ignore: the matrix subclass")
|
| 36 |
+
def test_raises(self):
|
| 37 |
+
msg = "of type `numpy.ma.MaskedArray` are not supported"
|
| 38 |
+
with pytest.raises(TypeError, match=msg):
|
| 39 |
+
array_namespace(np.ma.array(1), np.array(1))
|
| 40 |
+
|
| 41 |
+
msg = "of type `numpy.matrix` are not supported"
|
| 42 |
+
with pytest.raises(TypeError, match=msg):
|
| 43 |
+
array_namespace(np.array(1), np.matrix(1))
|
| 44 |
+
|
| 45 |
+
msg = "only boolean and numerical dtypes are supported"
|
| 46 |
+
with pytest.raises(TypeError, match=msg):
|
| 47 |
+
array_namespace([object()])
|
| 48 |
+
with pytest.raises(TypeError, match=msg):
|
| 49 |
+
array_namespace('abc')
|
| 50 |
+
|
| 51 |
+
def test_array_likes(self):
|
| 52 |
+
# should be no exceptions
|
| 53 |
+
array_namespace([0, 1, 2])
|
| 54 |
+
array_namespace(1, 2, 3)
|
| 55 |
+
array_namespace(1)
|
| 56 |
+
|
| 57 |
+
@skip_xp_backends('jax.numpy',
|
| 58 |
+
reason="JAX arrays do not support item assignment")
|
| 59 |
+
@pytest.mark.usefixtures("skip_xp_backends")
|
| 60 |
+
@array_api_compatible
|
| 61 |
+
def test_copy(self, xp):
|
| 62 |
+
for _xp in [xp, None]:
|
| 63 |
+
x = xp.asarray([1, 2, 3])
|
| 64 |
+
y = xp_copy(x, xp=_xp)
|
| 65 |
+
# with numpy we'd want to use np.shared_memory, but that's not specified
|
| 66 |
+
# in the array-api
|
| 67 |
+
x[0] = 10
|
| 68 |
+
x[1] = 11
|
| 69 |
+
x[2] = 12
|
| 70 |
+
|
| 71 |
+
assert x[0] != y[0]
|
| 72 |
+
assert x[1] != y[1]
|
| 73 |
+
assert x[2] != y[2]
|
| 74 |
+
assert id(x) != id(y)
|
| 75 |
+
|
| 76 |
+
@array_api_compatible
|
| 77 |
+
@pytest.mark.parametrize('dtype', ['int32', 'int64', 'float32', 'float64'])
|
| 78 |
+
@pytest.mark.parametrize('shape', [(), (3,)])
|
| 79 |
+
def test_strict_checks(self, xp, dtype, shape):
|
| 80 |
+
# Check that `_strict_check` behaves as expected
|
| 81 |
+
dtype = getattr(xp, dtype)
|
| 82 |
+
x = xp.broadcast_to(xp.asarray(1, dtype=dtype), shape)
|
| 83 |
+
x = x if shape else x[()]
|
| 84 |
+
y = np_compat.asarray(1)[()]
|
| 85 |
+
|
| 86 |
+
kwarg_names = ["check_namespace", "check_dtype", "check_shape", "check_0d"]
|
| 87 |
+
options = dict(zip(kwarg_names, [True, False, False, False]))
|
| 88 |
+
if xp == np:
|
| 89 |
+
xp_assert_equal(x, y, **options)
|
| 90 |
+
else:
|
| 91 |
+
with pytest.raises(AssertionError, match="Namespaces do not match."):
|
| 92 |
+
xp_assert_equal(x, y, **options)
|
| 93 |
+
|
| 94 |
+
options = dict(zip(kwarg_names, [False, True, False, False]))
|
| 95 |
+
if y.dtype.name in str(x.dtype):
|
| 96 |
+
xp_assert_equal(x, y, **options)
|
| 97 |
+
else:
|
| 98 |
+
with pytest.raises(AssertionError, match="dtypes do not match."):
|
| 99 |
+
xp_assert_equal(x, y, **options)
|
| 100 |
+
|
| 101 |
+
options = dict(zip(kwarg_names, [False, False, True, False]))
|
| 102 |
+
if x.shape == y.shape:
|
| 103 |
+
xp_assert_equal(x, y, **options)
|
| 104 |
+
else:
|
| 105 |
+
with pytest.raises(AssertionError, match="Shapes do not match."):
|
| 106 |
+
xp_assert_equal(x, xp.asarray(y), **options)
|
| 107 |
+
|
| 108 |
+
options = dict(zip(kwarg_names, [False, False, False, True]))
|
| 109 |
+
if is_numpy(xp) and x.shape == y.shape:
|
| 110 |
+
xp_assert_equal(x, y, **options)
|
| 111 |
+
elif is_numpy(xp):
|
| 112 |
+
with pytest.raises(AssertionError, match="Array-ness does not match."):
|
| 113 |
+
xp_assert_equal(x, y, **options)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@array_api_compatible
|
| 117 |
+
def test_check_scalar(self, xp):
|
| 118 |
+
if not is_numpy(xp):
|
| 119 |
+
pytest.skip("Scalars only exist in NumPy")
|
| 120 |
+
|
| 121 |
+
# identity always passes
|
| 122 |
+
xp_assert_equal(xp.float64(0), xp.float64(0))
|
| 123 |
+
xp_assert_equal(xp.asarray(0.), xp.asarray(0.))
|
| 124 |
+
xp_assert_equal(xp.float64(0), xp.float64(0), check_0d=False)
|
| 125 |
+
xp_assert_equal(xp.asarray(0.), xp.asarray(0.), check_0d=False)
|
| 126 |
+
|
| 127 |
+
# Check default convention: 0d-arrays are distinguished from scalars
|
| 128 |
+
message = "Array-ness does not match:.*"
|
| 129 |
+
with pytest.raises(AssertionError, match=message):
|
| 130 |
+
xp_assert_equal(xp.asarray(0.), xp.float64(0))
|
| 131 |
+
with pytest.raises(AssertionError, match=message):
|
| 132 |
+
xp_assert_equal(xp.float64(0), xp.asarray(0.))
|
| 133 |
+
with pytest.raises(AssertionError, match=message):
|
| 134 |
+
xp_assert_equal(xp.asarray(42), xp.int64(42))
|
| 135 |
+
with pytest.raises(AssertionError, match=message):
|
| 136 |
+
xp_assert_equal(xp.int64(42), xp.asarray(42))
|
| 137 |
+
|
| 138 |
+
# with `check_0d=False`, scalars-vs-0d passes (if values match)
|
| 139 |
+
xp_assert_equal(xp.asarray(0.), xp.float64(0), check_0d=False)
|
| 140 |
+
xp_assert_equal(xp.float64(0), xp.asarray(0.), check_0d=False)
|
| 141 |
+
# also with regular python objects
|
| 142 |
+
xp_assert_equal(xp.asarray(0.), 0., check_0d=False)
|
| 143 |
+
xp_assert_equal(0., xp.asarray(0.), check_0d=False)
|
| 144 |
+
xp_assert_equal(xp.asarray(42), 42, check_0d=False)
|
| 145 |
+
xp_assert_equal(42, xp.asarray(42), check_0d=False)
|
| 146 |
+
|
| 147 |
+
# as an alternative to `check_0d=False`, explicitly expect scalar
|
| 148 |
+
xp_assert_equal(xp.float64(0), xp.asarray(0.)[()])
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@array_api_compatible
|
| 152 |
+
def test_check_scalar_no_0d(self, xp):
|
| 153 |
+
if not is_numpy(xp):
|
| 154 |
+
pytest.skip("Scalars only exist in NumPy")
|
| 155 |
+
|
| 156 |
+
# identity passes, if first argument is not 0d (or check_0d=True)
|
| 157 |
+
xp_assert_equal_no_0d(xp.float64(0), xp.float64(0))
|
| 158 |
+
xp_assert_equal_no_0d(xp.float64(0), xp.float64(0), check_0d=True)
|
| 159 |
+
xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.), check_0d=True)
|
| 160 |
+
|
| 161 |
+
# by default, 0d values are forbidden as the first argument
|
| 162 |
+
message = "Result is a NumPy 0d-array.*"
|
| 163 |
+
with pytest.raises(AssertionError, match=message):
|
| 164 |
+
xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.))
|
| 165 |
+
with pytest.raises(AssertionError, match=message):
|
| 166 |
+
xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0))
|
| 167 |
+
with pytest.raises(AssertionError, match=message):
|
| 168 |
+
xp_assert_equal_no_0d(xp.asarray(42), xp.int64(42))
|
| 169 |
+
|
| 170 |
+
# Check default convention: 0d-arrays are NOT distinguished from scalars
|
| 171 |
+
xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.))
|
| 172 |
+
xp_assert_equal_no_0d(xp.int64(42), xp.asarray(42))
|
| 173 |
+
|
| 174 |
+
# opt in to 0d-check remains possible
|
| 175 |
+
message = "Array-ness does not match:.*"
|
| 176 |
+
with pytest.raises(AssertionError, match=message):
|
| 177 |
+
xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0), check_0d=True)
|
| 178 |
+
with pytest.raises(AssertionError, match=message):
|
| 179 |
+
xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.), check_0d=True)
|
| 180 |
+
with pytest.raises(AssertionError, match=message):
|
| 181 |
+
xp_assert_equal_no_0d(xp.asarray(42), xp.int64(0), check_0d=True)
|
| 182 |
+
with pytest.raises(AssertionError, match=message):
|
| 183 |
+
xp_assert_equal_no_0d(xp.int64(0), xp.asarray(42), check_0d=True)
|
| 184 |
+
|
| 185 |
+
# scalars-vs-0d passes (if values match) also with regular python objects
|
| 186 |
+
xp_assert_equal_no_0d(0., xp.asarray(0.))
|
| 187 |
+
xp_assert_equal_no_0d(42, xp.asarray(42))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
import pickle
|
| 3 |
+
from numpy.testing import assert_equal
|
| 4 |
+
from scipy._lib._bunch import _make_tuple_bunch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# `Result` is created at module scope (rather than inside a test) so that
# pickle can locate the class by its qualified name when round-tripping.
Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta'])
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestMakeTupleBunch:

    # ------------------------------------------------------------------
    # Tests exercising the module-level `Result` bunch type
    # ------------------------------------------------------------------

    def setup_method(self):
        # Fresh instance for every test method.
        self.result = Result(x=1, y=2, z=3, w=99, beta=0.5)

    def test_attribute_access(self):
        # Regular fields (x, y, z) and extra fields (w, beta) are all
        # reachable as attributes.
        assert_equal(self.result.x, 1)
        assert_equal(self.result.y, 2)
        assert_equal(self.result.z, 3)
        assert_equal(self.result.w, 99)
        assert_equal(self.result.beta, 0.5)

    def test_indexing(self):
        # Only the regular fields participate in tuple indexing.
        for index, expected in [(0, 1), (1, 2), (2, 3), (-1, 3)]:
            assert_equal(self.result[index], expected)
        with pytest.raises(IndexError, match='index out of range'):
            self.result[3]

    def test_unpacking(self):
        a, b, c = self.result
        assert_equal((a, b, c), (1, 2, 3))
        assert_equal(self.result, (1, 2, 3))

    def test_slice(self):
        assert_equal(self.result[1:], (2, 3))
        assert_equal(self.result[::2], (1, 3))
        assert_equal(self.result[::-1], (3, 2, 1))

    def test_len(self):
        assert_equal(len(self.result), 3)

    def test_repr(self):
        assert_equal(repr(self.result),
                     'Result(x=1, y=2, z=3, w=99, beta=0.5)')

    def test_hash(self):
        # Hash agrees with the underlying 3-tuple of regular fields.
        assert_equal(hash(self.result), hash((1, 2, 3)))

    def test_pickle(self):
        roundtripped = pickle.loads(pickle.dumps(self.result))
        assert isinstance(roundtripped, Result)
        for name in ('x', 'y', 'z', 'w', 'beta'):
            assert_equal(getattr(roundtripped, name),
                         getattr(self.result, name))

    def test_read_only_existing(self):
        with pytest.raises(AttributeError, match="can't set attribute"):
            self.result.x = -1

    def test_read_only_new(self):
        # Attributes that are not fields may still be attached.
        self.result.plate_of_shrimp = "lattice of coincidence"
        assert self.result.plate_of_shrimp == "lattice of coincidence"

    def test_constructor_missing_parameter(self):
        with pytest.raises(TypeError, match='missing'):
            # `w` is missing.
            Result(x=1, y=2, z=3, beta=0.75)

    def test_constructor_incorrect_parameter(self):
        with pytest.raises(TypeError, match='unexpected'):
            # `foo` is not an existing field.
            Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999)

    def test_module(self):
        expected_module = 'scipy._lib.tests.test_bunch'
        assert_equal(Result.__module__, expected_module)
        assert_equal(self.result.__module__, expected_module)

    def test_extra_fields_per_instance(self):
        # Extra-field values must be stored per instance, not on the
        # class, so two instances cannot clobber each other.
        r1 = Result(x=1, y=2, z=3, w=-1, beta=0.0)
        r2 = Result(x=4, y=5, z=6, w=99, beta=1.0)
        assert_equal(r1.w, -1)
        assert_equal(r1.beta, 0.0)
        # The remaining checks aren't essential, but verify them anyway.
        assert_equal(r1[:], (1, 2, 3))
        assert_equal(r2.w, 99)
        assert_equal(r2.beta, 1.0)
        assert_equal(r2[:], (4, 5, 6))

    # ------------------------------------------------------------------
    # Tests with freshly created bunch types
    # ------------------------------------------------------------------

    def test_extra_field_names_is_optional(self):
        Square = _make_tuple_bunch('Square', ['width', 'height'])
        sq = Square(width=1, height=2)
        assert_equal(sq.width, 1)
        assert_equal(sq.height, 2)
        assert_equal(repr(sq), 'Square(width=1, height=2)')

    def test_tuple_like(self):
        Tup = _make_tuple_bunch('Tup', ['a', 'b'])
        tu = Tup(a=1, b=2)
        assert isinstance(tu, tuple)
        assert isinstance(tu + (1,), tuple)

    def test_explicit_module(self):
        module_name = 'some.module.name'
        Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'],
                                module=module_name)
        foo = Foo(x=1, a=355, b=113)
        assert_equal(Foo.__module__, module_name)
        assert_equal(foo.__module__, module_name)

    # ------------------------------------------------------------------
    # Argument validation
    # ------------------------------------------------------------------

    @pytest.mark.parametrize('args', [('123', ['a'], ['b']),
                                      ('Foo', ['-3'], ['x']),
                                      ('Foo', ['a'], ['+-*/'])])
    def test_identifiers_not_allowed(self, args):
        with pytest.raises(ValueError, match='identifiers'):
            _make_tuple_bunch(*args)

    @pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']),
                                      ('Foo', ['a', 'b'], ['b', 'x'])])
    def test_repeated_field_names(self, args):
        with pytest.raises(ValueError, match='Duplicate'):
            _make_tuple_bunch(*args)

    @pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']),
                                      ('Foo', ['a'], ['_x'])])
    def test_leading_underscore_not_allowed(self, args):
        with pytest.raises(ValueError, match='underscore'):
            _make_tuple_bunch(*args)

    @pytest.mark.parametrize('args', [('Foo', ['def'], ['x']),
                                      ('Foo', ['a'], ['or']),
                                      ('and', ['a'], ['x'])])
    def test_keyword_not_allowed_in_fields(self, args):
        with pytest.raises(ValueError, match='keyword'):
            _make_tuple_bunch(*args)

    def test_at_least_one_field_name_required(self):
        with pytest.raises(ValueError, match='at least one name'):
            _make_tuple_bunch('Qwerty', [], ['a', 'b'])
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy.testing import assert_equal, assert_
|
| 2 |
+
from pytest import raises as assert_raises
|
| 3 |
+
|
| 4 |
+
import time
|
| 5 |
+
import pytest
|
| 6 |
+
import ctypes
|
| 7 |
+
import threading
|
| 8 |
+
from scipy._lib import _ccallback_c as _test_ccallback_cython
|
| 9 |
+
from scipy._lib import _test_ccallback
|
| 10 |
+
from scipy._lib._ccallback import LowLevelCallable
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
import cffi
|
| 14 |
+
HAVE_CFFI = True
|
| 15 |
+
except ImportError:
|
| 16 |
+
HAVE_CFFI = False
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
ERROR_VALUE = 2.0
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def callback_python(a, user_data=None):
    """Reference pure-Python callback: return ``a`` plus an increment.

    The increment is 1 when no ``user_data`` is supplied, otherwise
    ``user_data`` itself.  Raises ValueError for the module-level
    sentinel ``ERROR_VALUE`` so the tests can exercise error
    propagation through the C callers.
    """
    if a == ERROR_VALUE:
        raise ValueError("bad value")

    increment = 1 if user_data is None else user_data
    return a + increment
|
| 30 |
+
|
| 31 |
+
def _get_cffi_func(base, signature):
    """Re-wrap the ctypes function pointer `base` as a cffi callable
    with the given C `signature` string."""
    if not HAVE_CFFI:
        pytest.skip("cffi not installed")

    # Extract the raw function address from the ctypes object ...
    raw_address = ctypes.cast(base, ctypes.c_void_p).value

    # ... and reinterpret it through a fresh cffi handle.
    return cffi.FFI().cast(signature, raw_address)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _get_ctypes_data():
|
| 46 |
+
value = ctypes.c_double(2.0)
|
| 47 |
+
return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _get_cffi_data():
    """Return a cffi-owned pointer to a double holding 2.0."""
    if not HAVE_CFFI:
        pytest.skip("cffi not installed")
    return cffi.FFI().new('double *', 2.0)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Callers: routines that invoke a user callback through scipy's
# ccallback machinery, one per supported entry style.
CALLERS = {
    'simple': _test_ccallback.test_call_simple,
    'nodata': _test_ccallback.test_call_nodata,
    'nonlocal': _test_ccallback.test_call_nonlocal,
    'cython': _test_ccallback_cython.test_call_cython,
}

# Callbacks whose signatures the callers know.  Each entry is a
# zero-argument factory so that environment-dependent skips (e.g.
# missing cffi) happen lazily, inside the individual test.
FUNCS = {
    'python': lambda: callback_python,
    'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
    'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                   "plus1_cython"),
    'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
    'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
                                   'double (*)(double, int *, void *)'),
    'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
    'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                     "plus1b_cython"),
    'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
    'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
                                     'double (*)(double, double, int *, void *)'),
}

# Callbacks whose signatures the callers do NOT know; calling through
# them must fail cleanly.
BAD_FUNCS = {
    'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
    'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                      "plus1bc_cython"),
    'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
    'cffi_bc': lambda: _get_cffi_func(
        _test_ccallback_cython.plus1bc_ctypes,
        'double (*)(double, double, double, int *, void *)'
    ),
}

# Factories producing `user_data` payloads in each supported form.
USER_DATAS = {
    'ctypes': _get_ctypes_data,
    'cffi': _get_cffi_data,
    'capsule': _test_ccallback.test_get_data_capsule,
}
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def test_callbacks():
    """Every caller handles every known callback/user_data combination."""
    def check(caller_key, func_key, user_data_key):
        caller = CALLERS[caller_key]
        func = FUNCS[func_key]()
        user_data = USER_DATAS[user_data_key]()

        if func is callback_python:
            # Pure-Python path: bind user_data (2.0) via a closure.
            def func2(x):
                return func(x, 2.0)
        else:
            func2 = LowLevelCallable(func, user_data)
            func = LowLevelCallable(func)

        # Basic call: plus-one semantics.
        assert_equal(caller(func, 1.0), 2.0)

        # The sentinel input must propagate an error.
        assert_raises(ValueError, caller, func, ERROR_VALUE)

        # With user_data (a double equal to 2.0) the increment changes.
        assert_equal(caller(func2, 1.0), 3.0)

    for caller_key in sorted(CALLERS):
        for func_key in sorted(FUNCS):
            for user_data_key in sorted(USER_DATAS):
                check(caller_key, func_key, user_data_key)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def test_bad_callbacks():
    """Callbacks with unknown signatures are rejected with ValueError."""
    def check(caller_key, func_key, user_data_key):
        caller = CALLERS[caller_key]
        user_data = USER_DATAS[user_data_key]()
        func = BAD_FUNCS[func_key]()

        if func is callback_python:
            def func2(x):
                return func(x, 2.0)
        else:
            func2 = LowLevelCallable(func, user_data)
            func = LowLevelCallable(func)

        # The basic call must fail ...
        assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)

        # ... and so must the user_data variant.
        assert_raises(ValueError, caller, func2, 1.0)

        # The error message mentions both the offered signature and an
        # expected one.
        llfunc = LowLevelCallable(func)
        try:
            caller(llfunc, 1.0)
        except ValueError as err:
            msg = str(err)
            assert_(llfunc.signature in msg, msg)
            assert_('double (double, double, int *, void *)' in msg, msg)

    for caller_key in sorted(CALLERS):
        for func_key in sorted(BAD_FUNCS):
            for user_data_key in sorted(USER_DATAS):
                check(caller_key, func_key, user_data_key)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def test_signature_override():
    """An explicit `signature=` takes precedence over introspection."""
    caller = _test_ccallback.test_call_simple
    capsule = _test_ccallback.test_get_plus1_capsule()

    # A bogus signature is stored verbatim and rejected at call time.
    llcallable = LowLevelCallable(capsule, signature="bad signature")
    assert_equal(llcallable.signature, "bad signature")
    assert_raises(ValueError, caller, llcallable, 3)

    # The correct signature is accepted and the call goes through.
    llcallable = LowLevelCallable(capsule,
                                  signature="double (double, int *, void *)")
    assert_equal(llcallable.signature, "double (double, int *, void *)")
    assert_equal(caller(llcallable, 3), 4)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def test_threadsafety():
    """Re-entrant callbacks running on many threads must not interfere."""
    def callback(a, caller):
        # Recurse through the caller `a` times, doubling on the way out,
        # so the final result is 2**a.
        if a <= 0:
            return 1
        return 2 * caller(lambda x: callback(x, caller), a - 1)

    def check(caller_key):
        caller = CALLERS[caller_key]
        depth = 10
        results = []

        def worker():
            time.sleep(0.01)
            results.append(caller(lambda x: callback(x, caller), depth))

        threads = [threading.Thread(target=worker) for _ in range(20)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        assert_equal(results, [2.0**depth] * len(threads))

    for caller_key in CALLERS.keys():
        check(caller_key)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_config.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Check the SciPy config is valid.
|
| 3 |
+
"""
|
| 4 |
+
import scipy
|
| 5 |
+
import pytest
|
| 6 |
+
from unittest.mock import patch
|
| 7 |
+
|
| 8 |
+
pytestmark = pytest.mark.skipif(
|
| 9 |
+
not hasattr(scipy.__config__, "_built_with_meson"),
|
| 10 |
+
reason="Requires Meson builds",
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestSciPyConfigs:
    # Top-level keys every Meson-built scipy config dict must expose.
    REQUIRED_CONFIG_KEYS = [
        "Compilers",
        "Machine Information",
        "Python Information",
    ]

    @pytest.mark.thread_unsafe
    @patch("scipy.__config__._check_pyyaml")
    def test_pyyaml_not_found(self, mock_yaml_importer):
        # When pyyaml is unavailable, show_config should warn, not fail.
        mock_yaml_importer.side_effect = ModuleNotFoundError()
        with pytest.warns(UserWarning):
            scipy.show_config()

    def test_dict_mode(self):
        config = scipy.show_config(mode="dicts")

        assert isinstance(config, dict)
        assert all(key in config for key in self.REQUIRED_CONFIG_KEYS), (
            "Required key missing,"
            " see index of `False` with `REQUIRED_CONFIG_KEYS`"
        )

    def test_invalid_mode(self):
        with pytest.raises(AttributeError):
            scipy.show_config(mode="foo")

    def test_warn_to_add_tests(self):
        # Guard: a new display mode should come with its own test.
        assert len(scipy.__config__.DisplayModes) == 2, (
            "New mode detected,"
            " please add UT if applicable and increment this count"
        )
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
@pytest.mark.thread_unsafe
def test_cython_api_deprecation():
    """Importing the deprecated cython shim emits the exact warning."""
    match = ("`scipy._lib._test_deprecation_def.foo_deprecated` "
             "is deprecated, use `foo` instead!\n"
             "Deprecated in Scipy 42.0.0")
    with pytest.warns(DeprecationWarning, match=match):
        from .. import _test_deprecation_call
    assert _test_deprecation_call.call() == (1, 1)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_doccer.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
''' Some tests for the documenting decorator and support functions '''
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
import pytest
|
| 5 |
+
from numpy.testing import assert_equal, suppress_warnings
|
| 6 |
+
|
| 7 |
+
from scipy._lib import doccer
|
| 8 |
+
|
| 9 |
+
# python -OO strips docstrings
|
| 10 |
+
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
|
| 11 |
+
|
| 12 |
+
docstring = \
|
| 13 |
+
"""Docstring
|
| 14 |
+
%(strtest1)s
|
| 15 |
+
%(strtest2)s
|
| 16 |
+
%(strtest3)s
|
| 17 |
+
"""
|
| 18 |
+
param_doc1 = \
|
| 19 |
+
"""Another test
|
| 20 |
+
with some indent"""
|
| 21 |
+
|
| 22 |
+
param_doc2 = \
|
| 23 |
+
"""Another test, one line"""
|
| 24 |
+
|
| 25 |
+
param_doc3 = \
|
| 26 |
+
""" Another test
|
| 27 |
+
with some indent"""
|
| 28 |
+
|
| 29 |
+
doc_dict = {'strtest1':param_doc1,
|
| 30 |
+
'strtest2':param_doc2,
|
| 31 |
+
'strtest3':param_doc3}
|
| 32 |
+
|
| 33 |
+
filled_docstring = \
|
| 34 |
+
"""Docstring
|
| 35 |
+
Another test
|
| 36 |
+
with some indent
|
| 37 |
+
Another test, one line
|
| 38 |
+
Another test
|
| 39 |
+
with some indent
|
| 40 |
+
"""
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_unindent():
    # unindent_string is deprecated; silence the warning while testing.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        # Already-unindented strings pass through unchanged; the
        # uniformly indented variant collapses to param_doc1.
        assert_equal(doccer.unindent_string(param_doc1), param_doc1)
        assert_equal(doccer.unindent_string(param_doc2), param_doc2)
        assert_equal(doccer.unindent_string(param_doc3), param_doc1)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def test_unindent_dict():
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        unindented = doccer.unindent_dict(doc_dict)
    assert_equal(unindented['strtest1'], doc_dict['strtest1'])
    assert_equal(unindented['strtest2'], doc_dict['strtest2'])
    # strtest3 differs from strtest1 only by a uniform indent.
    assert_equal(unindented['strtest3'], doc_dict['strtest1'])
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def test_docformat():
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        udd = doccer.unindent_dict(doc_dict)
        formatted = doccer.docformat(docstring, udd)
        assert_equal(formatted, filled_docstring)
        single_doc = 'Single line doc %(strtest1)s'
        formatted = doccer.docformat(single_doc, doc_dict)
        # Note - the initial indent of the format string does not
        # affect the subsequent indent of the inserted parameter.
        assert_equal(formatted, """Single line doc Another test
   with some indent""")
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_decorator():
    # NOTE(review): the leading whitespace inside the `expected`
    # docstrings below is behavior-relevant and was unrecoverable from
    # the source view — confirm against the repository copy.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        # with unindentation of parameters
        decorator = doccer.filldoc(doc_dict, True)

        @decorator
        def func():
            """ Docstring
            %(strtest3)s
            """

        def expected():
            """ Docstring
            Another test
               with some indent
            """
        assert_equal(func.__doc__, expected.__doc__)

        # without unindentation of parameters

        # The docstring should be unindented for Python 3.13+
        # because of https://github.com/python/cpython/issues/81283
        decorator = doccer.filldoc(doc_dict,
                                   False if sys.version_info < (3, 13)
                                   else True)

        @decorator
        def func():
            """ Docstring
            %(strtest3)s
            """

        def expected():
            """ Docstring
            Another test
               with some indent
            """
        assert_equal(func.__doc__, expected.__doc__)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_inherit_docstring_from():

    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)

        class Foo:
            def func(self):
                '''Do something useful.'''
                return

            def func2(self):
                '''Something else.'''

        class Bar(Foo):
            # `%(super)s` expands to the parent's docstring.
            @doccer.inherit_docstring_from(Foo)
            def func(self):
                '''%(super)sABC'''
                return

            # With no docstring at all, the parent's is inherited verbatim.
            @doccer.inherit_docstring_from(Foo)
            def func2(self):
                # No docstring.
                return

        assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC')
        assert_equal(Bar.func2.__doc__, Foo.func2.__doc__)
        instance = Bar()
        assert_equal(instance.func.__doc__, Foo.func.__doc__ + 'ABC')
        assert_equal(instance.func2.__doc__, Foo.func2.__doc__)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
import sys
|
| 3 |
+
import subprocess
|
| 4 |
+
|
| 5 |
+
from .test_public_api import PUBLIC_MODULES
|
| 6 |
+
|
| 7 |
+
# Regression tests for gh-6793.
|
| 8 |
+
# Check that all modules are importable in a new Python process.
|
| 9 |
+
# This is not necessarily true if there are import cycles present.
|
| 10 |
+
|
| 11 |
+
@pytest.mark.fail_slow(40)
@pytest.mark.slow
@pytest.mark.thread_unsafe
def test_public_modules_importable():
    # Regression test for gh-6793: every public module must be
    # importable in a fresh interpreter, which an import cycle breaks.
    processes = [subprocess.Popen([sys.executable, '-c', f'import {module}'])
                 for module in PUBLIC_MODULES]
    for module, proc in zip(PUBLIC_MODULES, processes):
        assert proc.wait() == 0, f'Failed to import {module}'
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This test script is adopted from:
|
| 3 |
+
https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import pkgutil
|
| 7 |
+
import types
|
| 8 |
+
import importlib
|
| 9 |
+
import warnings
|
| 10 |
+
from importlib import import_module
|
| 11 |
+
|
| 12 |
+
import pytest
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import scipy
|
| 16 |
+
|
| 17 |
+
from scipy.conftest import xp_available_backends
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def test_dir_testing():
|
| 21 |
+
"""Assert that output of dir has only one "testing/tester"
|
| 22 |
+
attribute without duplicate"""
|
| 23 |
+
assert len(dir(scipy)) == len(set(dir(scipy)))
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Historically SciPy has not used leading underscores for private submodules
|
| 27 |
+
# much. This has resulted in lots of things that look like public modules
|
| 28 |
+
# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
|
| 29 |
+
# but were never intended to be public. The PUBLIC_MODULES list contains
|
| 30 |
+
# modules that are either public because they were meant to be, or because they
|
| 31 |
+
# contain public functions/objects that aren't present in any other namespace
|
| 32 |
+
# for whatever reason and therefore should be treated as public.
|
| 33 |
+
PUBLIC_MODULES = ["scipy." + s for s in [
|
| 34 |
+
"cluster",
|
| 35 |
+
"cluster.vq",
|
| 36 |
+
"cluster.hierarchy",
|
| 37 |
+
"constants",
|
| 38 |
+
"datasets",
|
| 39 |
+
"differentiate",
|
| 40 |
+
"fft",
|
| 41 |
+
"fftpack",
|
| 42 |
+
"integrate",
|
| 43 |
+
"interpolate",
|
| 44 |
+
"io",
|
| 45 |
+
"io.arff",
|
| 46 |
+
"io.matlab",
|
| 47 |
+
"io.wavfile",
|
| 48 |
+
"linalg",
|
| 49 |
+
"linalg.blas",
|
| 50 |
+
"linalg.cython_blas",
|
| 51 |
+
"linalg.lapack",
|
| 52 |
+
"linalg.cython_lapack",
|
| 53 |
+
"linalg.interpolative",
|
| 54 |
+
"ndimage",
|
| 55 |
+
"odr",
|
| 56 |
+
"optimize",
|
| 57 |
+
"optimize.elementwise",
|
| 58 |
+
"signal",
|
| 59 |
+
"signal.windows",
|
| 60 |
+
"sparse",
|
| 61 |
+
"sparse.linalg",
|
| 62 |
+
"sparse.csgraph",
|
| 63 |
+
"spatial",
|
| 64 |
+
"spatial.distance",
|
| 65 |
+
"spatial.transform",
|
| 66 |
+
"special",
|
| 67 |
+
"stats",
|
| 68 |
+
"stats.contingency",
|
| 69 |
+
"stats.distributions",
|
| 70 |
+
"stats.mstats",
|
| 71 |
+
"stats.qmc",
|
| 72 |
+
"stats.sampling"
|
| 73 |
+
]]
|
| 74 |
+
|
| 75 |
+
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that lacked underscores
|
| 76 |
+
# in their name and hence looked public, but weren't meant to be. All these
|
| 77 |
+
# namespace were deprecated in the 1.8.0 release - see "clear split between
|
| 78 |
+
# public and private API" in the 1.8.0 release notes.
|
| 79 |
+
# These private modules support will be removed in SciPy v2.0.0, as the
|
| 80 |
+
# deprecation messages emitted by each of these modules say.
|
| 81 |
+
PRIVATE_BUT_PRESENT_MODULES = [
|
| 82 |
+
'scipy.constants.codata',
|
| 83 |
+
'scipy.constants.constants',
|
| 84 |
+
'scipy.fftpack.basic',
|
| 85 |
+
'scipy.fftpack.convolve',
|
| 86 |
+
'scipy.fftpack.helper',
|
| 87 |
+
'scipy.fftpack.pseudo_diffs',
|
| 88 |
+
'scipy.fftpack.realtransforms',
|
| 89 |
+
'scipy.integrate.dop',
|
| 90 |
+
'scipy.integrate.lsoda',
|
| 91 |
+
'scipy.integrate.odepack',
|
| 92 |
+
'scipy.integrate.quadpack',
|
| 93 |
+
'scipy.integrate.vode',
|
| 94 |
+
'scipy.interpolate.dfitpack',
|
| 95 |
+
'scipy.interpolate.fitpack',
|
| 96 |
+
'scipy.interpolate.fitpack2',
|
| 97 |
+
'scipy.interpolate.interpnd',
|
| 98 |
+
'scipy.interpolate.interpolate',
|
| 99 |
+
'scipy.interpolate.ndgriddata',
|
| 100 |
+
'scipy.interpolate.polyint',
|
| 101 |
+
'scipy.interpolate.rbf',
|
| 102 |
+
'scipy.io.arff.arffread',
|
| 103 |
+
'scipy.io.harwell_boeing',
|
| 104 |
+
'scipy.io.idl',
|
| 105 |
+
'scipy.io.matlab.byteordercodes',
|
| 106 |
+
'scipy.io.matlab.mio',
|
| 107 |
+
'scipy.io.matlab.mio4',
|
| 108 |
+
'scipy.io.matlab.mio5',
|
| 109 |
+
'scipy.io.matlab.mio5_params',
|
| 110 |
+
'scipy.io.matlab.mio5_utils',
|
| 111 |
+
'scipy.io.matlab.mio_utils',
|
| 112 |
+
'scipy.io.matlab.miobase',
|
| 113 |
+
'scipy.io.matlab.streams',
|
| 114 |
+
'scipy.io.mmio',
|
| 115 |
+
'scipy.io.netcdf',
|
| 116 |
+
'scipy.linalg.basic',
|
| 117 |
+
'scipy.linalg.decomp',
|
| 118 |
+
'scipy.linalg.decomp_cholesky',
|
| 119 |
+
'scipy.linalg.decomp_lu',
|
| 120 |
+
'scipy.linalg.decomp_qr',
|
| 121 |
+
'scipy.linalg.decomp_schur',
|
| 122 |
+
'scipy.linalg.decomp_svd',
|
| 123 |
+
'scipy.linalg.matfuncs',
|
| 124 |
+
'scipy.linalg.misc',
|
| 125 |
+
'scipy.linalg.special_matrices',
|
| 126 |
+
'scipy.misc',
|
| 127 |
+
'scipy.misc.common',
|
| 128 |
+
'scipy.misc.doccer',
|
| 129 |
+
'scipy.ndimage.filters',
|
| 130 |
+
'scipy.ndimage.fourier',
|
| 131 |
+
'scipy.ndimage.interpolation',
|
| 132 |
+
'scipy.ndimage.measurements',
|
| 133 |
+
'scipy.ndimage.morphology',
|
| 134 |
+
'scipy.odr.models',
|
| 135 |
+
'scipy.odr.odrpack',
|
| 136 |
+
'scipy.optimize.cobyla',
|
| 137 |
+
'scipy.optimize.cython_optimize',
|
| 138 |
+
'scipy.optimize.lbfgsb',
|
| 139 |
+
'scipy.optimize.linesearch',
|
| 140 |
+
'scipy.optimize.minpack',
|
| 141 |
+
'scipy.optimize.minpack2',
|
| 142 |
+
'scipy.optimize.moduleTNC',
|
| 143 |
+
'scipy.optimize.nonlin',
|
| 144 |
+
'scipy.optimize.optimize',
|
| 145 |
+
'scipy.optimize.slsqp',
|
| 146 |
+
'scipy.optimize.tnc',
|
| 147 |
+
'scipy.optimize.zeros',
|
| 148 |
+
'scipy.signal.bsplines',
|
| 149 |
+
'scipy.signal.filter_design',
|
| 150 |
+
'scipy.signal.fir_filter_design',
|
| 151 |
+
'scipy.signal.lti_conversion',
|
| 152 |
+
'scipy.signal.ltisys',
|
| 153 |
+
'scipy.signal.signaltools',
|
| 154 |
+
'scipy.signal.spectral',
|
| 155 |
+
'scipy.signal.spline',
|
| 156 |
+
'scipy.signal.waveforms',
|
| 157 |
+
'scipy.signal.wavelets',
|
| 158 |
+
'scipy.signal.windows.windows',
|
| 159 |
+
'scipy.sparse.base',
|
| 160 |
+
'scipy.sparse.bsr',
|
| 161 |
+
'scipy.sparse.compressed',
|
| 162 |
+
'scipy.sparse.construct',
|
| 163 |
+
'scipy.sparse.coo',
|
| 164 |
+
'scipy.sparse.csc',
|
| 165 |
+
'scipy.sparse.csr',
|
| 166 |
+
'scipy.sparse.data',
|
| 167 |
+
'scipy.sparse.dia',
|
| 168 |
+
'scipy.sparse.dok',
|
| 169 |
+
'scipy.sparse.extract',
|
| 170 |
+
'scipy.sparse.lil',
|
| 171 |
+
'scipy.sparse.linalg.dsolve',
|
| 172 |
+
'scipy.sparse.linalg.eigen',
|
| 173 |
+
'scipy.sparse.linalg.interface',
|
| 174 |
+
'scipy.sparse.linalg.isolve',
|
| 175 |
+
'scipy.sparse.linalg.matfuncs',
|
| 176 |
+
'scipy.sparse.sparsetools',
|
| 177 |
+
'scipy.sparse.spfuncs',
|
| 178 |
+
'scipy.sparse.sputils',
|
| 179 |
+
'scipy.spatial.ckdtree',
|
| 180 |
+
'scipy.spatial.kdtree',
|
| 181 |
+
'scipy.spatial.qhull',
|
| 182 |
+
'scipy.spatial.transform.rotation',
|
| 183 |
+
'scipy.special.add_newdocs',
|
| 184 |
+
'scipy.special.basic',
|
| 185 |
+
'scipy.special.cython_special',
|
| 186 |
+
'scipy.special.orthogonal',
|
| 187 |
+
'scipy.special.sf_error',
|
| 188 |
+
'scipy.special.specfun',
|
| 189 |
+
'scipy.special.spfun_stats',
|
| 190 |
+
'scipy.stats.biasedurn',
|
| 191 |
+
'scipy.stats.kde',
|
| 192 |
+
'scipy.stats.morestats',
|
| 193 |
+
'scipy.stats.mstats_basic',
|
| 194 |
+
'scipy.stats.mstats_extras',
|
| 195 |
+
'scipy.stats.mvn',
|
| 196 |
+
'scipy.stats.stats',
|
| 197 |
+
]
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def is_unexpected(name):
|
| 201 |
+
"""Check if this needs to be considered."""
|
| 202 |
+
if '._' in name or '.tests' in name or '.setup' in name:
|
| 203 |
+
return False
|
| 204 |
+
|
| 205 |
+
if name in PUBLIC_MODULES:
|
| 206 |
+
return False
|
| 207 |
+
|
| 208 |
+
if name in PRIVATE_BUT_PRESENT_MODULES:
|
| 209 |
+
return False
|
| 210 |
+
|
| 211 |
+
return True
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
SKIP_LIST = [
|
| 215 |
+
'scipy.conftest',
|
| 216 |
+
'scipy.version',
|
| 217 |
+
'scipy.special.libsf_error_state'
|
| 218 |
+
]
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# XXX: this test does more than it says on the tin - in using `pkgutil.walk_packages`,
|
| 222 |
+
# it will raise if it encounters any exceptions which are not handled by `ignore_errors`
|
| 223 |
+
# while attempting to import each discovered package.
|
| 224 |
+
# For now, `ignore_errors` only ignores what is necessary, but this could be expanded -
|
| 225 |
+
# for example, to all errors from private modules or git subpackages - if desired.
|
| 226 |
+
@pytest.mark.thread_unsafe
|
| 227 |
+
def test_all_modules_are_expected():
|
| 228 |
+
"""
|
| 229 |
+
Test that we don't add anything that looks like a new public module by
|
| 230 |
+
accident. Check is based on filenames.
|
| 231 |
+
"""
|
| 232 |
+
|
| 233 |
+
def ignore_errors(name):
|
| 234 |
+
# if versions of other array libraries are installed which are incompatible
|
| 235 |
+
# with the installed NumPy version, there can be errors on importing
|
| 236 |
+
# `array_api_compat`. This should only raise if SciPy is configured with
|
| 237 |
+
# that library as an available backend.
|
| 238 |
+
backends = {'cupy', 'torch', 'dask.array'}
|
| 239 |
+
for backend in backends:
|
| 240 |
+
path = f'array_api_compat.{backend}'
|
| 241 |
+
if path in name and backend not in xp_available_backends:
|
| 242 |
+
return
|
| 243 |
+
raise
|
| 244 |
+
|
| 245 |
+
modnames = []
|
| 246 |
+
|
| 247 |
+
with np.testing.suppress_warnings() as sup:
|
| 248 |
+
sup.filter(DeprecationWarning,"scipy.misc")
|
| 249 |
+
for _, modname, _ in pkgutil.walk_packages(path=scipy.__path__,
|
| 250 |
+
prefix=scipy.__name__ + '.',
|
| 251 |
+
onerror=ignore_errors):
|
| 252 |
+
if is_unexpected(modname) and modname not in SKIP_LIST:
|
| 253 |
+
# We have a name that is new. If that's on purpose, add it to
|
| 254 |
+
# PUBLIC_MODULES. We don't expect to have to add anything to
|
| 255 |
+
# PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
|
| 256 |
+
modnames.append(modname)
|
| 257 |
+
|
| 258 |
+
if modnames:
|
| 259 |
+
raise AssertionError(f'Found unexpected modules: {modnames}')
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
# Stuff that clearly shouldn't be in the API and is detected by the next test
|
| 263 |
+
# below
|
| 264 |
+
SKIP_LIST_2 = [
|
| 265 |
+
'scipy.char',
|
| 266 |
+
'scipy.rec',
|
| 267 |
+
'scipy.emath',
|
| 268 |
+
'scipy.math',
|
| 269 |
+
'scipy.random',
|
| 270 |
+
'scipy.ctypeslib',
|
| 271 |
+
'scipy.ma'
|
| 272 |
+
]
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def test_all_modules_are_expected_2():
|
| 276 |
+
"""
|
| 277 |
+
Method checking all objects. The pkgutil-based method in
|
| 278 |
+
`test_all_modules_are_expected` does not catch imports into a namespace,
|
| 279 |
+
only filenames.
|
| 280 |
+
"""
|
| 281 |
+
|
| 282 |
+
def find_unexpected_members(mod_name):
|
| 283 |
+
members = []
|
| 284 |
+
module = importlib.import_module(mod_name)
|
| 285 |
+
if hasattr(module, '__all__'):
|
| 286 |
+
objnames = module.__all__
|
| 287 |
+
else:
|
| 288 |
+
objnames = dir(module)
|
| 289 |
+
|
| 290 |
+
for objname in objnames:
|
| 291 |
+
if not objname.startswith('_'):
|
| 292 |
+
fullobjname = mod_name + '.' + objname
|
| 293 |
+
if isinstance(getattr(module, objname), types.ModuleType):
|
| 294 |
+
if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
|
| 295 |
+
members.append(fullobjname)
|
| 296 |
+
|
| 297 |
+
return members
|
| 298 |
+
with np.testing.suppress_warnings() as sup:
|
| 299 |
+
sup.filter(DeprecationWarning, "scipy.misc")
|
| 300 |
+
unexpected_members = find_unexpected_members("scipy")
|
| 301 |
+
|
| 302 |
+
for modname in PUBLIC_MODULES:
|
| 303 |
+
unexpected_members.extend(find_unexpected_members(modname))
|
| 304 |
+
|
| 305 |
+
if unexpected_members:
|
| 306 |
+
raise AssertionError("Found unexpected object(s) that look like "
|
| 307 |
+
f"modules: {unexpected_members}")
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def test_api_importable():
|
| 311 |
+
"""
|
| 312 |
+
Check that all submodules listed higher up in this file can be imported
|
| 313 |
+
Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
|
| 314 |
+
simply need to be removed from the list (deprecation may or may not be
|
| 315 |
+
needed - apply common sense).
|
| 316 |
+
"""
|
| 317 |
+
def check_importable(module_name):
|
| 318 |
+
try:
|
| 319 |
+
importlib.import_module(module_name)
|
| 320 |
+
except (ImportError, AttributeError):
|
| 321 |
+
return False
|
| 322 |
+
|
| 323 |
+
return True
|
| 324 |
+
|
| 325 |
+
module_names = []
|
| 326 |
+
for module_name in PUBLIC_MODULES:
|
| 327 |
+
if not check_importable(module_name):
|
| 328 |
+
module_names.append(module_name)
|
| 329 |
+
|
| 330 |
+
if module_names:
|
| 331 |
+
raise AssertionError("Modules in the public API that cannot be "
|
| 332 |
+
f"imported: {module_names}")
|
| 333 |
+
|
| 334 |
+
with warnings.catch_warnings(record=True):
|
| 335 |
+
warnings.filterwarnings('always', category=DeprecationWarning)
|
| 336 |
+
warnings.filterwarnings('always', category=ImportWarning)
|
| 337 |
+
for module_name in PRIVATE_BUT_PRESENT_MODULES:
|
| 338 |
+
if not check_importable(module_name):
|
| 339 |
+
module_names.append(module_name)
|
| 340 |
+
|
| 341 |
+
if module_names:
|
| 342 |
+
raise AssertionError("Modules that are not really public but looked "
|
| 343 |
+
"public and can not be imported: "
|
| 344 |
+
f"{module_names}")
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
@pytest.mark.thread_unsafe
|
| 348 |
+
@pytest.mark.parametrize(("module_name", "correct_module"),
|
| 349 |
+
[('scipy.constants.codata', None),
|
| 350 |
+
('scipy.constants.constants', None),
|
| 351 |
+
('scipy.fftpack.basic', None),
|
| 352 |
+
('scipy.fftpack.helper', None),
|
| 353 |
+
('scipy.fftpack.pseudo_diffs', None),
|
| 354 |
+
('scipy.fftpack.realtransforms', None),
|
| 355 |
+
('scipy.integrate.dop', None),
|
| 356 |
+
('scipy.integrate.lsoda', None),
|
| 357 |
+
('scipy.integrate.odepack', None),
|
| 358 |
+
('scipy.integrate.quadpack', None),
|
| 359 |
+
('scipy.integrate.vode', None),
|
| 360 |
+
('scipy.interpolate.fitpack', None),
|
| 361 |
+
('scipy.interpolate.fitpack2', None),
|
| 362 |
+
('scipy.interpolate.interpolate', None),
|
| 363 |
+
('scipy.interpolate.ndgriddata', None),
|
| 364 |
+
('scipy.interpolate.polyint', None),
|
| 365 |
+
('scipy.interpolate.rbf', None),
|
| 366 |
+
('scipy.io.harwell_boeing', None),
|
| 367 |
+
('scipy.io.idl', None),
|
| 368 |
+
('scipy.io.mmio', None),
|
| 369 |
+
('scipy.io.netcdf', None),
|
| 370 |
+
('scipy.io.arff.arffread', 'arff'),
|
| 371 |
+
('scipy.io.matlab.byteordercodes', 'matlab'),
|
| 372 |
+
('scipy.io.matlab.mio_utils', 'matlab'),
|
| 373 |
+
('scipy.io.matlab.mio', 'matlab'),
|
| 374 |
+
('scipy.io.matlab.mio4', 'matlab'),
|
| 375 |
+
('scipy.io.matlab.mio5_params', 'matlab'),
|
| 376 |
+
('scipy.io.matlab.mio5_utils', 'matlab'),
|
| 377 |
+
('scipy.io.matlab.mio5', 'matlab'),
|
| 378 |
+
('scipy.io.matlab.miobase', 'matlab'),
|
| 379 |
+
('scipy.io.matlab.streams', 'matlab'),
|
| 380 |
+
('scipy.linalg.basic', None),
|
| 381 |
+
('scipy.linalg.decomp', None),
|
| 382 |
+
('scipy.linalg.decomp_cholesky', None),
|
| 383 |
+
('scipy.linalg.decomp_lu', None),
|
| 384 |
+
('scipy.linalg.decomp_qr', None),
|
| 385 |
+
('scipy.linalg.decomp_schur', None),
|
| 386 |
+
('scipy.linalg.decomp_svd', None),
|
| 387 |
+
('scipy.linalg.matfuncs', None),
|
| 388 |
+
('scipy.linalg.misc', None),
|
| 389 |
+
('scipy.linalg.special_matrices', None),
|
| 390 |
+
('scipy.ndimage.filters', None),
|
| 391 |
+
('scipy.ndimage.fourier', None),
|
| 392 |
+
('scipy.ndimage.interpolation', None),
|
| 393 |
+
('scipy.ndimage.measurements', None),
|
| 394 |
+
('scipy.ndimage.morphology', None),
|
| 395 |
+
('scipy.odr.models', None),
|
| 396 |
+
('scipy.odr.odrpack', None),
|
| 397 |
+
('scipy.optimize.cobyla', None),
|
| 398 |
+
('scipy.optimize.lbfgsb', None),
|
| 399 |
+
('scipy.optimize.linesearch', None),
|
| 400 |
+
('scipy.optimize.minpack', None),
|
| 401 |
+
('scipy.optimize.minpack2', None),
|
| 402 |
+
('scipy.optimize.moduleTNC', None),
|
| 403 |
+
('scipy.optimize.nonlin', None),
|
| 404 |
+
('scipy.optimize.optimize', None),
|
| 405 |
+
('scipy.optimize.slsqp', None),
|
| 406 |
+
('scipy.optimize.tnc', None),
|
| 407 |
+
('scipy.optimize.zeros', None),
|
| 408 |
+
('scipy.signal.bsplines', None),
|
| 409 |
+
('scipy.signal.filter_design', None),
|
| 410 |
+
('scipy.signal.fir_filter_design', None),
|
| 411 |
+
('scipy.signal.lti_conversion', None),
|
| 412 |
+
('scipy.signal.ltisys', None),
|
| 413 |
+
('scipy.signal.signaltools', None),
|
| 414 |
+
('scipy.signal.spectral', None),
|
| 415 |
+
('scipy.signal.waveforms', None),
|
| 416 |
+
('scipy.signal.wavelets', None),
|
| 417 |
+
('scipy.signal.windows.windows', 'windows'),
|
| 418 |
+
('scipy.sparse.lil', None),
|
| 419 |
+
('scipy.sparse.linalg.dsolve', 'linalg'),
|
| 420 |
+
('scipy.sparse.linalg.eigen', 'linalg'),
|
| 421 |
+
('scipy.sparse.linalg.interface', 'linalg'),
|
| 422 |
+
('scipy.sparse.linalg.isolve', 'linalg'),
|
| 423 |
+
('scipy.sparse.linalg.matfuncs', 'linalg'),
|
| 424 |
+
('scipy.sparse.sparsetools', None),
|
| 425 |
+
('scipy.sparse.spfuncs', None),
|
| 426 |
+
('scipy.sparse.sputils', None),
|
| 427 |
+
('scipy.spatial.ckdtree', None),
|
| 428 |
+
('scipy.spatial.kdtree', None),
|
| 429 |
+
('scipy.spatial.qhull', None),
|
| 430 |
+
('scipy.spatial.transform.rotation', 'transform'),
|
| 431 |
+
('scipy.special.add_newdocs', None),
|
| 432 |
+
('scipy.special.basic', None),
|
| 433 |
+
('scipy.special.orthogonal', None),
|
| 434 |
+
('scipy.special.sf_error', None),
|
| 435 |
+
('scipy.special.specfun', None),
|
| 436 |
+
('scipy.special.spfun_stats', None),
|
| 437 |
+
('scipy.stats.biasedurn', None),
|
| 438 |
+
('scipy.stats.kde', None),
|
| 439 |
+
('scipy.stats.morestats', None),
|
| 440 |
+
('scipy.stats.mstats_basic', 'mstats'),
|
| 441 |
+
('scipy.stats.mstats_extras', 'mstats'),
|
| 442 |
+
('scipy.stats.mvn', None),
|
| 443 |
+
('scipy.stats.stats', None)])
|
| 444 |
+
def test_private_but_present_deprecation(module_name, correct_module):
|
| 445 |
+
# gh-18279, gh-17572, gh-17771 noted that deprecation warnings
|
| 446 |
+
# for imports from private modules
|
| 447 |
+
# were misleading. Check that this is resolved.
|
| 448 |
+
module = import_module(module_name)
|
| 449 |
+
if correct_module is None:
|
| 450 |
+
import_name = f'scipy.{module_name.split(".")[1]}'
|
| 451 |
+
else:
|
| 452 |
+
import_name = f'scipy.{module_name.split(".")[1]}.{correct_module}'
|
| 453 |
+
|
| 454 |
+
correct_import = import_module(import_name)
|
| 455 |
+
|
| 456 |
+
# Attributes that were formerly in `module_name` can still be imported from
|
| 457 |
+
# `module_name`, albeit with a deprecation warning.
|
| 458 |
+
for attr_name in module.__all__:
|
| 459 |
+
# ensure attribute is present where the warning is pointing
|
| 460 |
+
assert getattr(correct_import, attr_name, None) is not None
|
| 461 |
+
message = f"Please import `{attr_name}` from the `{import_name}`..."
|
| 462 |
+
with pytest.deprecated_call(match=message):
|
| 463 |
+
getattr(module, attr_name)
|
| 464 |
+
|
| 465 |
+
# Attributes that were not in `module_name` get an error notifying the user
|
| 466 |
+
# that the attribute is not in `module_name` and that `module_name` is deprecated.
|
| 467 |
+
message = f"`{module_name}` is deprecated..."
|
| 468 |
+
with pytest.raises(AttributeError, match=message):
|
| 469 |
+
getattr(module, "ekki")
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import scipy
|
| 4 |
+
import scipy.version
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_valid_scipy_version():
|
| 8 |
+
# Verify that the SciPy version is a valid one (no .post suffix or other
|
| 9 |
+
# nonsense). See NumPy issue gh-6431 for an issue caused by an invalid
|
| 10 |
+
# version.
|
| 11 |
+
version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
|
| 12 |
+
dev_suffix = r"((.dev0)|(\.dev0+\+git[0-9]{8}.[0-9a-f]{7}))"
|
| 13 |
+
if scipy.version.release:
|
| 14 |
+
res = re.match(version_pattern, scipy.__version__)
|
| 15 |
+
else:
|
| 16 |
+
res = re.match(version_pattern + dev_suffix, scipy.__version__)
|
| 17 |
+
|
| 18 |
+
assert res is not None
|
| 19 |
+
assert scipy.__version__
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_version_submodule_members():
|
| 23 |
+
"""`scipy.version` may not be quite public, but we install it.
|
| 24 |
+
|
| 25 |
+
So check that we don't silently change its contents.
|
| 26 |
+
"""
|
| 27 |
+
for attr in ('version', 'full_version', 'short_version', 'git_revision', 'release'):
|
| 28 |
+
assert hasattr(scipy.version, attr)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Test tmpdirs module """
|
| 2 |
+
from os import getcwd
|
| 3 |
+
from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists
|
| 4 |
+
|
| 5 |
+
from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir
|
| 6 |
+
|
| 7 |
+
from numpy.testing import assert_, assert_equal
|
| 8 |
+
|
| 9 |
+
import pytest
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
MY_PATH = abspath(__file__)
|
| 13 |
+
MY_DIR = dirname(MY_PATH)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@pytest.mark.thread_unsafe
|
| 17 |
+
def test_tempdir():
|
| 18 |
+
with tempdir() as tmpdir:
|
| 19 |
+
fname = pjoin(tmpdir, 'example_file.txt')
|
| 20 |
+
with open(fname, "w") as fobj:
|
| 21 |
+
fobj.write('a string\\n')
|
| 22 |
+
assert_(not exists(tmpdir))
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@pytest.mark.thread_unsafe
|
| 26 |
+
def test_in_tempdir():
|
| 27 |
+
my_cwd = getcwd()
|
| 28 |
+
with in_tempdir() as tmpdir:
|
| 29 |
+
with open('test.txt', "w") as f:
|
| 30 |
+
f.write('some text')
|
| 31 |
+
assert_(isfile('test.txt'))
|
| 32 |
+
assert_(isfile(pjoin(tmpdir, 'test.txt')))
|
| 33 |
+
assert_(not exists(tmpdir))
|
| 34 |
+
assert_equal(getcwd(), my_cwd)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@pytest.mark.thread_unsafe
|
| 38 |
+
def test_given_directory():
|
| 39 |
+
# Test InGivenDirectory
|
| 40 |
+
cwd = getcwd()
|
| 41 |
+
with in_dir() as tmpdir:
|
| 42 |
+
assert_equal(tmpdir, abspath(cwd))
|
| 43 |
+
assert_equal(tmpdir, abspath(getcwd()))
|
| 44 |
+
with in_dir(MY_DIR) as tmpdir:
|
| 45 |
+
assert_equal(tmpdir, MY_DIR)
|
| 46 |
+
assert_equal(realpath(MY_DIR), realpath(abspath(getcwd())))
|
| 47 |
+
# We were deleting the given directory! Check not so now.
|
| 48 |
+
assert_(isfile(MY_PATH))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests which scan for certain occurrences in the code, they may not find
|
| 3 |
+
all of these occurrences but should catch almost all. This file was adapted
|
| 4 |
+
from NumPy.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
import ast
|
| 11 |
+
import tokenize
|
| 12 |
+
|
| 13 |
+
import scipy
|
| 14 |
+
|
| 15 |
+
import pytest
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ParseCall(ast.NodeVisitor):
|
| 19 |
+
def __init__(self):
|
| 20 |
+
self.ls = []
|
| 21 |
+
|
| 22 |
+
def visit_Attribute(self, node):
|
| 23 |
+
ast.NodeVisitor.generic_visit(self, node)
|
| 24 |
+
self.ls.append(node.attr)
|
| 25 |
+
|
| 26 |
+
def visit_Name(self, node):
|
| 27 |
+
self.ls.append(node.id)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class FindFuncs(ast.NodeVisitor):
|
| 31 |
+
def __init__(self, filename):
|
| 32 |
+
super().__init__()
|
| 33 |
+
self.__filename = filename
|
| 34 |
+
self.bad_filters = []
|
| 35 |
+
self.bad_stacklevels = []
|
| 36 |
+
|
| 37 |
+
def visit_Call(self, node):
|
| 38 |
+
p = ParseCall()
|
| 39 |
+
p.visit(node.func)
|
| 40 |
+
ast.NodeVisitor.generic_visit(self, node)
|
| 41 |
+
|
| 42 |
+
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
|
| 43 |
+
# get first argument of the `args` node of the filter call
|
| 44 |
+
match node.args[0]:
|
| 45 |
+
case ast.Constant() as c:
|
| 46 |
+
argtext = c.value
|
| 47 |
+
case ast.JoinedStr() as js:
|
| 48 |
+
# if we get an f-string, discard the templated pieces, which
|
| 49 |
+
# are likely the type or specific message; we're interested
|
| 50 |
+
# in the action, which is less likely to use a template
|
| 51 |
+
argtext = "".join(
|
| 52 |
+
x.value for x in js.values if isinstance(x, ast.Constant)
|
| 53 |
+
)
|
| 54 |
+
case _:
|
| 55 |
+
raise ValueError("unknown ast node type")
|
| 56 |
+
# check if filter is set to ignore
|
| 57 |
+
if argtext == "ignore":
|
| 58 |
+
self.bad_filters.append(
|
| 59 |
+
f"{self.__filename}:{node.lineno}")
|
| 60 |
+
|
| 61 |
+
if p.ls[-1] == 'warn' and (
|
| 62 |
+
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
|
| 63 |
+
|
| 64 |
+
if self.__filename == "_lib/tests/test_warnings.py":
|
| 65 |
+
# This file
|
| 66 |
+
return
|
| 67 |
+
|
| 68 |
+
# See if stacklevel exists:
|
| 69 |
+
if len(node.args) == 3:
|
| 70 |
+
return
|
| 71 |
+
args = {kw.arg for kw in node.keywords}
|
| 72 |
+
if "stacklevel" not in args:
|
| 73 |
+
self.bad_stacklevels.append(
|
| 74 |
+
f"{self.__filename}:{node.lineno}")
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@pytest.fixture(scope="session")
|
| 78 |
+
def warning_calls():
|
| 79 |
+
# combined "ignore" and stacklevel error
|
| 80 |
+
base = Path(scipy.__file__).parent
|
| 81 |
+
|
| 82 |
+
bad_filters = []
|
| 83 |
+
bad_stacklevels = []
|
| 84 |
+
|
| 85 |
+
for path in base.rglob("*.py"):
|
| 86 |
+
# use tokenize to auto-detect encoding on systems where no
|
| 87 |
+
# default encoding is defined (e.g., LANG='C')
|
| 88 |
+
with tokenize.open(str(path)) as file:
|
| 89 |
+
tree = ast.parse(file.read(), filename=str(path))
|
| 90 |
+
finder = FindFuncs(path.relative_to(base))
|
| 91 |
+
finder.visit(tree)
|
| 92 |
+
bad_filters.extend(finder.bad_filters)
|
| 93 |
+
bad_stacklevels.extend(finder.bad_stacklevels)
|
| 94 |
+
|
| 95 |
+
return bad_filters, bad_stacklevels
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@pytest.mark.fail_slow(40)
|
| 99 |
+
@pytest.mark.slow
|
| 100 |
+
def test_warning_calls_filters(warning_calls):
|
| 101 |
+
bad_filters, bad_stacklevels = warning_calls
|
| 102 |
+
|
| 103 |
+
# We try not to add filters in the code base, because those filters aren't
|
| 104 |
+
# thread-safe. We aim to only filter in tests with
|
| 105 |
+
# np.testing.suppress_warnings. However, in some cases it may prove
|
| 106 |
+
# necessary to filter out warnings, because we can't (easily) fix the root
|
| 107 |
+
# cause for them and we don't want users to see some warnings when they use
|
| 108 |
+
# SciPy correctly. So we list exceptions here. Add new entries only if
|
| 109 |
+
# there's a good reason.
|
| 110 |
+
allowed_filters = (
|
| 111 |
+
os.path.join('datasets', '_fetchers.py'),
|
| 112 |
+
os.path.join('datasets', '__init__.py'),
|
| 113 |
+
os.path.join('optimize', '_optimize.py'),
|
| 114 |
+
os.path.join('optimize', '_constraints.py'),
|
| 115 |
+
os.path.join('optimize', '_nnls.py'),
|
| 116 |
+
os.path.join('signal', '_ltisys.py'),
|
| 117 |
+
os.path.join('sparse', '__init__.py'), # np.matrix pending-deprecation
|
| 118 |
+
os.path.join('special', '_basic.py'), # gh-21801
|
| 119 |
+
os.path.join('stats', '_discrete_distns.py'), # gh-14901
|
| 120 |
+
os.path.join('stats', '_continuous_distns.py'),
|
| 121 |
+
os.path.join('stats', '_binned_statistic.py'), # gh-19345
|
| 122 |
+
os.path.join('stats', '_stats_py.py'), # gh-20743
|
| 123 |
+
os.path.join('stats', 'tests', 'test_axis_nan_policy.py'), # gh-20694
|
| 124 |
+
os.path.join('_lib', '_util.py'), # gh-19341
|
| 125 |
+
os.path.join('sparse', 'linalg', '_dsolve', 'linsolve.py'), # gh-17924
|
| 126 |
+
"conftest.py",
|
| 127 |
+
)
|
| 128 |
+
bad_filters = [item for item in bad_filters if item.split(':')[0] not in
|
| 129 |
+
allowed_filters]
|
| 130 |
+
|
| 131 |
+
if bad_filters:
|
| 132 |
+
raise AssertionError(
|
| 133 |
+
"warning ignore filter should not be used, instead, use\n"
|
| 134 |
+
"numpy.testing.suppress_warnings (in tests only);\n"
|
| 135 |
+
"found in:\n {}".format(
|
| 136 |
+
"\n ".join(bad_filters)))
|
| 137 |
+
|
moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc
ADDED
|
Binary file (3.07 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc
ADDED
|
Binary file (60.4 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc
ADDED
|
Binary file (7.95 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc
ADDED
|
Binary file (32.8 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc
ADDED
|
Binary file (2.7 kB). View file
|
|
|