Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- deepseek/lib/python3.10/importlib/metadata/__pycache__/_text.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/attrdict-2.0.1.dist-info/RECORD +20 -0
- deepseek/lib/python3.10/site-packages/attrdict-2.0.1.dist-info/WHEEL +6 -0
- deepseek/lib/python3.10/site-packages/attrdict-2.0.1.dist-info/top_level.txt +1 -0
- deepseek/lib/python3.10/site-packages/click-8.1.7.dist-info/top_level.txt +1 -0
- deepseek/lib/python3.10/site-packages/cv2/detail/__init__.pyi +600 -0
- deepseek/lib/python3.10/site-packages/cv2/gapi/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/cv2/gapi/core/ocl/__init__.pyi +9 -0
- deepseek/lib/python3.10/site-packages/cv2/gapi/ie/detail/__init__.pyi +12 -0
- deepseek/lib/python3.10/site-packages/cv2/gapi/ot/cpu/__init__.pyi +9 -0
- deepseek/lib/python3.10/site-packages/cv2/gapi/ov/__init__.pyi +74 -0
- deepseek/lib/python3.10/site-packages/filelock-3.16.1.dist-info/METADATA +59 -0
- deepseek/lib/python3.10/site-packages/filelock-3.16.1.dist-info/RECORD +25 -0
- deepseek/lib/python3.10/site-packages/filelock-3.16.1.dist-info/WHEEL +4 -0
- deepseek/lib/python3.10/site-packages/isympy.py +342 -0
- deepseek/lib/python3.10/site-packages/prometheus_fastapi_instrumentator-7.0.0.dist-info/LICENSE +15 -0
- deepseek/lib/python3.10/site-packages/prometheus_fastapi_instrumentator-7.0.0.dist-info/METADATA +339 -0
- deepseek/lib/python3.10/site-packages/prometheus_fastapi_instrumentator-7.0.0.dist-info/RECORD +17 -0
- deepseek/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_model_pb2.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/sentencepiece/_version.py +1 -0
- deepseek/lib/python3.10/site-packages/sentencepiece/sentencepiece_model_pb2.py +44 -0
- deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/INSTALLER +1 -0
- deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/METADATA +557 -0
- deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/RECORD +0 -0
- deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/top_level.txt +3 -0
- deepseek/lib/python3.10/site-packages/typing_extensions.py +0 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py +753 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_cobyqa_py.py +62 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py +728 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_differentiate.py +856 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_direct_py.py +278 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so +0 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py +475 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so +3 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog.py +716 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py +440 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py +1126 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py +572 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py +661 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py +1522 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_minimize.py +1116 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so +0 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so +3 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_nnls.py +164 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_nonlin.py +1585 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_numdiff.py +779 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_optimize.py +0 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so +0 -0
- evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_spectral.py +260 -0
.gitattributes
CHANGED
|
@@ -1380,3 +1380,6 @@ evalkit_tf446/lib/python3.10/site-packages/scipy/io/matlab/_mio5_utils.cpython-3
|
|
| 1380 |
evalkit_tf446/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1381 |
evalkit_tf446/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1382 |
deepseek/bin/lzma filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 1380 |
evalkit_tf446/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1381 |
evalkit_tf446/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1382 |
deepseek/bin/lzma filter=lfs diff=lfs merge=lfs -text
|
| 1383 |
+
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1384 |
+
evalkit_tf446/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1385 |
+
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
deepseek/lib/python3.10/importlib/metadata/__pycache__/_text.cpython-310.pyc
ADDED
|
Binary file (3.31 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/attrdict-2.0.1.dist-info/RECORD
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
attrdict-2.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
attrdict-2.0.1.dist-info/LICENSE.txt,sha256=7bXwDR-EXRD9ybjGNRq4IPk_ZEm3aWev8xkme2Fb4k4,1066
|
| 3 |
+
attrdict-2.0.1.dist-info/METADATA,sha256=qp1NCkW6JHByrhJOt9Pt6JuS45qvdtTxRWIf0jdIGlA,6690
|
| 4 |
+
attrdict-2.0.1.dist-info/RECORD,,
|
| 5 |
+
attrdict-2.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
attrdict-2.0.1.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110
|
| 7 |
+
attrdict-2.0.1.dist-info/top_level.txt,sha256=2f1-Wyfr5ZHsGvOFLqcj3y6OfZglxI3gjETO12COZRc,9
|
| 8 |
+
attrdict-2.0.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
|
| 9 |
+
attrdict/__init__.py,sha256=fdJfkB3hQK2tcqck7FSmCKjyzmjx29Z3MBQyAev43E0,267
|
| 10 |
+
attrdict/__pycache__/__init__.cpython-310.pyc,,
|
| 11 |
+
attrdict/__pycache__/default.cpython-310.pyc,,
|
| 12 |
+
attrdict/__pycache__/dictionary.cpython-310.pyc,,
|
| 13 |
+
attrdict/__pycache__/mapping.cpython-310.pyc,,
|
| 14 |
+
attrdict/__pycache__/merge.cpython-310.pyc,,
|
| 15 |
+
attrdict/__pycache__/mixins.cpython-310.pyc,,
|
| 16 |
+
attrdict/default.py,sha256=dpolSpC0J185AIAG75E0Sm6fqOkp-7hssH-x1u6v8co,3540
|
| 17 |
+
attrdict/dictionary.py,sha256=EjolfMd-kzn5K009pTx2Mr_O4OCEPEg-57Z_6-Lsixw,1462
|
| 18 |
+
attrdict/mapping.py,sha256=QGEy-z-3O3OnPXBB9XgE7WXlWtM_nheQwBaqmSfmfRs,2464
|
| 19 |
+
attrdict/merge.py,sha256=ffljqIQ1fKRWUOcLRtoZnDzZp9h06aYmGFGYz66-wlY,1083
|
| 20 |
+
attrdict/mixins.py,sha256=Jya5crO7NsUbZJYVMUnZvc2QjxpSWDUFoIEq7xcMd6U,6624
|
deepseek/lib/python3.10/site-packages/attrdict-2.0.1.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.32.3)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py2-none-any
|
| 5 |
+
Tag: py3-none-any
|
| 6 |
+
|
deepseek/lib/python3.10/site-packages/attrdict-2.0.1.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
attrdict
|
deepseek/lib/python3.10/site-packages/click-8.1.7.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
click
|
deepseek/lib/python3.10/site-packages/cv2/detail/__init__.pyi
ADDED
|
@@ -0,0 +1,600 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__: list[str] = []
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import cv2.gapi
|
| 5 |
+
import cv2.gapi.ie
|
| 6 |
+
import cv2.gapi.onnx
|
| 7 |
+
import cv2.gapi.ov
|
| 8 |
+
import cv2.typing
|
| 9 |
+
import numpy
|
| 10 |
+
import typing as _typing
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Enumerations
|
| 14 |
+
TEST_CUSTOM: int
|
| 15 |
+
TEST_EQ: int
|
| 16 |
+
TEST_NE: int
|
| 17 |
+
TEST_LE: int
|
| 18 |
+
TEST_LT: int
|
| 19 |
+
TEST_GE: int
|
| 20 |
+
TEST_GT: int
|
| 21 |
+
TestOp = int
|
| 22 |
+
"""One of [TEST_CUSTOM, TEST_EQ, TEST_NE, TEST_LE, TEST_LT, TEST_GE, TEST_GT]"""
|
| 23 |
+
|
| 24 |
+
WAVE_CORRECT_HORIZ: int
|
| 25 |
+
WAVE_CORRECT_VERT: int
|
| 26 |
+
WAVE_CORRECT_AUTO: int
|
| 27 |
+
WaveCorrectKind = int
|
| 28 |
+
"""One of [WAVE_CORRECT_HORIZ, WAVE_CORRECT_VERT, WAVE_CORRECT_AUTO]"""
|
| 29 |
+
|
| 30 |
+
OpaqueKind_CV_UNKNOWN: int
|
| 31 |
+
OPAQUE_KIND_CV_UNKNOWN: int
|
| 32 |
+
OpaqueKind_CV_BOOL: int
|
| 33 |
+
OPAQUE_KIND_CV_BOOL: int
|
| 34 |
+
OpaqueKind_CV_INT: int
|
| 35 |
+
OPAQUE_KIND_CV_INT: int
|
| 36 |
+
OpaqueKind_CV_INT64: int
|
| 37 |
+
OPAQUE_KIND_CV_INT64: int
|
| 38 |
+
OpaqueKind_CV_DOUBLE: int
|
| 39 |
+
OPAQUE_KIND_CV_DOUBLE: int
|
| 40 |
+
OpaqueKind_CV_FLOAT: int
|
| 41 |
+
OPAQUE_KIND_CV_FLOAT: int
|
| 42 |
+
OpaqueKind_CV_UINT64: int
|
| 43 |
+
OPAQUE_KIND_CV_UINT64: int
|
| 44 |
+
OpaqueKind_CV_STRING: int
|
| 45 |
+
OPAQUE_KIND_CV_STRING: int
|
| 46 |
+
OpaqueKind_CV_POINT: int
|
| 47 |
+
OPAQUE_KIND_CV_POINT: int
|
| 48 |
+
OpaqueKind_CV_POINT2F: int
|
| 49 |
+
OPAQUE_KIND_CV_POINT2F: int
|
| 50 |
+
OpaqueKind_CV_POINT3F: int
|
| 51 |
+
OPAQUE_KIND_CV_POINT3F: int
|
| 52 |
+
OpaqueKind_CV_SIZE: int
|
| 53 |
+
OPAQUE_KIND_CV_SIZE: int
|
| 54 |
+
OpaqueKind_CV_RECT: int
|
| 55 |
+
OPAQUE_KIND_CV_RECT: int
|
| 56 |
+
OpaqueKind_CV_SCALAR: int
|
| 57 |
+
OPAQUE_KIND_CV_SCALAR: int
|
| 58 |
+
OpaqueKind_CV_MAT: int
|
| 59 |
+
OPAQUE_KIND_CV_MAT: int
|
| 60 |
+
OpaqueKind_CV_DRAW_PRIM: int
|
| 61 |
+
OPAQUE_KIND_CV_DRAW_PRIM: int
|
| 62 |
+
OpaqueKind = int
|
| 63 |
+
"""One of [OpaqueKind_CV_UNKNOWN, OPAQUE_KIND_CV_UNKNOWN, OpaqueKind_CV_BOOL, OPAQUE_KIND_CV_BOOL, OpaqueKind_CV_INT, OPAQUE_KIND_CV_INT, OpaqueKind_CV_INT64, OPAQUE_KIND_CV_INT64, OpaqueKind_CV_DOUBLE, OPAQUE_KIND_CV_DOUBLE, OpaqueKind_CV_FLOAT, OPAQUE_KIND_CV_FLOAT, OpaqueKind_CV_UINT64, OPAQUE_KIND_CV_UINT64, OpaqueKind_CV_STRING, OPAQUE_KIND_CV_STRING, OpaqueKind_CV_POINT, OPAQUE_KIND_CV_POINT, OpaqueKind_CV_POINT2F, OPAQUE_KIND_CV_POINT2F, OpaqueKind_CV_POINT3F, OPAQUE_KIND_CV_POINT3F, OpaqueKind_CV_SIZE, OPAQUE_KIND_CV_SIZE, OpaqueKind_CV_RECT, OPAQUE_KIND_CV_RECT, OpaqueKind_CV_SCALAR, OPAQUE_KIND_CV_SCALAR, OpaqueKind_CV_MAT, OPAQUE_KIND_CV_MAT, OpaqueKind_CV_DRAW_PRIM, OPAQUE_KIND_CV_DRAW_PRIM]"""
|
| 64 |
+
|
| 65 |
+
ArgKind_OPAQUE_VAL: int
|
| 66 |
+
ARG_KIND_OPAQUE_VAL: int
|
| 67 |
+
ArgKind_OPAQUE: int
|
| 68 |
+
ARG_KIND_OPAQUE: int
|
| 69 |
+
ArgKind_GOBJREF: int
|
| 70 |
+
ARG_KIND_GOBJREF: int
|
| 71 |
+
ArgKind_GMAT: int
|
| 72 |
+
ARG_KIND_GMAT: int
|
| 73 |
+
ArgKind_GMATP: int
|
| 74 |
+
ARG_KIND_GMATP: int
|
| 75 |
+
ArgKind_GFRAME: int
|
| 76 |
+
ARG_KIND_GFRAME: int
|
| 77 |
+
ArgKind_GSCALAR: int
|
| 78 |
+
ARG_KIND_GSCALAR: int
|
| 79 |
+
ArgKind_GARRAY: int
|
| 80 |
+
ARG_KIND_GARRAY: int
|
| 81 |
+
ArgKind_GOPAQUE: int
|
| 82 |
+
ARG_KIND_GOPAQUE: int
|
| 83 |
+
ArgKind = int
|
| 84 |
+
"""One of [ArgKind_OPAQUE_VAL, ARG_KIND_OPAQUE_VAL, ArgKind_OPAQUE, ARG_KIND_OPAQUE, ArgKind_GOBJREF, ARG_KIND_GOBJREF, ArgKind_GMAT, ARG_KIND_GMAT, ArgKind_GMATP, ARG_KIND_GMATP, ArgKind_GFRAME, ARG_KIND_GFRAME, ArgKind_GSCALAR, ARG_KIND_GSCALAR, ArgKind_GARRAY, ARG_KIND_GARRAY, ArgKind_GOPAQUE, ARG_KIND_GOPAQUE]"""
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
Blender_NO: int
|
| 88 |
+
BLENDER_NO: int
|
| 89 |
+
Blender_FEATHER: int
|
| 90 |
+
BLENDER_FEATHER: int
|
| 91 |
+
Blender_MULTI_BAND: int
|
| 92 |
+
BLENDER_MULTI_BAND: int
|
| 93 |
+
|
| 94 |
+
ExposureCompensator_NO: int
|
| 95 |
+
EXPOSURE_COMPENSATOR_NO: int
|
| 96 |
+
ExposureCompensator_GAIN: int
|
| 97 |
+
EXPOSURE_COMPENSATOR_GAIN: int
|
| 98 |
+
ExposureCompensator_GAIN_BLOCKS: int
|
| 99 |
+
EXPOSURE_COMPENSATOR_GAIN_BLOCKS: int
|
| 100 |
+
ExposureCompensator_CHANNELS: int
|
| 101 |
+
EXPOSURE_COMPENSATOR_CHANNELS: int
|
| 102 |
+
ExposureCompensator_CHANNELS_BLOCKS: int
|
| 103 |
+
EXPOSURE_COMPENSATOR_CHANNELS_BLOCKS: int
|
| 104 |
+
|
| 105 |
+
SeamFinder_NO: int
|
| 106 |
+
SEAM_FINDER_NO: int
|
| 107 |
+
SeamFinder_VORONOI_SEAM: int
|
| 108 |
+
SEAM_FINDER_VORONOI_SEAM: int
|
| 109 |
+
SeamFinder_DP_SEAM: int
|
| 110 |
+
SEAM_FINDER_DP_SEAM: int
|
| 111 |
+
|
| 112 |
+
DpSeamFinder_COLOR: int
|
| 113 |
+
DP_SEAM_FINDER_COLOR: int
|
| 114 |
+
DpSeamFinder_COLOR_GRAD: int
|
| 115 |
+
DP_SEAM_FINDER_COLOR_GRAD: int
|
| 116 |
+
DpSeamFinder_CostFunction = int
|
| 117 |
+
"""One of [DpSeamFinder_COLOR, DP_SEAM_FINDER_COLOR, DpSeamFinder_COLOR_GRAD, DP_SEAM_FINDER_COLOR_GRAD]"""
|
| 118 |
+
|
| 119 |
+
Timelapser_AS_IS: int
|
| 120 |
+
TIMELAPSER_AS_IS: int
|
| 121 |
+
Timelapser_CROP: int
|
| 122 |
+
TIMELAPSER_CROP: int
|
| 123 |
+
|
| 124 |
+
GraphCutSeamFinderBase_COST_COLOR: int
|
| 125 |
+
GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR: int
|
| 126 |
+
GraphCutSeamFinderBase_COST_COLOR_GRAD: int
|
| 127 |
+
GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR_GRAD: int
|
| 128 |
+
GraphCutSeamFinderBase_CostType = int
|
| 129 |
+
"""One of [GraphCutSeamFinderBase_COST_COLOR, GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR, GraphCutSeamFinderBase_COST_COLOR_GRAD, GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR_GRAD]"""
|
| 130 |
+
|
| 131 |
+
TrackerSamplerCSC_MODE_INIT_POS: int
|
| 132 |
+
TRACKER_SAMPLER_CSC_MODE_INIT_POS: int
|
| 133 |
+
TrackerSamplerCSC_MODE_INIT_NEG: int
|
| 134 |
+
TRACKER_SAMPLER_CSC_MODE_INIT_NEG: int
|
| 135 |
+
TrackerSamplerCSC_MODE_TRACK_POS: int
|
| 136 |
+
TRACKER_SAMPLER_CSC_MODE_TRACK_POS: int
|
| 137 |
+
TrackerSamplerCSC_MODE_TRACK_NEG: int
|
| 138 |
+
TRACKER_SAMPLER_CSC_MODE_TRACK_NEG: int
|
| 139 |
+
TrackerSamplerCSC_MODE_DETECT: int
|
| 140 |
+
TRACKER_SAMPLER_CSC_MODE_DETECT: int
|
| 141 |
+
TrackerSamplerCSC_MODE = int
|
| 142 |
+
"""One of [TrackerSamplerCSC_MODE_INIT_POS, TRACKER_SAMPLER_CSC_MODE_INIT_POS, TrackerSamplerCSC_MODE_INIT_NEG, TRACKER_SAMPLER_CSC_MODE_INIT_NEG, TrackerSamplerCSC_MODE_TRACK_POS, TRACKER_SAMPLER_CSC_MODE_TRACK_POS, TrackerSamplerCSC_MODE_TRACK_NEG, TRACKER_SAMPLER_CSC_MODE_TRACK_NEG, TrackerSamplerCSC_MODE_DETECT, TRACKER_SAMPLER_CSC_MODE_DETECT]"""
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# Classes
|
| 146 |
+
class Blender:
|
| 147 |
+
# Functions
|
| 148 |
+
@classmethod
|
| 149 |
+
def createDefault(cls, type: int, try_gpu: bool = ...) -> Blender: ...
|
| 150 |
+
|
| 151 |
+
@_typing.overload
|
| 152 |
+
def prepare(self, corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> None: ...
|
| 153 |
+
@_typing.overload
|
| 154 |
+
def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
|
| 155 |
+
|
| 156 |
+
@_typing.overload
|
| 157 |
+
def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
|
| 158 |
+
@_typing.overload
|
| 159 |
+
def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
|
| 160 |
+
|
| 161 |
+
@_typing.overload
|
| 162 |
+
def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
|
| 163 |
+
@_typing.overload
|
| 164 |
+
def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class FeatherBlender(Blender):
|
| 168 |
+
# Functions
|
| 169 |
+
def __init__(self, sharpness: float = ...) -> None: ...
|
| 170 |
+
|
| 171 |
+
def sharpness(self) -> float: ...
|
| 172 |
+
|
| 173 |
+
def setSharpness(self, val: float) -> None: ...
|
| 174 |
+
|
| 175 |
+
def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
|
| 176 |
+
|
| 177 |
+
@_typing.overload
|
| 178 |
+
def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
|
| 179 |
+
@_typing.overload
|
| 180 |
+
def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
|
| 181 |
+
|
| 182 |
+
@_typing.overload
|
| 183 |
+
def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
|
| 184 |
+
@_typing.overload
|
| 185 |
+
def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
|
| 186 |
+
|
| 187 |
+
def createWeightMaps(self, masks: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], weight_maps: _typing.Sequence[cv2.UMat]) -> tuple[cv2.typing.Rect, _typing.Sequence[cv2.UMat]]: ...
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class MultiBandBlender(Blender):
|
| 191 |
+
# Functions
|
| 192 |
+
def __init__(self, try_gpu: int = ..., num_bands: int = ..., weight_type: int = ...) -> None: ...
|
| 193 |
+
|
| 194 |
+
def numBands(self) -> int: ...
|
| 195 |
+
|
| 196 |
+
def setNumBands(self, val: int) -> None: ...
|
| 197 |
+
|
| 198 |
+
def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
|
| 199 |
+
|
| 200 |
+
@_typing.overload
|
| 201 |
+
def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
|
| 202 |
+
@_typing.overload
|
| 203 |
+
def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
|
| 204 |
+
|
| 205 |
+
@_typing.overload
|
| 206 |
+
def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
|
| 207 |
+
@_typing.overload
|
| 208 |
+
def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class CameraParams:
|
| 212 |
+
focal: float
|
| 213 |
+
aspect: float
|
| 214 |
+
ppx: float
|
| 215 |
+
ppy: float
|
| 216 |
+
R: cv2.typing.MatLike
|
| 217 |
+
t: cv2.typing.MatLike
|
| 218 |
+
|
| 219 |
+
# Functions
|
| 220 |
+
def K(self) -> cv2.typing.MatLike: ...
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
class ExposureCompensator:
|
| 224 |
+
# Functions
|
| 225 |
+
@classmethod
|
| 226 |
+
def createDefault(cls, type: int) -> ExposureCompensator: ...
|
| 227 |
+
|
| 228 |
+
def feed(self, corners: _typing.Sequence[cv2.typing.Point], images: _typing.Sequence[cv2.UMat], masks: _typing.Sequence[cv2.UMat]) -> None: ...
|
| 229 |
+
|
| 230 |
+
@_typing.overload
|
| 231 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 232 |
+
@_typing.overload
|
| 233 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
| 234 |
+
|
| 235 |
+
def getMatGains(self, arg1: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
| 236 |
+
|
| 237 |
+
def setMatGains(self, arg1: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
| 238 |
+
|
| 239 |
+
def setUpdateGain(self, b: bool) -> None: ...
|
| 240 |
+
|
| 241 |
+
def getUpdateGain(self) -> bool: ...
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class NoExposureCompensator(ExposureCompensator):
|
| 245 |
+
# Functions
|
| 246 |
+
@_typing.overload
|
| 247 |
+
def apply(self, arg1: int, arg2: cv2.typing.Point, arg3: cv2.typing.MatLike, arg4: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 248 |
+
@_typing.overload
|
| 249 |
+
def apply(self, arg1: int, arg2: cv2.typing.Point, arg3: cv2.UMat, arg4: cv2.UMat) -> cv2.UMat: ...
|
| 250 |
+
|
| 251 |
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
| 252 |
+
|
| 253 |
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
class GainCompensator(ExposureCompensator):
|
| 257 |
+
# Functions
|
| 258 |
+
@_typing.overload
|
| 259 |
+
def __init__(self) -> None: ...
|
| 260 |
+
@_typing.overload
|
| 261 |
+
def __init__(self, nr_feeds: int) -> None: ...
|
| 262 |
+
|
| 263 |
+
@_typing.overload
|
| 264 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 265 |
+
@_typing.overload
|
| 266 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
| 267 |
+
|
| 268 |
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
| 269 |
+
|
| 270 |
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
| 271 |
+
|
| 272 |
+
def setNrFeeds(self, nr_feeds: int) -> None: ...
|
| 273 |
+
|
| 274 |
+
def getNrFeeds(self) -> int: ...
|
| 275 |
+
|
| 276 |
+
def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
|
| 277 |
+
|
| 278 |
+
def getSimilarityThreshold(self) -> float: ...
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class ChannelsCompensator(ExposureCompensator):
|
| 282 |
+
# Functions
|
| 283 |
+
def __init__(self, nr_feeds: int = ...) -> None: ...
|
| 284 |
+
|
| 285 |
+
@_typing.overload
|
| 286 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 287 |
+
@_typing.overload
|
| 288 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
| 289 |
+
|
| 290 |
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
| 291 |
+
|
| 292 |
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
| 293 |
+
|
| 294 |
+
def setNrFeeds(self, nr_feeds: int) -> None: ...
|
| 295 |
+
|
| 296 |
+
def getNrFeeds(self) -> int: ...
|
| 297 |
+
|
| 298 |
+
def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
|
| 299 |
+
|
| 300 |
+
def getSimilarityThreshold(self) -> float: ...
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
class BlocksCompensator(ExposureCompensator):
|
| 304 |
+
# Functions
|
| 305 |
+
@_typing.overload
|
| 306 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 307 |
+
@_typing.overload
|
| 308 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
| 309 |
+
|
| 310 |
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
| 311 |
+
|
| 312 |
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
| 313 |
+
|
| 314 |
+
def setNrFeeds(self, nr_feeds: int) -> None: ...
|
| 315 |
+
|
| 316 |
+
def getNrFeeds(self) -> int: ...
|
| 317 |
+
|
| 318 |
+
def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
|
| 319 |
+
|
| 320 |
+
def getSimilarityThreshold(self) -> float: ...
|
| 321 |
+
|
| 322 |
+
@_typing.overload
|
| 323 |
+
def setBlockSize(self, width: int, height: int) -> None: ...
|
| 324 |
+
@_typing.overload
|
| 325 |
+
def setBlockSize(self, size: cv2.typing.Size) -> None: ...
|
| 326 |
+
|
| 327 |
+
def getBlockSize(self) -> cv2.typing.Size: ...
|
| 328 |
+
|
| 329 |
+
def setNrGainsFilteringIterations(self, nr_iterations: int) -> None: ...
|
| 330 |
+
|
| 331 |
+
def getNrGainsFilteringIterations(self) -> int: ...
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
class BlocksGainCompensator(BlocksCompensator):
|
| 335 |
+
# Functions
|
| 336 |
+
@_typing.overload
|
| 337 |
+
def __init__(self, bl_width: int = ..., bl_height: int = ...) -> None: ...
|
| 338 |
+
@_typing.overload
|
| 339 |
+
def __init__(self, bl_width: int, bl_height: int, nr_feeds: int) -> None: ...
|
| 340 |
+
|
| 341 |
+
@_typing.overload
|
| 342 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 343 |
+
@_typing.overload
|
| 344 |
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
| 345 |
+
|
| 346 |
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
| 347 |
+
|
| 348 |
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
class BlocksChannelsCompensator(BlocksCompensator):
|
| 352 |
+
# Functions
|
| 353 |
+
def __init__(self, bl_width: int = ..., bl_height: int = ..., nr_feeds: int = ...) -> None: ...
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class ImageFeatures:
|
| 357 |
+
img_idx: int
|
| 358 |
+
img_size: cv2.typing.Size
|
| 359 |
+
keypoints: _typing.Sequence[cv2.KeyPoint]
|
| 360 |
+
descriptors: cv2.UMat
|
| 361 |
+
|
| 362 |
+
# Functions
|
| 363 |
+
def getKeypoints(self) -> _typing.Sequence[cv2.KeyPoint]: ...
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
class MatchesInfo:
|
| 367 |
+
src_img_idx: int
|
| 368 |
+
dst_img_idx: int
|
| 369 |
+
matches: _typing.Sequence[cv2.DMatch]
|
| 370 |
+
inliers_mask: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]
|
| 371 |
+
num_inliers: int
|
| 372 |
+
H: cv2.typing.MatLike
|
| 373 |
+
confidence: float
|
| 374 |
+
|
| 375 |
+
# Functions
|
| 376 |
+
def getMatches(self) -> _typing.Sequence[cv2.DMatch]: ...
|
| 377 |
+
|
| 378 |
+
def getInliers(self) -> numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]: ...
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
class FeaturesMatcher:
|
| 382 |
+
# Functions
|
| 383 |
+
def apply(self, features1: ImageFeatures, features2: ImageFeatures) -> MatchesInfo: ...
|
| 384 |
+
|
| 385 |
+
def apply2(self, features: _typing.Sequence[ImageFeatures], mask: cv2.UMat | None = ...) -> _typing.Sequence[MatchesInfo]: ...
|
| 386 |
+
|
| 387 |
+
def isThreadSafe(self) -> bool: ...
|
| 388 |
+
|
| 389 |
+
def collectGarbage(self) -> None: ...
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
class BestOf2NearestMatcher(FeaturesMatcher):
|
| 393 |
+
# Functions
|
| 394 |
+
def __init__(self, try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ..., matches_confindece_thresh: float = ...) -> None: ...
|
| 395 |
+
|
| 396 |
+
def collectGarbage(self) -> None: ...
|
| 397 |
+
|
| 398 |
+
@classmethod
|
| 399 |
+
def create(cls, try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ..., matches_confindece_thresh: float = ...) -> BestOf2NearestMatcher: ...
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
class BestOf2NearestRangeMatcher(BestOf2NearestMatcher):
|
| 403 |
+
# Functions
|
| 404 |
+
def __init__(self, range_width: int = ..., try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ...) -> None: ...
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
class AffineBestOf2NearestMatcher(BestOf2NearestMatcher):
|
| 408 |
+
# Functions
|
| 409 |
+
def __init__(self, full_affine: bool = ..., try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ...) -> None: ...
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class Estimator:
|
| 413 |
+
# Functions
|
| 414 |
+
def apply(self, features: _typing.Sequence[ImageFeatures], pairwise_matches: _typing.Sequence[MatchesInfo], cameras: _typing.Sequence[CameraParams]) -> tuple[bool, _typing.Sequence[CameraParams]]: ...
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
class HomographyBasedEstimator(Estimator):
|
| 418 |
+
# Functions
|
| 419 |
+
def __init__(self, is_focals_estimated: bool = ...) -> None: ...
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
class AffineBasedEstimator(Estimator):
|
| 423 |
+
# Functions
|
| 424 |
+
def __init__(self) -> None: ...
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
class BundleAdjusterBase(Estimator):
|
| 428 |
+
# Functions
|
| 429 |
+
def refinementMask(self) -> cv2.typing.MatLike: ...
|
| 430 |
+
|
| 431 |
+
def setRefinementMask(self, mask: cv2.typing.MatLike) -> None: ...
|
| 432 |
+
|
| 433 |
+
def confThresh(self) -> float: ...
|
| 434 |
+
|
| 435 |
+
def setConfThresh(self, conf_thresh: float) -> None: ...
|
| 436 |
+
|
| 437 |
+
def termCriteria(self) -> cv2.typing.TermCriteria: ...
|
| 438 |
+
|
| 439 |
+
def setTermCriteria(self, term_criteria: cv2.typing.TermCriteria) -> None: ...
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
class NoBundleAdjuster(BundleAdjusterBase):
|
| 443 |
+
# Functions
|
| 444 |
+
def __init__(self) -> None: ...
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
class BundleAdjusterReproj(BundleAdjusterBase):
|
| 448 |
+
# Functions
|
| 449 |
+
def __init__(self) -> None: ...
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
class BundleAdjusterRay(BundleAdjusterBase):
|
| 453 |
+
# Functions
|
| 454 |
+
def __init__(self) -> None: ...
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
class BundleAdjusterAffine(BundleAdjusterBase):
|
| 458 |
+
# Functions
|
| 459 |
+
def __init__(self) -> None: ...
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
class BundleAdjusterAffinePartial(BundleAdjusterBase):
|
| 463 |
+
# Functions
|
| 464 |
+
def __init__(self) -> None: ...
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
class SeamFinder:
|
| 468 |
+
# Functions
|
| 469 |
+
def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 470 |
+
|
| 471 |
+
@classmethod
|
| 472 |
+
def createDefault(cls, type: int) -> SeamFinder: ...
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
class NoSeamFinder(SeamFinder):
|
| 476 |
+
# Functions
|
| 477 |
+
def find(self, arg1: _typing.Sequence[cv2.UMat], arg2: _typing.Sequence[cv2.typing.Point], arg3: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
class PairwiseSeamFinder(SeamFinder):
|
| 481 |
+
# Functions
|
| 482 |
+
def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
class VoronoiSeamFinder(PairwiseSeamFinder):
|
| 486 |
+
# Functions
|
| 487 |
+
def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
class DpSeamFinder(SeamFinder):
|
| 491 |
+
# Functions
|
| 492 |
+
def __init__(self, costFunc: str) -> None: ...
|
| 493 |
+
|
| 494 |
+
def setCostFunction(self, val: str) -> None: ...
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
class GraphCutSeamFinder:
|
| 498 |
+
# Functions
|
| 499 |
+
def __init__(self, cost_type: str, terminal_cost: float = ..., bad_region_penalty: float = ...) -> None: ...
|
| 500 |
+
|
| 501 |
+
def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
class Timelapser:
|
| 505 |
+
# Functions
|
| 506 |
+
@classmethod
|
| 507 |
+
def createDefault(cls, type: int) -> Timelapser: ...
|
| 508 |
+
|
| 509 |
+
def initialize(self, corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> None: ...
|
| 510 |
+
|
| 511 |
+
@_typing.overload
|
| 512 |
+
def process(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
|
| 513 |
+
@_typing.overload
|
| 514 |
+
def process(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
|
| 515 |
+
|
| 516 |
+
def getDst(self) -> cv2.UMat: ...
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
class TimelapserCrop(Timelapser):
|
| 520 |
+
...
|
| 521 |
+
|
| 522 |
+
class ProjectorBase:
|
| 523 |
+
...
|
| 524 |
+
|
| 525 |
+
class SphericalProjector(ProjectorBase):
|
| 526 |
+
# Functions
|
| 527 |
+
def mapForward(self, x: float, y: float, u: float, v: float) -> None: ...
|
| 528 |
+
|
| 529 |
+
def mapBackward(self, u: float, v: float, x: float, y: float) -> None: ...
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
# Functions
|
| 534 |
+
def calibrateRotatingCamera(Hs: _typing.Sequence[cv2.typing.MatLike], K: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
|
| 535 |
+
|
| 536 |
+
@_typing.overload
|
| 537 |
+
def computeImageFeatures(featuresFinder: cv2.Feature2D, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[ImageFeatures]: ...
|
| 538 |
+
@_typing.overload
|
| 539 |
+
def computeImageFeatures(featuresFinder: cv2.Feature2D, images: _typing.Sequence[cv2.UMat], masks: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[ImageFeatures]: ...
|
| 540 |
+
|
| 541 |
+
@_typing.overload
|
| 542 |
+
def computeImageFeatures2(featuresFinder: cv2.Feature2D, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> ImageFeatures: ...
|
| 543 |
+
@_typing.overload
|
| 544 |
+
def computeImageFeatures2(featuresFinder: cv2.Feature2D, image: cv2.UMat, mask: cv2.UMat | None = ...) -> ImageFeatures: ...
|
| 545 |
+
|
| 546 |
+
@_typing.overload
|
| 547 |
+
def createLaplacePyr(img: cv2.typing.MatLike, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 548 |
+
@_typing.overload
|
| 549 |
+
def createLaplacePyr(img: cv2.UMat, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 550 |
+
|
| 551 |
+
@_typing.overload
|
| 552 |
+
def createLaplacePyrGpu(img: cv2.typing.MatLike, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 553 |
+
@_typing.overload
|
| 554 |
+
def createLaplacePyrGpu(img: cv2.UMat, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 555 |
+
|
| 556 |
+
@_typing.overload
|
| 557 |
+
def createWeightMap(mask: cv2.typing.MatLike, sharpness: float, weight: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 558 |
+
@_typing.overload
|
| 559 |
+
def createWeightMap(mask: cv2.UMat, sharpness: float, weight: cv2.UMat) -> cv2.UMat: ...
|
| 560 |
+
|
| 561 |
+
def focalsFromHomography(H: cv2.typing.MatLike, f0: float, f1: float, f0_ok: bool, f1_ok: bool) -> None: ...
|
| 562 |
+
|
| 563 |
+
def leaveBiggestComponent(features: _typing.Sequence[ImageFeatures], pairwise_matches: _typing.Sequence[MatchesInfo], conf_threshold: float) -> _typing.Sequence[int]: ...
|
| 564 |
+
|
| 565 |
+
def matchesGraphAsString(paths: _typing.Sequence[str], pairwise_matches: _typing.Sequence[MatchesInfo], conf_threshold: float) -> str: ...
|
| 566 |
+
|
| 567 |
+
@_typing.overload
|
| 568 |
+
def normalizeUsingWeightMap(weight: cv2.typing.MatLike, src: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
| 569 |
+
@_typing.overload
|
| 570 |
+
def normalizeUsingWeightMap(weight: cv2.UMat, src: cv2.UMat) -> cv2.UMat: ...
|
| 571 |
+
|
| 572 |
+
def overlapRoi(tl1: cv2.typing.Point, tl2: cv2.typing.Point, sz1: cv2.typing.Size, sz2: cv2.typing.Size, roi: cv2.typing.Rect) -> bool: ...
|
| 573 |
+
|
| 574 |
+
def restoreImageFromLaplacePyr(pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 575 |
+
|
| 576 |
+
def restoreImageFromLaplacePyrGpu(pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
| 577 |
+
|
| 578 |
+
@_typing.overload
|
| 579 |
+
def resultRoi(corners: _typing.Sequence[cv2.typing.Point], images: _typing.Sequence[cv2.UMat]) -> cv2.typing.Rect: ...
|
| 580 |
+
@_typing.overload
|
| 581 |
+
def resultRoi(corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> cv2.typing.Rect: ...
|
| 582 |
+
|
| 583 |
+
def resultRoiIntersection(corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> cv2.typing.Rect: ...
|
| 584 |
+
|
| 585 |
+
def resultTl(corners: _typing.Sequence[cv2.typing.Point]) -> cv2.typing.Point: ...
|
| 586 |
+
|
| 587 |
+
def selectRandomSubset(count: int, size: int, subset: _typing.Sequence[int]) -> None: ...
|
| 588 |
+
|
| 589 |
+
def stitchingLogLevel() -> int: ...
|
| 590 |
+
|
| 591 |
+
@_typing.overload
|
| 592 |
+
def strip(params: cv2.gapi.ie.PyParams) -> cv2.gapi.GNetParam: ...
|
| 593 |
+
@_typing.overload
|
| 594 |
+
def strip(params: cv2.gapi.onnx.PyParams) -> cv2.gapi.GNetParam: ...
|
| 595 |
+
@_typing.overload
|
| 596 |
+
def strip(params: cv2.gapi.ov.PyParams) -> cv2.gapi.GNetParam: ...
|
| 597 |
+
|
| 598 |
+
def waveCorrect(rmats: _typing.Sequence[cv2.typing.MatLike], kind: WaveCorrectKind) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
| 599 |
+
|
| 600 |
+
|
deepseek/lib/python3.10/site-packages/cv2/gapi/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (12.8 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/cv2/gapi/core/ocl/__init__.pyi
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__: list[str] = []
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Functions
|
| 7 |
+
def kernels() -> cv2.GKernelPackage: ...
|
| 8 |
+
|
| 9 |
+
|
deepseek/lib/python3.10/site-packages/cv2/gapi/ie/detail/__init__.pyi
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__: list[str] = []
|
| 2 |
+
|
| 3 |
+
ParamDesc_Kind_Load: int
|
| 4 |
+
PARAM_DESC_KIND_LOAD: int
|
| 5 |
+
ParamDesc_Kind_Import: int
|
| 6 |
+
PARAM_DESC_KIND_IMPORT: int
|
| 7 |
+
ParamDesc_Kind = int
|
| 8 |
+
"""One of [ParamDesc_Kind_Load, PARAM_DESC_KIND_LOAD, ParamDesc_Kind_Import, PARAM_DESC_KIND_IMPORT]"""
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Classes
|
| 12 |
+
|
deepseek/lib/python3.10/site-packages/cv2/gapi/ot/cpu/__init__.pyi
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__: list[str] = []
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Functions
|
| 7 |
+
def kernels() -> cv2.GKernelPackage: ...
|
| 8 |
+
|
| 9 |
+
|
deepseek/lib/python3.10/site-packages/cv2/gapi/ov/__init__.pyi
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__: list[str] = []
|
| 2 |
+
|
| 3 |
+
import cv2.typing
|
| 4 |
+
import typing as _typing
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Classes
|
| 8 |
+
class PyParams:
|
| 9 |
+
# Functions
|
| 10 |
+
@_typing.overload
|
| 11 |
+
def __init__(self) -> None: ...
|
| 12 |
+
@_typing.overload
|
| 13 |
+
def __init__(self, tag: str, model_path: str, bin_path: str, device: str) -> None: ...
|
| 14 |
+
@_typing.overload
|
| 15 |
+
def __init__(self, tag: str, blob_path: str, device: str) -> None: ...
|
| 16 |
+
|
| 17 |
+
def cfgPluginConfig(self, config: cv2.typing.map_string_and_string) -> PyParams: ...
|
| 18 |
+
|
| 19 |
+
@_typing.overload
|
| 20 |
+
def cfgInputTensorLayout(self, tensor_layout: str) -> PyParams: ...
|
| 21 |
+
@_typing.overload
|
| 22 |
+
def cfgInputTensorLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
|
| 23 |
+
|
| 24 |
+
@_typing.overload
|
| 25 |
+
def cfgInputModelLayout(self, tensor_layout: str) -> PyParams: ...
|
| 26 |
+
@_typing.overload
|
| 27 |
+
def cfgInputModelLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
|
| 28 |
+
|
| 29 |
+
@_typing.overload
|
| 30 |
+
def cfgOutputTensorLayout(self, tensor_layout: str) -> PyParams: ...
|
| 31 |
+
@_typing.overload
|
| 32 |
+
def cfgOutputTensorLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
|
| 33 |
+
|
| 34 |
+
@_typing.overload
|
| 35 |
+
def cfgOutputModelLayout(self, tensor_layout: str) -> PyParams: ...
|
| 36 |
+
@_typing.overload
|
| 37 |
+
def cfgOutputModelLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
|
| 38 |
+
|
| 39 |
+
@_typing.overload
|
| 40 |
+
def cfgOutputTensorPrecision(self, precision: int) -> PyParams: ...
|
| 41 |
+
@_typing.overload
|
| 42 |
+
def cfgOutputTensorPrecision(self, precision_map: cv2.typing.map_string_and_int) -> PyParams: ...
|
| 43 |
+
|
| 44 |
+
@_typing.overload
|
| 45 |
+
def cfgReshape(self, new_shape: _typing.Sequence[int]) -> PyParams: ...
|
| 46 |
+
@_typing.overload
|
| 47 |
+
def cfgReshape(self, new_shape_map: cv2.typing.map_string_and_vector_size_t) -> PyParams: ...
|
| 48 |
+
|
| 49 |
+
def cfgNumRequests(self, nireq: int) -> PyParams: ...
|
| 50 |
+
|
| 51 |
+
@_typing.overload
|
| 52 |
+
def cfgMean(self, mean_values: _typing.Sequence[float]) -> PyParams: ...
|
| 53 |
+
@_typing.overload
|
| 54 |
+
def cfgMean(self, mean_map: cv2.typing.map_string_and_vector_float) -> PyParams: ...
|
| 55 |
+
|
| 56 |
+
@_typing.overload
|
| 57 |
+
def cfgScale(self, scale_values: _typing.Sequence[float]) -> PyParams: ...
|
| 58 |
+
@_typing.overload
|
| 59 |
+
def cfgScale(self, scale_map: cv2.typing.map_string_and_vector_float) -> PyParams: ...
|
| 60 |
+
|
| 61 |
+
@_typing.overload
|
| 62 |
+
def cfgResize(self, interpolation: int) -> PyParams: ...
|
| 63 |
+
@_typing.overload
|
| 64 |
+
def cfgResize(self, interpolation: cv2.typing.map_string_and_int) -> PyParams: ...
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# Functions
|
| 69 |
+
@_typing.overload
|
| 70 |
+
def params(tag: str, model_path: str, weights: str, device: str) -> PyParams: ...
|
| 71 |
+
@_typing.overload
|
| 72 |
+
def params(tag: str, bin_path: str, device: str) -> PyParams: ...
|
| 73 |
+
|
| 74 |
+
|
deepseek/lib/python3.10/site-packages/filelock-3.16.1.dist-info/METADATA
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.3
|
| 2 |
+
Name: filelock
|
| 3 |
+
Version: 3.16.1
|
| 4 |
+
Summary: A platform independent file lock.
|
| 5 |
+
Project-URL: Documentation, https://py-filelock.readthedocs.io
|
| 6 |
+
Project-URL: Homepage, https://github.com/tox-dev/py-filelock
|
| 7 |
+
Project-URL: Source, https://github.com/tox-dev/py-filelock
|
| 8 |
+
Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues
|
| 9 |
+
Maintainer-email: Bernát Gábor <gaborjbernat@gmail.com>
|
| 10 |
+
License-Expression: Unlicense
|
| 11 |
+
License-File: LICENSE
|
| 12 |
+
Keywords: application,cache,directory,log,user
|
| 13 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 14 |
+
Classifier: Intended Audience :: Developers
|
| 15 |
+
Classifier: License :: OSI Approved :: The Unlicense (Unlicense)
|
| 16 |
+
Classifier: Operating System :: OS Independent
|
| 17 |
+
Classifier: Programming Language :: Python
|
| 18 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 25 |
+
Classifier: Topic :: Internet
|
| 26 |
+
Classifier: Topic :: Software Development :: Libraries
|
| 27 |
+
Classifier: Topic :: System
|
| 28 |
+
Requires-Python: >=3.8
|
| 29 |
+
Provides-Extra: docs
|
| 30 |
+
Requires-Dist: furo>=2024.8.6; extra == 'docs'
|
| 31 |
+
Requires-Dist: sphinx-autodoc-typehints>=2.4.1; extra == 'docs'
|
| 32 |
+
Requires-Dist: sphinx>=8.0.2; extra == 'docs'
|
| 33 |
+
Provides-Extra: testing
|
| 34 |
+
Requires-Dist: covdefaults>=2.3; extra == 'testing'
|
| 35 |
+
Requires-Dist: coverage>=7.6.1; extra == 'testing'
|
| 36 |
+
Requires-Dist: diff-cover>=9.2; extra == 'testing'
|
| 37 |
+
Requires-Dist: pytest-asyncio>=0.24; extra == 'testing'
|
| 38 |
+
Requires-Dist: pytest-cov>=5; extra == 'testing'
|
| 39 |
+
Requires-Dist: pytest-mock>=3.14; extra == 'testing'
|
| 40 |
+
Requires-Dist: pytest-timeout>=2.3.1; extra == 'testing'
|
| 41 |
+
Requires-Dist: pytest>=8.3.3; extra == 'testing'
|
| 42 |
+
Requires-Dist: virtualenv>=20.26.4; extra == 'testing'
|
| 43 |
+
Provides-Extra: typing
|
| 44 |
+
Requires-Dist: typing-extensions>=4.12.2; (python_version < '3.11') and extra == 'typing'
|
| 45 |
+
Description-Content-Type: text/markdown
|
| 46 |
+
|
| 47 |
+
# filelock
|
| 48 |
+
|
| 49 |
+
[](https://pypi.org/project/filelock/)
|
| 50 |
+
[](https://pypi.org/project/filelock/)
|
| 52 |
+
[](https://py-filelock.readthedocs.io/en/latest/?badge=latest)
|
| 54 |
+
[](https://github.com/psf/black)
|
| 56 |
+
[](https://pepy.tech/project/filelock)
|
| 57 |
+
[](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml)
|
| 58 |
+
|
| 59 |
+
For more information checkout the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html).
|
deepseek/lib/python3.10/site-packages/filelock-3.16.1.dist-info/RECORD
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
filelock-3.16.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
filelock-3.16.1.dist-info/METADATA,sha256=LXL5-XQe_eTKkdNs76A6jSicQ1DBSTXqkDcjsprWvIM,2944
|
| 3 |
+
filelock-3.16.1.dist-info/RECORD,,
|
| 4 |
+
filelock-3.16.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 5 |
+
filelock-3.16.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
|
| 6 |
+
filelock-3.16.1.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210
|
| 7 |
+
filelock/__init__.py,sha256=_t_-OAGXo_qyPa9lNQ1YnzVYEvSW3I0onPqzpomsVVg,1769
|
| 8 |
+
filelock/__pycache__/__init__.cpython-310.pyc,,
|
| 9 |
+
filelock/__pycache__/_api.cpython-310.pyc,,
|
| 10 |
+
filelock/__pycache__/_error.cpython-310.pyc,,
|
| 11 |
+
filelock/__pycache__/_soft.cpython-310.pyc,,
|
| 12 |
+
filelock/__pycache__/_unix.cpython-310.pyc,,
|
| 13 |
+
filelock/__pycache__/_util.cpython-310.pyc,,
|
| 14 |
+
filelock/__pycache__/_windows.cpython-310.pyc,,
|
| 15 |
+
filelock/__pycache__/asyncio.cpython-310.pyc,,
|
| 16 |
+
filelock/__pycache__/version.cpython-310.pyc,,
|
| 17 |
+
filelock/_api.py,sha256=GVeBEGjpDD8S1bYqG6_u0MZfbYHS6XrHs_n3PVKq-h0,14541
|
| 18 |
+
filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787
|
| 19 |
+
filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711
|
| 20 |
+
filelock/_unix.py,sha256=-FXP0tjInBHUYygOlMpp4taUmD87QOkrD_4ybg_iT7Q,2259
|
| 21 |
+
filelock/_util.py,sha256=QHBoNFIYfbAThhotH3Q8E2acFc84wpG49-T-uu017ZE,1715
|
| 22 |
+
filelock/_windows.py,sha256=eMKL8dZKrgekf5VYVGR14an29JGEInRtUO8ui9ABywg,2177
|
| 23 |
+
filelock/asyncio.py,sha256=3D4JP4Ms5IXTGib5eOekyr6uH6rZlieV_moVGY36juA,12463
|
| 24 |
+
filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 25 |
+
filelock/version.py,sha256=KSOBzuLwiqiVWDPGfMj1ntr25YrY6JBDr8RvinQX_FM,413
|
deepseek/lib/python3.10/site-packages/filelock-3.16.1.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: hatchling 1.25.0
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
deepseek/lib/python3.10/site-packages/isympy.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python shell for SymPy.
|
| 3 |
+
|
| 4 |
+
This is just a normal Python shell (IPython shell if you have the
|
| 5 |
+
IPython package installed), that executes the following commands for
|
| 6 |
+
the user:
|
| 7 |
+
|
| 8 |
+
>>> from __future__ import division
|
| 9 |
+
>>> from sympy import *
|
| 10 |
+
>>> x, y, z, t = symbols('x y z t')
|
| 11 |
+
>>> k, m, n = symbols('k m n', integer=True)
|
| 12 |
+
>>> f, g, h = symbols('f g h', cls=Function)
|
| 13 |
+
>>> init_printing()
|
| 14 |
+
|
| 15 |
+
So starting 'isympy' is equivalent to starting Python (or IPython) and
|
| 16 |
+
executing the above commands by hand. It is intended for easy and quick
|
| 17 |
+
experimentation with SymPy. isympy is a good way to use SymPy as an
|
| 18 |
+
interactive calculator. If you have IPython and Matplotlib installed, then
|
| 19 |
+
interactive plotting is enabled by default.
|
| 20 |
+
|
| 21 |
+
COMMAND LINE OPTIONS
|
| 22 |
+
--------------------
|
| 23 |
+
|
| 24 |
+
-c CONSOLE, --console=CONSOLE
|
| 25 |
+
|
| 26 |
+
Use the specified shell (Python or IPython) shell as the console
|
| 27 |
+
backend instead of the default one (IPython if present, Python
|
| 28 |
+
otherwise), e.g.:
|
| 29 |
+
|
| 30 |
+
$isympy -c python
|
| 31 |
+
|
| 32 |
+
CONSOLE must be one of 'ipython' or 'python'
|
| 33 |
+
|
| 34 |
+
-p PRETTY, --pretty PRETTY
|
| 35 |
+
|
| 36 |
+
Setup pretty-printing in SymPy. When pretty-printing is enabled,
|
| 37 |
+
expressions can be printed with Unicode or ASCII. The default is
|
| 38 |
+
to use pretty-printing (with Unicode if the terminal supports it).
|
| 39 |
+
When this option is 'no', expressions will not be pretty-printed
|
| 40 |
+
and ASCII will be used:
|
| 41 |
+
|
| 42 |
+
$isympy -p no
|
| 43 |
+
|
| 44 |
+
PRETTY must be one of 'unicode', 'ascii', or 'no'
|
| 45 |
+
|
| 46 |
+
-t TYPES, --types=TYPES
|
| 47 |
+
|
| 48 |
+
Setup the ground types for the polys. By default, gmpy ground types
|
| 49 |
+
are used if gmpy2 or gmpy is installed, otherwise it falls back to python
|
| 50 |
+
ground types, which are a little bit slower. You can manually
|
| 51 |
+
choose python ground types even if gmpy is installed (e.g., for
|
| 52 |
+
testing purposes):
|
| 53 |
+
|
| 54 |
+
$isympy -t python
|
| 55 |
+
|
| 56 |
+
TYPES must be one of 'gmpy', 'gmpy1' or 'python'
|
| 57 |
+
|
| 58 |
+
Note that the ground type gmpy1 is primarily intended for testing; it
|
| 59 |
+
forces the use of gmpy version 1 even if gmpy2 is available.
|
| 60 |
+
|
| 61 |
+
This is the same as setting the environment variable
|
| 62 |
+
SYMPY_GROUND_TYPES to the given ground type (e.g.,
|
| 63 |
+
SYMPY_GROUND_TYPES='gmpy')
|
| 64 |
+
|
| 65 |
+
The ground types can be determined interactively from the variable
|
| 66 |
+
sympy.polys.domains.GROUND_TYPES.
|
| 67 |
+
|
| 68 |
+
-o ORDER, --order ORDER
|
| 69 |
+
|
| 70 |
+
Setup the ordering of terms for printing. The default is lex, which
|
| 71 |
+
orders terms lexicographically (e.g., x**2 + x + 1). You can choose
|
| 72 |
+
other orderings, such as rev-lex, which will use reverse
|
| 73 |
+
lexicographic ordering (e.g., 1 + x + x**2):
|
| 74 |
+
|
| 75 |
+
$isympy -o rev-lex
|
| 76 |
+
|
| 77 |
+
ORDER must be one of 'lex', 'rev-lex', 'grlex', 'rev-grlex',
|
| 78 |
+
'grevlex', 'rev-grevlex', 'old', or 'none'.
|
| 79 |
+
|
| 80 |
+
Note that for very large expressions, ORDER='none' may speed up
|
| 81 |
+
printing considerably but the terms will have no canonical order.
|
| 82 |
+
|
| 83 |
+
-q, --quiet
|
| 84 |
+
|
| 85 |
+
Print only Python's and SymPy's versions to stdout at startup.
|
| 86 |
+
|
| 87 |
+
-d, --doctest
|
| 88 |
+
|
| 89 |
+
Use the same format that should be used for doctests. This is
|
| 90 |
+
equivalent to -c python -p no.
|
| 91 |
+
|
| 92 |
+
-C, --no-cache
|
| 93 |
+
|
| 94 |
+
Disable the caching mechanism. Disabling the cache may slow certain
|
| 95 |
+
operations down considerably. This is useful for testing the cache,
|
| 96 |
+
or for benchmarking, as the cache can result in deceptive timings.
|
| 97 |
+
|
| 98 |
+
This is equivalent to setting the environment variable
|
| 99 |
+
SYMPY_USE_CACHE to 'no'.
|
| 100 |
+
|
| 101 |
+
-a, --auto-symbols (requires at least IPython 0.11)
|
| 102 |
+
|
| 103 |
+
Automatically create missing symbols. Normally, typing a name of a
|
| 104 |
+
Symbol that has not been instantiated first would raise NameError,
|
| 105 |
+
but with this option enabled, any undefined name will be
|
| 106 |
+
automatically created as a Symbol.
|
| 107 |
+
|
| 108 |
+
Note that this is intended only for interactive, calculator style
|
| 109 |
+
usage. In a script that uses SymPy, Symbols should be instantiated
|
| 110 |
+
at the top, so that it's clear what they are.
|
| 111 |
+
|
| 112 |
+
This will not override any names that are already defined, which
|
| 113 |
+
includes the single character letters represented by the mnemonic
|
| 114 |
+
QCOSINE (see the "Gotchas and Pitfalls" document in the
|
| 115 |
+
documentation). You can delete existing names by executing "del
|
| 116 |
+
name". If a name is defined, typing "'name' in dir()" will return True.
|
| 117 |
+
|
| 118 |
+
The Symbols that are created using this have default assumptions.
|
| 119 |
+
If you want to place assumptions on symbols, you should create them
|
| 120 |
+
using symbols() or var().
|
| 121 |
+
|
| 122 |
+
Finally, this only works in the top level namespace. So, for
|
| 123 |
+
example, if you define a function in isympy with an undefined
|
| 124 |
+
Symbol, it will not work.
|
| 125 |
+
|
| 126 |
+
See also the -i and -I options.
|
| 127 |
+
|
| 128 |
+
-i, --int-to-Integer (requires at least IPython 0.11)
|
| 129 |
+
|
| 130 |
+
Automatically wrap int literals with Integer. This makes it so that
|
| 131 |
+
things like 1/2 will come out as Rational(1, 2), rather than 0.5. This
|
| 132 |
+
works by preprocessing the source and wrapping all int literals with
|
| 133 |
+
Integer. Note that this will not change the behavior of int literals
|
| 134 |
+
assigned to variables, and it also won't change the behavior of functions
|
| 135 |
+
that return int literals.
|
| 136 |
+
|
| 137 |
+
If you want an int, you can wrap the literal in int(), e.g. int(3)/int(2)
|
| 138 |
+
gives 1.5 (with division imported from __future__).
|
| 139 |
+
|
| 140 |
+
-I, --interactive (requires at least IPython 0.11)
|
| 141 |
+
|
| 142 |
+
This is equivalent to --auto-symbols --int-to-Integer. Future options
|
| 143 |
+
designed for ease of interactive use may be added to this.
|
| 144 |
+
|
| 145 |
+
-D, --debug
|
| 146 |
+
|
| 147 |
+
Enable debugging output. This is the same as setting the
|
| 148 |
+
environment variable SYMPY_DEBUG to 'True'. The debug status is set
|
| 149 |
+
in the variable SYMPY_DEBUG within isympy.
|
| 150 |
+
|
| 151 |
+
-- IPython options
|
| 152 |
+
|
| 153 |
+
Additionally you can pass command line options directly to the IPython
|
| 154 |
+
interpreter (the standard Python shell is not supported). However you
|
| 155 |
+
need to add the '--' separator between two types of options, e.g the
|
| 156 |
+
startup banner option and the colors option. You need to enter the
|
| 157 |
+
options as required by the version of IPython that you are using, too:
|
| 158 |
+
|
| 159 |
+
in IPython 0.11,
|
| 160 |
+
|
| 161 |
+
$isympy -q -- --colors=NoColor
|
| 162 |
+
|
| 163 |
+
or older versions of IPython,
|
| 164 |
+
|
| 165 |
+
$isympy -q -- -colors NoColor
|
| 166 |
+
|
| 167 |
+
See also isympy --help.
|
| 168 |
+
"""
|
| 169 |
+
|
| 170 |
+
import os
|
| 171 |
+
import sys
|
| 172 |
+
|
| 173 |
+
# DO NOT IMPORT SYMPY HERE! Or the setting of the sympy environment variables
|
| 174 |
+
# by the command line will break.
|
| 175 |
+
|
| 176 |
+
def main() -> None:
|
| 177 |
+
from argparse import ArgumentParser, RawDescriptionHelpFormatter
|
| 178 |
+
|
| 179 |
+
VERSION = None
|
| 180 |
+
if '--version' in sys.argv:
|
| 181 |
+
# We cannot import sympy before this is run, because flags like -C and
|
| 182 |
+
# -t set environment variables that must be set before SymPy is
|
| 183 |
+
# imported. The only thing we need to import it for is to get the
|
| 184 |
+
# version, which only matters with the --version flag.
|
| 185 |
+
import sympy
|
| 186 |
+
VERSION = sympy.__version__
|
| 187 |
+
|
| 188 |
+
usage = 'isympy [options] -- [ipython options]'
|
| 189 |
+
parser = ArgumentParser(
|
| 190 |
+
usage=usage,
|
| 191 |
+
description=__doc__,
|
| 192 |
+
formatter_class=RawDescriptionHelpFormatter,
|
| 193 |
+
)
|
| 194 |
+
|
| 195 |
+
parser.add_argument('--version', action='version', version=VERSION)
|
| 196 |
+
|
| 197 |
+
parser.add_argument(
|
| 198 |
+
'-c', '--console',
|
| 199 |
+
dest='console',
|
| 200 |
+
action='store',
|
| 201 |
+
default=None,
|
| 202 |
+
choices=['ipython', 'python'],
|
| 203 |
+
metavar='CONSOLE',
|
| 204 |
+
help='select type of interactive session: ipython | python; defaults '
|
| 205 |
+
'to ipython if IPython is installed, otherwise python')
|
| 206 |
+
|
| 207 |
+
parser.add_argument(
|
| 208 |
+
'-p', '--pretty',
|
| 209 |
+
dest='pretty',
|
| 210 |
+
action='store',
|
| 211 |
+
default=None,
|
| 212 |
+
metavar='PRETTY',
|
| 213 |
+
choices=['unicode', 'ascii', 'no'],
|
| 214 |
+
help='setup pretty printing: unicode | ascii | no; defaults to '
|
| 215 |
+
'unicode printing if the terminal supports it, otherwise ascii')
|
| 216 |
+
|
| 217 |
+
parser.add_argument(
|
| 218 |
+
'-t', '--types',
|
| 219 |
+
dest='types',
|
| 220 |
+
action='store',
|
| 221 |
+
default=None,
|
| 222 |
+
metavar='TYPES',
|
| 223 |
+
choices=['gmpy', 'gmpy1', 'python'],
|
| 224 |
+
help='setup ground types: gmpy | gmpy1 | python; defaults to gmpy if gmpy2 '
|
| 225 |
+
'or gmpy is installed, otherwise python')
|
| 226 |
+
|
| 227 |
+
parser.add_argument(
|
| 228 |
+
'-o', '--order',
|
| 229 |
+
dest='order',
|
| 230 |
+
action='store',
|
| 231 |
+
default=None,
|
| 232 |
+
metavar='ORDER',
|
| 233 |
+
choices=['lex', 'grlex', 'grevlex', 'rev-lex', 'rev-grlex', 'rev-grevlex', 'old', 'none'],
|
| 234 |
+
help='setup ordering of terms: [rev-]lex | [rev-]grlex | [rev-]grevlex | old | none; defaults to lex')
|
| 235 |
+
|
| 236 |
+
parser.add_argument(
|
| 237 |
+
'-q', '--quiet',
|
| 238 |
+
dest='quiet',
|
| 239 |
+
action='store_true',
|
| 240 |
+
default=False,
|
| 241 |
+
help='print only version information at startup')
|
| 242 |
+
|
| 243 |
+
parser.add_argument(
|
| 244 |
+
'-d', '--doctest',
|
| 245 |
+
dest='doctest',
|
| 246 |
+
action='store_true',
|
| 247 |
+
default=False,
|
| 248 |
+
help='use the doctest format for output (you can just copy and paste it)')
|
| 249 |
+
|
| 250 |
+
parser.add_argument(
|
| 251 |
+
'-C', '--no-cache',
|
| 252 |
+
dest='cache',
|
| 253 |
+
action='store_false',
|
| 254 |
+
default=True,
|
| 255 |
+
help='disable caching mechanism')
|
| 256 |
+
|
| 257 |
+
parser.add_argument(
|
| 258 |
+
'-a', '--auto-symbols',
|
| 259 |
+
dest='auto_symbols',
|
| 260 |
+
action='store_true',
|
| 261 |
+
default=False,
|
| 262 |
+
help='automatically construct missing symbols')
|
| 263 |
+
|
| 264 |
+
parser.add_argument(
|
| 265 |
+
'-i', '--int-to-Integer',
|
| 266 |
+
dest='auto_int_to_Integer',
|
| 267 |
+
action='store_true',
|
| 268 |
+
default=False,
|
| 269 |
+
help="automatically wrap int literals with Integer")
|
| 270 |
+
|
| 271 |
+
parser.add_argument(
|
| 272 |
+
'-I', '--interactive',
|
| 273 |
+
dest='interactive',
|
| 274 |
+
action='store_true',
|
| 275 |
+
default=False,
|
| 276 |
+
help="equivalent to -a -i")
|
| 277 |
+
|
| 278 |
+
parser.add_argument(
|
| 279 |
+
'-D', '--debug',
|
| 280 |
+
dest='debug',
|
| 281 |
+
action='store_true',
|
| 282 |
+
default=False,
|
| 283 |
+
help='enable debugging output')
|
| 284 |
+
|
| 285 |
+
(options, ipy_args) = parser.parse_known_args()
|
| 286 |
+
if '--' in ipy_args:
|
| 287 |
+
ipy_args.remove('--')
|
| 288 |
+
|
| 289 |
+
if not options.cache:
|
| 290 |
+
os.environ['SYMPY_USE_CACHE'] = 'no'
|
| 291 |
+
|
| 292 |
+
if options.types:
|
| 293 |
+
os.environ['SYMPY_GROUND_TYPES'] = options.types
|
| 294 |
+
|
| 295 |
+
if options.debug:
|
| 296 |
+
os.environ['SYMPY_DEBUG'] = str(options.debug)
|
| 297 |
+
|
| 298 |
+
if options.doctest:
|
| 299 |
+
options.pretty = 'no'
|
| 300 |
+
options.console = 'python'
|
| 301 |
+
|
| 302 |
+
session = options.console
|
| 303 |
+
|
| 304 |
+
if session is not None:
|
| 305 |
+
ipython = session == 'ipython'
|
| 306 |
+
else:
|
| 307 |
+
try:
|
| 308 |
+
import IPython
|
| 309 |
+
ipython = True
|
| 310 |
+
except ImportError:
|
| 311 |
+
if not options.quiet:
|
| 312 |
+
from sympy.interactive.session import no_ipython
|
| 313 |
+
print(no_ipython)
|
| 314 |
+
ipython = False
|
| 315 |
+
|
| 316 |
+
args = {
|
| 317 |
+
'pretty_print': True,
|
| 318 |
+
'use_unicode': None,
|
| 319 |
+
'use_latex': None,
|
| 320 |
+
'order': None,
|
| 321 |
+
'argv': ipy_args,
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
if options.pretty == 'unicode':
|
| 325 |
+
args['use_unicode'] = True
|
| 326 |
+
elif options.pretty == 'ascii':
|
| 327 |
+
args['use_unicode'] = False
|
| 328 |
+
elif options.pretty == 'no':
|
| 329 |
+
args['pretty_print'] = False
|
| 330 |
+
|
| 331 |
+
if options.order is not None:
|
| 332 |
+
args['order'] = options.order
|
| 333 |
+
|
| 334 |
+
args['quiet'] = options.quiet
|
| 335 |
+
args['auto_symbols'] = options.auto_symbols or options.interactive
|
| 336 |
+
args['auto_int_to_Integer'] = options.auto_int_to_Integer or options.interactive
|
| 337 |
+
|
| 338 |
+
from sympy.interactive import init_session
|
| 339 |
+
init_session(ipython, **args)
|
| 340 |
+
|
| 341 |
+
if __name__ == "__main__":
|
| 342 |
+
main()
|
deepseek/lib/python3.10/site-packages/prometheus_fastapi_instrumentator-7.0.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ISC License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022 Tim Schwenke <tim@trallnag.com>
|
| 4 |
+
|
| 5 |
+
Permission to use, copy, modify, and/or distribute this software for any
|
| 6 |
+
purpose with or without fee is hereby granted, provided that the above
|
| 7 |
+
copyright notice and this permission notice appear in all copies.
|
| 8 |
+
|
| 9 |
+
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
| 10 |
+
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
| 11 |
+
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
| 12 |
+
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
| 13 |
+
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
| 14 |
+
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
| 15 |
+
PERFORMANCE OF THIS SOFTWARE.
|
deepseek/lib/python3.10/site-packages/prometheus_fastapi_instrumentator-7.0.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: prometheus-fastapi-instrumentator
|
| 3 |
+
Version: 7.0.0
|
| 4 |
+
Summary: Instrument your FastAPI with Prometheus metrics.
|
| 5 |
+
Home-page: https://github.com/trallnag/prometheus-fastapi-instrumentator
|
| 6 |
+
License: ISC
|
| 7 |
+
Keywords: prometheus,instrumentation,fastapi,exporter,metrics
|
| 8 |
+
Author: Tim Schwenke
|
| 9 |
+
Author-email: tim@trallnag.com
|
| 10 |
+
Requires-Python: >=3.8.1,<4.0.0
|
| 11 |
+
Classifier: License :: OSI Approved
|
| 12 |
+
Classifier: Programming Language :: Python :: 3
|
| 13 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 14 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 15 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 16 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 17 |
+
Requires-Dist: prometheus-client (>=0.8.0,<1.0.0)
|
| 18 |
+
Requires-Dist: starlette (>=0.30.0,<1.0.0)
|
| 19 |
+
Project-URL: Repository, https://github.com/trallnag/prometheus-fastapi-instrumentator
|
| 20 |
+
Description-Content-Type: text/markdown
|
| 21 |
+
|
| 22 |
+
# Prometheus FastAPI Instrumentator <!-- omit in toc -->
|
| 23 |
+
|
| 24 |
+
[](https://pypi.python.org/pypi/prometheus-fastapi-instrumentator)
|
| 25 |
+
[](https://pypi.python.org/pypi/prometheus-fastapi-instrumentator)
|
| 26 |
+
[](https://pepy.tech/project/prometheus-fastapi-instrumentator/month)
|
| 27 |
+
[](https://github.com/trallnag/kubestatus2cloudwatch/actions)
|
| 28 |
+
[](https://codecov.io/gh/trallnag/prometheus-fastapi-instrumentator)
|
| 29 |
+
|
| 30 |
+
A configurable and modular Prometheus Instrumentator for your FastAPI. Install
|
| 31 |
+
`prometheus-fastapi-instrumentator` from
|
| 32 |
+
[PyPI](https://pypi.python.org/pypi/prometheus-fastapi-instrumentator/). Here is
|
| 33 |
+
the fast track to get started with a pre-configured instrumentator. Import the
|
| 34 |
+
instrumentator class:
|
| 35 |
+
|
| 36 |
+
```python
|
| 37 |
+
from prometheus_fastapi_instrumentator import Instrumentator
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
Instrument your app with default metrics and expose the metrics:
|
| 41 |
+
|
| 42 |
+
```python
|
| 43 |
+
Instrumentator().instrument(app).expose(app)
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
Depending on your code you might have to use the following instead:
|
| 47 |
+
|
| 48 |
+
```python
|
| 49 |
+
instrumentator = Instrumentator().instrument(app)
|
| 50 |
+
|
| 51 |
+
@app.on_event("startup")
|
| 52 |
+
async def _startup():
|
| 53 |
+
instrumentator.expose(app)
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
With this, your FastAPI is instrumented and metrics are ready to be scraped. The
|
| 57 |
+
defaults give you:
|
| 58 |
+
|
| 59 |
+
- Counter `http_requests_total` with `handler`, `status` and `method`. Total
|
| 60 |
+
number of requests.
|
| 61 |
+
- Summary `http_request_size_bytes` with `handler`. Added up total of the
|
| 62 |
+
content lengths of all incoming requests.
|
| 63 |
+
- Summary `http_response_size_bytes` with `handler`. Added up total of the
|
| 64 |
+
content lengths of all outgoing responses.
|
| 65 |
+
- Histogram `http_request_duration_seconds` with `handler` and `method`. Only a
|
| 66 |
+
few buckets to keep cardinality low.
|
| 67 |
+
- Histogram `http_request_duration_highr_seconds` without any labels. Large
|
| 68 |
+
number of buckets (>20).
|
| 69 |
+
|
| 70 |
+
In addition, following behavior is active:
|
| 71 |
+
|
| 72 |
+
- Status codes are grouped into `2xx`, `3xx` and so on.
|
| 73 |
+
- Requests without a matching template are grouped into the handler `none`.
|
| 74 |
+
|
| 75 |
+
If one of these presets does not suit your needs you can do one of multiple
|
| 76 |
+
things:
|
| 77 |
+
|
| 78 |
+
- Pick one of the already existing closures from
|
| 79 |
+
[`metrics`](./src/prometheus_fastapi_instrumentator/metrics.py) and pass it to
|
| 80 |
+
the instrumentator instance. See [here](#adding-metrics) how to do that.
|
| 81 |
+
- Create your own instrumentation function that you can pass to an
|
| 82 |
+
instrumentator instance. See [here](#creating-new-metrics) to learn how more.
|
| 83 |
+
- Don't use this package at all and just use the source code as inspiration on
|
| 84 |
+
how to instrument your FastAPI.
|
| 85 |
+
|
| 86 |
+
## Table of Contents <!-- omit in toc -->
|
| 87 |
+
|
| 88 |
+
<!--TOC-->
|
| 89 |
+
|
| 90 |
+
- [Disclaimer](#disclaimer)
|
| 91 |
+
- [Features](#features)
|
| 92 |
+
- [Advanced Usage](#advanced-usage)
|
| 93 |
+
- [Creating the Instrumentator](#creating-the-instrumentator)
|
| 94 |
+
- [Adding metrics](#adding-metrics)
|
| 95 |
+
- [Creating new metrics](#creating-new-metrics)
|
| 96 |
+
- [Perform instrumentation](#perform-instrumentation)
|
| 97 |
+
- [Specify namespace and subsystem](#specify-namespace-and-subsystem)
|
| 98 |
+
- [Exposing endpoint](#exposing-endpoint)
|
| 99 |
+
- [Contributing](#contributing)
|
| 100 |
+
- [Licensing](#licensing)
|
| 101 |
+
|
| 102 |
+
<!--TOC-->
|
| 103 |
+
|
| 104 |
+
## Disclaimer
|
| 105 |
+
|
| 106 |
+
Not made for generic Prometheus instrumentation in Python. Use the Prometheus
|
| 107 |
+
client library for that. This packages uses it as well.
|
| 108 |
+
|
| 109 |
+
All the generic middleware and instrumentation code comes with a cost in
|
| 110 |
+
performance that can become noticeable.
|
| 111 |
+
|
| 112 |
+
## Features
|
| 113 |
+
|
| 114 |
+
Beyond the fast track, this instrumentator is **highly configurable** and it is
|
| 115 |
+
very easy to customize and adapt to your specific use case. Here is a list of
|
| 116 |
+
some of these options you may opt-in to:
|
| 117 |
+
|
| 118 |
+
- Regex patterns to ignore certain routes.
|
| 119 |
+
- Completely ignore untemplated routes.
|
| 120 |
+
- Control instrumentation and exposition with an env var.
|
| 121 |
+
- Rounding of latencies to a certain decimal number.
|
| 122 |
+
- Renaming of labels and the metric.
|
| 123 |
+
- Metrics endpoint can compress data with gzip.
|
| 124 |
+
- Opt-in metric to monitor the number of requests in progress.
|
| 125 |
+
|
| 126 |
+
It also features a **modular approach to metrics** that should instrument all
|
| 127 |
+
FastAPI endpoints. You can either choose from a set of already existing metrics
|
| 128 |
+
or create your own. And every metric function by itself can be configured as
|
| 129 |
+
well.
|
| 130 |
+
|
| 131 |
+
## Advanced Usage
|
| 132 |
+
|
| 133 |
+
This chapter contains an example on the advanced usage of the Prometheus FastAPI
|
| 134 |
+
Instrumentator to showcase most of it's features.
|
| 135 |
+
|
| 136 |
+
### Creating the Instrumentator
|
| 137 |
+
|
| 138 |
+
We start by creating an instance of the Instrumentator. Notice the additional
|
| 139 |
+
`metrics` import. This will come in handy later.
|
| 140 |
+
|
| 141 |
+
```python
|
| 142 |
+
from prometheus_fastapi_instrumentator import Instrumentator, metrics
|
| 143 |
+
|
| 144 |
+
instrumentator = Instrumentator(
|
| 145 |
+
should_group_status_codes=False,
|
| 146 |
+
should_ignore_untemplated=True,
|
| 147 |
+
should_respect_env_var=True,
|
| 148 |
+
should_instrument_requests_inprogress=True,
|
| 149 |
+
excluded_handlers=[".*admin.*", "/metrics"],
|
| 150 |
+
env_var_name="ENABLE_METRICS",
|
| 151 |
+
inprogress_name="inprogress",
|
| 152 |
+
inprogress_labels=True,
|
| 153 |
+
)
|
| 154 |
+
```
|
| 155 |
+
|
| 156 |
+
Unlike in the fast track example, now the instrumentation and exposition will
|
| 157 |
+
only take place if the environment variable `ENABLE_METRICS` is `true` at
|
| 158 |
+
run-time. This can be helpful in larger deployments with multiple services
|
| 159 |
+
depending on the same base FastAPI.
|
| 160 |
+
|
| 161 |
+
### Adding metrics
|
| 162 |
+
|
| 163 |
+
Let's say we also want to instrument the size of requests and responses. For
|
| 164 |
+
this we use the `add()` method. This method does nothing more than taking a
|
| 165 |
+
function and adding it to a list. Then during run-time every time FastAPI
|
| 166 |
+
handles a request all functions in this list will be called while giving them a
|
| 167 |
+
single argument that stores useful information like the request and response
|
| 168 |
+
objects. If no `add()` at all is used, the default metric gets added in the
|
| 169 |
+
background. This is what happens in the fast track example.
|
| 170 |
+
|
| 171 |
+
All instrumentation functions are stored as closures in the `metrics` module.
|
| 172 |
+
|
| 173 |
+
Closures come in handy here because it allows us to configure the functions
|
| 174 |
+
within.
|
| 175 |
+
|
| 176 |
+
```python
|
| 177 |
+
instrumentator.add(metrics.latency(buckets=(1, 2, 3,)))
|
| 178 |
+
```
|
| 179 |
+
|
| 180 |
+
This simply adds the metric you also get in the fast track example with a
|
| 181 |
+
modified buckets argument. But we would also like to record the size of all
|
| 182 |
+
requests and responses.
|
| 183 |
+
|
| 184 |
+
```python
|
| 185 |
+
instrumentator.add(
|
| 186 |
+
metrics.request_size(
|
| 187 |
+
should_include_handler=True,
|
| 188 |
+
should_include_method=False,
|
| 189 |
+
should_include_status=True,
|
| 190 |
+
metric_namespace="a",
|
| 191 |
+
metric_subsystem="b",
|
| 192 |
+
)
|
| 193 |
+
).add(
|
| 194 |
+
metrics.response_size(
|
| 195 |
+
should_include_handler=True,
|
| 196 |
+
should_include_method=False,
|
| 197 |
+
should_include_status=True,
|
| 198 |
+
metric_namespace="namespace",
|
| 199 |
+
metric_subsystem="subsystem",
|
| 200 |
+
)
|
| 201 |
+
)
|
| 202 |
+
```
|
| 203 |
+
|
| 204 |
+
You can add as many metrics you like to the instrumentator.
|
| 205 |
+
|
| 206 |
+
### Creating new metrics
|
| 207 |
+
|
| 208 |
+
As already mentioned, it is possible to create custom functions to pass on to
|
| 209 |
+
`add()`. This is also how the default metrics are implemented.
|
| 210 |
+
|
| 211 |
+
The basic idea is that the instrumentator creates an `info` object that contains
|
| 212 |
+
everything necessary for instrumentation based on the configuration of the
|
| 213 |
+
instrumentator. This includes the raw request and response objects but also the
|
| 214 |
+
modified handler, grouped status code and duration. Next, all registered
|
| 215 |
+
instrumentation functions are called. They get `info` as their single argument.
|
| 216 |
+
|
| 217 |
+
Let's say we want to count the number of times a certain language has been
|
| 218 |
+
requested.
|
| 219 |
+
|
| 220 |
+
```python
|
| 221 |
+
from typing import Callable
|
| 222 |
+
from prometheus_fastapi_instrumentator.metrics import Info
|
| 223 |
+
from prometheus_client import Counter
|
| 224 |
+
|
| 225 |
+
def http_requested_languages_total() -> Callable[[Info], None]:
|
| 226 |
+
METRIC = Counter(
|
| 227 |
+
"http_requested_languages_total",
|
| 228 |
+
"Number of times a certain language has been requested.",
|
| 229 |
+
labelnames=("langs",)
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
def instrumentation(info: Info) -> None:
|
| 233 |
+
langs = set()
|
| 234 |
+
lang_str = info.request.headers["Accept-Language"]
|
| 235 |
+
for element in lang_str.split(","):
|
| 236 |
+
element = element.split(";")[0].strip().lower()
|
| 237 |
+
langs.add(element)
|
| 238 |
+
for language in langs:
|
| 239 |
+
METRIC.labels(language).inc()
|
| 240 |
+
|
| 241 |
+
return instrumentation
|
| 242 |
+
```
|
| 243 |
+
|
| 244 |
+
The function `http_requested_languages_total` is used for persistent elements
|
| 245 |
+
that are stored between all instrumentation executions (for example the metric
|
| 246 |
+
instance itself). Next comes the closure. This function must adhere to the shown
|
| 247 |
+
interface. It will always get an `Info` object that contains the request,
|
| 248 |
+
response and a few other modified informations. For example the (grouped) status
|
| 249 |
+
code or the handler. Finally, the closure is returned.
|
| 250 |
+
|
| 251 |
+
**Important:** The response object inside `info` can either be the response
|
| 252 |
+
object or `None`. In addition, errors thrown in the handler are not caught by
|
| 253 |
+
the instrumentator. I recommend to check the documentation and/or the source
|
| 254 |
+
code before creating your own metrics.
|
| 255 |
+
|
| 256 |
+
To use it, we hand over the closure to the instrumentator object.
|
| 257 |
+
|
| 258 |
+
```python
|
| 259 |
+
instrumentator.add(http_requested_languages_total())
|
| 260 |
+
```
|
| 261 |
+
|
| 262 |
+
### Perform instrumentation
|
| 263 |
+
|
| 264 |
+
Up to this point, the FastAPI has not been touched at all. Everything has been
|
| 265 |
+
stored in the `instrumentator` only. To actually register the instrumentation
|
| 266 |
+
with FastAPI, the `instrument()` method has to be called.
|
| 267 |
+
|
| 268 |
+
```python
|
| 269 |
+
instrumentator.instrument(app)
|
| 270 |
+
```
|
| 271 |
+
|
| 272 |
+
Notice that this will do nothing if `should_respect_env_var` has been set during
|
| 273 |
+
construction of the instrumentator object and the respective env var is not
|
| 274 |
+
found.
|
| 275 |
+
|
| 276 |
+
### Specify namespace and subsystem
|
| 277 |
+
|
| 278 |
+
You can specify the namespace and subsystem of the metrics by passing them in
|
| 279 |
+
the instrument method.
|
| 280 |
+
|
| 281 |
+
```python
|
| 282 |
+
from prometheus_fastapi_instrumentator import Instrumentator
|
| 283 |
+
|
| 284 |
+
@app.on_event("startup")
|
| 285 |
+
async def startup():
|
| 286 |
+
Instrumentator().instrument(app, metric_namespace='myproject', metric_subsystem='myservice').expose(app)
|
| 287 |
+
```
|
| 288 |
+
|
| 289 |
+
Then your metrics will contain the namespace and subsystem in the metric name.
|
| 290 |
+
|
| 291 |
+
```sh
|
| 292 |
+
# TYPE myproject_myservice_http_request_duration_highr_seconds histogram
|
| 293 |
+
myproject_myservice_http_request_duration_highr_seconds_bucket{le="0.01"} 0.0
|
| 294 |
+
```
|
| 295 |
+
|
| 296 |
+
### Exposing endpoint
|
| 297 |
+
|
| 298 |
+
To expose an endpoint for the metrics either follow
|
| 299 |
+
[Prometheus Python Client](https://github.com/prometheus/client_python) and add
|
| 300 |
+
the endpoint manually to the FastAPI or serve it on a separate server. You can
|
| 301 |
+
also use the included `expose` method. It will add an endpoint to the given
|
| 302 |
+
FastAPI. With `should_gzip` you can instruct the endpoint to compress the data
|
| 303 |
+
as long as the client accepts gzip encoding. Prometheus for example does by
|
| 304 |
+
default. Beware that network bandwith is often cheaper than CPU cycles.
|
| 305 |
+
|
| 306 |
+
```python
|
| 307 |
+
instrumentator.expose(app, include_in_schema=False, should_gzip=True)
|
| 308 |
+
```
|
| 309 |
+
|
| 310 |
+
Notice that this will to nothing if `should_respect_env_var` has been set during
|
| 311 |
+
construction of the instrumentator object and the respective env var is not
|
| 312 |
+
found.
|
| 313 |
+
|
| 314 |
+
## Contributing
|
| 315 |
+
|
| 316 |
+
Please refer to [`CONTRIBUTING.md`](CONTRIBUTING).
|
| 317 |
+
|
| 318 |
+
Consult [`DEVELOPMENT.md`](DEVELOPMENT.md) for guidance regarding development.
|
| 319 |
+
|
| 320 |
+
Read [`RELEASE.md`](RELEASE.md) for details about the release process.
|
| 321 |
+
|
| 322 |
+
## Licensing
|
| 323 |
+
|
| 324 |
+
The default license for this project is the
|
| 325 |
+
[ISC License](https://choosealicense.com/licenses/isc). A permissive license
|
| 326 |
+
functionally equivalent to the BSD 2-Clause and MIT licenses, removing some
|
| 327 |
+
language that is no longer necessary. See [`LICENSE`](LICENSE) for the license
|
| 328 |
+
text.
|
| 329 |
+
|
| 330 |
+
The [BSD 3-Clause License](https://choosealicense.com/licenses/bsd-3-clause) is
|
| 331 |
+
used as the license for the
|
| 332 |
+
[`routing`](src/prometheus_fastapi_instrumentator/routing.py) module. This is
|
| 333 |
+
due to it containing code from
|
| 334 |
+
[elastic/apm-agent-python](https://github.com/elastic/apm-agent-python). BSD
|
| 335 |
+
3-Clause is a permissive license similar to the BSD 2-Clause License, but with a
|
| 336 |
+
3rd clause that prohibits others from using the name of the copyright holder or
|
| 337 |
+
its contributors to promote derived products without written consent. The
|
| 338 |
+
license text is included in the module itself.
|
| 339 |
+
|
deepseek/lib/python3.10/site-packages/prometheus_fastapi_instrumentator-7.0.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
prometheus_fastapi_instrumentator-7.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
prometheus_fastapi_instrumentator-7.0.0.dist-info/LICENSE,sha256=1Bb46zX6e7vYSh8YDT_6oXB-XpP6E2AyHdQtnXY9Cfw,762
|
| 3 |
+
prometheus_fastapi_instrumentator-7.0.0.dist-info/METADATA,sha256=_D1vbKE2TeerP-Nbw4LbehbmEintN5ezG34dJIYfVvw,13110
|
| 4 |
+
prometheus_fastapi_instrumentator-7.0.0.dist-info/RECORD,,
|
| 5 |
+
prometheus_fastapi_instrumentator-7.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
prometheus_fastapi_instrumentator-7.0.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
| 7 |
+
prometheus_fastapi_instrumentator/__init__.py,sha256=ilCcCHTBzi04jKjOR70MkMt2Y9uGNwabCRm3Yi55m6Q,134
|
| 8 |
+
prometheus_fastapi_instrumentator/__pycache__/__init__.cpython-310.pyc,,
|
| 9 |
+
prometheus_fastapi_instrumentator/__pycache__/instrumentation.cpython-310.pyc,,
|
| 10 |
+
prometheus_fastapi_instrumentator/__pycache__/metrics.cpython-310.pyc,,
|
| 11 |
+
prometheus_fastapi_instrumentator/__pycache__/middleware.cpython-310.pyc,,
|
| 12 |
+
prometheus_fastapi_instrumentator/__pycache__/routing.cpython-310.pyc,,
|
| 13 |
+
prometheus_fastapi_instrumentator/instrumentation.py,sha256=ZZlvPfEvIhzi0PVbxEWo3AXjNcZe56EjxkNHYl-422A,13750
|
| 14 |
+
prometheus_fastapi_instrumentator/metrics.py,sha256=z87HnRSO3TpKorElRtO2ceWWELsgNSRO9TA8JDZLHuM,27724
|
| 15 |
+
prometheus_fastapi_instrumentator/middleware.py,sha256=guRv1ptWdDm8Z7GW3p2zaNZc3opFWaB_QRReS-RlBjM,9484
|
| 16 |
+
prometheus_fastapi_instrumentator/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 17 |
+
prometheus_fastapi_instrumentator/routing.py,sha256=uQ0I9gHF7IIkVjBmfAy8Ax8A3wOChLTmH0aUXRgshfs,4028
|
deepseek/lib/python3.10/site-packages/sentencepiece/__pycache__/sentencepiece_model_pb2.cpython-310.pyc
ADDED
|
Binary file (3.74 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/sentencepiece/_version.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
__version__ = '0.2.0'
|
deepseek/lib/python3.10/site-packages/sentencepiece/sentencepiece_model_pb2.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: sentencepiece_model.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f 
\x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03')
|
| 17 |
+
|
| 18 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 19 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', globals())
|
| 20 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 21 |
+
|
| 22 |
+
DESCRIPTOR._options = None
|
| 23 |
+
DESCRIPTOR._serialized_options = b'H\003'
|
| 24 |
+
_TRAINERSPEC.fields_by_name['mining_sentence_size']._options = None
|
| 25 |
+
_TRAINERSPEC.fields_by_name['mining_sentence_size']._serialized_options = b'\030\001'
|
| 26 |
+
_TRAINERSPEC.fields_by_name['training_sentence_size']._options = None
|
| 27 |
+
_TRAINERSPEC.fields_by_name['training_sentence_size']._serialized_options = b'\030\001'
|
| 28 |
+
_TRAINERSPEC._serialized_start=45
|
| 29 |
+
_TRAINERSPEC._serialized_end=1581
|
| 30 |
+
_TRAINERSPEC_MODELTYPE._serialized_start=1517
|
| 31 |
+
_TRAINERSPEC_MODELTYPE._serialized_end=1570
|
| 32 |
+
_NORMALIZERSPEC._serialized_start=1584
|
| 33 |
+
_NORMALIZERSPEC._serialized_end=1793
|
| 34 |
+
_SELFTESTDATA._serialized_start=1795
|
| 35 |
+
_SELFTESTDATA._serialized_end=1916
|
| 36 |
+
_SELFTESTDATA_SAMPLE._serialized_start=1864
|
| 37 |
+
_SELFTESTDATA_SAMPLE._serialized_end=1905
|
| 38 |
+
_MODELPROTO._serialized_start=1919
|
| 39 |
+
_MODELPROTO._serialized_end=2429
|
| 40 |
+
_MODELPROTO_SENTENCEPIECE._serialized_start=2208
|
| 41 |
+
_MODELPROTO_SENTENCEPIECE._serialized_end=2418
|
| 42 |
+
_MODELPROTO_SENTENCEPIECE_TYPE._serialized_start=2323
|
| 43 |
+
_MODELPROTO_SENTENCEPIECE_TYPE._serialized_end=2407
|
| 44 |
+
# @@protoc_insertion_point(module_scope)
|
deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/METADATA
ADDED
|
@@ -0,0 +1,557 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: torch
|
| 3 |
+
Version: 2.5.1
|
| 4 |
+
Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration
|
| 5 |
+
Home-page: https://pytorch.org/
|
| 6 |
+
Download-URL: https://github.com/pytorch/pytorch/tags
|
| 7 |
+
Author: PyTorch Team
|
| 8 |
+
Author-email: packages@pytorch.org
|
| 9 |
+
License: BSD-3-Clause
|
| 10 |
+
Keywords: pytorch,machine learning
|
| 11 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 12 |
+
Classifier: Intended Audience :: Developers
|
| 13 |
+
Classifier: Intended Audience :: Education
|
| 14 |
+
Classifier: Intended Audience :: Science/Research
|
| 15 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 16 |
+
Classifier: Topic :: Scientific/Engineering
|
| 17 |
+
Classifier: Topic :: Scientific/Engineering :: Mathematics
|
| 18 |
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
| 19 |
+
Classifier: Topic :: Software Development
|
| 20 |
+
Classifier: Topic :: Software Development :: Libraries
|
| 21 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
| 22 |
+
Classifier: Programming Language :: C++
|
| 23 |
+
Classifier: Programming Language :: Python :: 3
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 25 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 26 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 27 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 28 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 29 |
+
Requires-Python: >=3.8.0
|
| 30 |
+
Description-Content-Type: text/markdown
|
| 31 |
+
License-File: LICENSE
|
| 32 |
+
License-File: NOTICE
|
| 33 |
+
Requires-Dist: filelock
|
| 34 |
+
Requires-Dist: typing-extensions (>=4.8.0)
|
| 35 |
+
Requires-Dist: networkx
|
| 36 |
+
Requires-Dist: jinja2
|
| 37 |
+
Requires-Dist: fsspec
|
| 38 |
+
Requires-Dist: nvidia-cuda-nvrtc-cu12 (==12.4.127) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 39 |
+
Requires-Dist: nvidia-cuda-runtime-cu12 (==12.4.127) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 40 |
+
Requires-Dist: nvidia-cuda-cupti-cu12 (==12.4.127) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 41 |
+
Requires-Dist: nvidia-cudnn-cu12 (==9.1.0.70) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 42 |
+
Requires-Dist: nvidia-cublas-cu12 (==12.4.5.8) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 43 |
+
Requires-Dist: nvidia-cufft-cu12 (==11.2.1.3) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 44 |
+
Requires-Dist: nvidia-curand-cu12 (==10.3.5.147) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 45 |
+
Requires-Dist: nvidia-cusolver-cu12 (==11.6.1.9) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 46 |
+
Requires-Dist: nvidia-cusparse-cu12 (==12.3.1.170) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 47 |
+
Requires-Dist: nvidia-nccl-cu12 (==2.21.5) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 48 |
+
Requires-Dist: nvidia-nvtx-cu12 (==12.4.127) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 49 |
+
Requires-Dist: nvidia-nvjitlink-cu12 (==12.4.127) ; platform_system == "Linux" and platform_machine == "x86_64"
|
| 50 |
+
Requires-Dist: triton (==3.1.0) ; platform_system == "Linux" and platform_machine == "x86_64" and python_version < "3.13"
|
| 51 |
+
Requires-Dist: sympy (==1.12.1) ; python_version == "3.8"
|
| 52 |
+
Requires-Dist: setuptools ; python_version >= "3.12"
|
| 53 |
+
Requires-Dist: sympy (==1.13.1) ; python_version >= "3.9"
|
| 54 |
+
Provides-Extra: opt-einsum
|
| 55 |
+
Requires-Dist: opt-einsum (>=3.3) ; extra == 'opt-einsum'
|
| 56 |
+
Provides-Extra: optree
|
| 57 |
+
Requires-Dist: optree (>=0.12.0) ; extra == 'optree'
|
| 58 |
+
|
| 59 |
+

|
| 60 |
+
|
| 61 |
+
--------------------------------------------------------------------------------
|
| 62 |
+
|
| 63 |
+
PyTorch is a Python package that provides two high-level features:
|
| 64 |
+
- Tensor computation (like NumPy) with strong GPU acceleration
|
| 65 |
+
- Deep neural networks built on a tape-based autograd system
|
| 66 |
+
|
| 67 |
+
You can reuse your favorite Python packages such as NumPy, SciPy, and Cython to extend PyTorch when needed.
|
| 68 |
+
|
| 69 |
+
Our trunk health (Continuous Integration signals) can be found at [hud.pytorch.org](https://hud.pytorch.org/ci/pytorch/pytorch/main).
|
| 70 |
+
|
| 71 |
+
<!-- toc -->
|
| 72 |
+
|
| 73 |
+
- [More About PyTorch](#more-about-pytorch)
|
| 74 |
+
- [A GPU-Ready Tensor Library](#a-gpu-ready-tensor-library)
|
| 75 |
+
- [Dynamic Neural Networks: Tape-Based Autograd](#dynamic-neural-networks-tape-based-autograd)
|
| 76 |
+
- [Python First](#python-first)
|
| 77 |
+
- [Imperative Experiences](#imperative-experiences)
|
| 78 |
+
- [Fast and Lean](#fast-and-lean)
|
| 79 |
+
- [Extensions Without Pain](#extensions-without-pain)
|
| 80 |
+
- [Installation](#installation)
|
| 81 |
+
- [Binaries](#binaries)
|
| 82 |
+
- [NVIDIA Jetson Platforms](#nvidia-jetson-platforms)
|
| 83 |
+
- [From Source](#from-source)
|
| 84 |
+
- [Prerequisites](#prerequisites)
|
| 85 |
+
- [NVIDIA CUDA Support](#nvidia-cuda-support)
|
| 86 |
+
- [AMD ROCm Support](#amd-rocm-support)
|
| 87 |
+
- [Intel GPU Support](#intel-gpu-support)
|
| 88 |
+
- [Get the PyTorch Source](#get-the-pytorch-source)
|
| 89 |
+
- [Install Dependencies](#install-dependencies)
|
| 90 |
+
- [Install PyTorch](#install-pytorch)
|
| 91 |
+
- [Adjust Build Options (Optional)](#adjust-build-options-optional)
|
| 92 |
+
- [Docker Image](#docker-image)
|
| 93 |
+
- [Using pre-built images](#using-pre-built-images)
|
| 94 |
+
- [Building the image yourself](#building-the-image-yourself)
|
| 95 |
+
- [Building the Documentation](#building-the-documentation)
|
| 96 |
+
- [Previous Versions](#previous-versions)
|
| 97 |
+
- [Getting Started](#getting-started)
|
| 98 |
+
- [Resources](#resources)
|
| 99 |
+
- [Communication](#communication)
|
| 100 |
+
- [Releases and Contributing](#releases-and-contributing)
|
| 101 |
+
- [The Team](#the-team)
|
| 102 |
+
- [License](#license)
|
| 103 |
+
|
| 104 |
+
<!-- tocstop -->
|
| 105 |
+
|
| 106 |
+
## More About PyTorch
|
| 107 |
+
|
| 108 |
+
[Learn the basics of PyTorch](https://pytorch.org/tutorials/beginner/basics/intro.html)
|
| 109 |
+
|
| 110 |
+
At a granular level, PyTorch is a library that consists of the following components:
|
| 111 |
+
|
| 112 |
+
| Component | Description |
|
| 113 |
+
| ---- | --- |
|
| 114 |
+
| [**torch**](https://pytorch.org/docs/stable/torch.html) | A Tensor library like NumPy, with strong GPU support |
|
| 115 |
+
| [**torch.autograd**](https://pytorch.org/docs/stable/autograd.html) | A tape-based automatic differentiation library that supports all differentiable Tensor operations in torch |
|
| 116 |
+
| [**torch.jit**](https://pytorch.org/docs/stable/jit.html) | A compilation stack (TorchScript) to create serializable and optimizable models from PyTorch code |
|
| 117 |
+
| [**torch.nn**](https://pytorch.org/docs/stable/nn.html) | A neural networks library deeply integrated with autograd designed for maximum flexibility |
|
| 118 |
+
| [**torch.multiprocessing**](https://pytorch.org/docs/stable/multiprocessing.html) | Python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and Hogwild training |
|
| 119 |
+
| [**torch.utils**](https://pytorch.org/docs/stable/data.html) | DataLoader and other utility functions for convenience |
|
| 120 |
+
|
| 121 |
+
Usually, PyTorch is used either as:
|
| 122 |
+
|
| 123 |
+
- A replacement for NumPy to use the power of GPUs.
|
| 124 |
+
- A deep learning research platform that provides maximum flexibility and speed.
|
| 125 |
+
|
| 126 |
+
Elaborating Further:
|
| 127 |
+
|
| 128 |
+
### A GPU-Ready Tensor Library
|
| 129 |
+
|
| 130 |
+
If you use NumPy, then you have used Tensors (a.k.a. ndarray).
|
| 131 |
+
|
| 132 |
+

|
| 133 |
+
|
| 134 |
+
PyTorch provides Tensors that can live either on the CPU or the GPU and accelerates the
|
| 135 |
+
computation by a huge amount.
|
| 136 |
+
|
| 137 |
+
We provide a wide variety of tensor routines to accelerate and fit your scientific computation needs
|
| 138 |
+
such as slicing, indexing, mathematical operations, linear algebra, reductions.
|
| 139 |
+
And they are fast!
|
| 140 |
+
|
| 141 |
+
### Dynamic Neural Networks: Tape-Based Autograd
|
| 142 |
+
|
| 143 |
+
PyTorch has a unique way of building neural networks: using and replaying a tape recorder.
|
| 144 |
+
|
| 145 |
+
Most frameworks such as TensorFlow, Theano, Caffe, and CNTK have a static view of the world.
|
| 146 |
+
One has to build a neural network and reuse the same structure again and again.
|
| 147 |
+
Changing the way the network behaves means that one has to start from scratch.
|
| 148 |
+
|
| 149 |
+
With PyTorch, we use a technique called reverse-mode auto-differentiation, which allows you to
|
| 150 |
+
change the way your network behaves arbitrarily with zero lag or overhead. Our inspiration comes
|
| 151 |
+
from several research papers on this topic, as well as current and past work such as
|
| 152 |
+
[torch-autograd](https://github.com/twitter/torch-autograd),
|
| 153 |
+
[autograd](https://github.com/HIPS/autograd),
|
| 154 |
+
[Chainer](https://chainer.org), etc.
|
| 155 |
+
|
| 156 |
+
While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date.
|
| 157 |
+
You get the best of speed and flexibility for your crazy research.
|
| 158 |
+
|
| 159 |
+

|
| 160 |
+
|
| 161 |
+
### Python First
|
| 162 |
+
|
| 163 |
+
PyTorch is not a Python binding into a monolithic C++ framework.
|
| 164 |
+
It is built to be deeply integrated into Python.
|
| 165 |
+
You can use it naturally like you would use [NumPy](https://www.numpy.org/) / [SciPy](https://www.scipy.org/) / [scikit-learn](https://scikit-learn.org) etc.
|
| 166 |
+
You can write your new neural network layers in Python itself, using your favorite libraries
|
| 167 |
+
and use packages such as [Cython](https://cython.org/) and [Numba](http://numba.pydata.org/).
|
| 168 |
+
Our goal is to not reinvent the wheel where appropriate.
|
| 169 |
+
|
| 170 |
+
### Imperative Experiences
|
| 171 |
+
|
| 172 |
+
PyTorch is designed to be intuitive, linear in thought, and easy to use.
|
| 173 |
+
When you execute a line of code, it gets executed. There isn't an asynchronous view of the world.
|
| 174 |
+
When you drop into a debugger or receive error messages and stack traces, understanding them is straightforward.
|
| 175 |
+
The stack trace points to exactly where your code was defined.
|
| 176 |
+
We hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines.
|
| 177 |
+
|
| 178 |
+
### Fast and Lean
|
| 179 |
+
|
| 180 |
+
PyTorch has minimal framework overhead. We integrate acceleration libraries
|
| 181 |
+
such as [Intel MKL](https://software.intel.com/mkl) and NVIDIA ([cuDNN](https://developer.nvidia.com/cudnn), [NCCL](https://developer.nvidia.com/nccl)) to maximize speed.
|
| 182 |
+
At the core, its CPU and GPU Tensor and neural network backends
|
| 183 |
+
are mature and have been tested for years.
|
| 184 |
+
|
| 185 |
+
Hence, PyTorch is quite fast — whether you run small or large neural networks.
|
| 186 |
+
|
| 187 |
+
The memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives.
|
| 188 |
+
We've written custom memory allocators for the GPU to make sure that
|
| 189 |
+
your deep learning models are maximally memory efficient.
|
| 190 |
+
This enables you to train bigger deep learning models than before.
|
| 191 |
+
|
| 192 |
+
### Extensions Without Pain
|
| 193 |
+
|
| 194 |
+
Writing new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straightforward
|
| 195 |
+
and with minimal abstractions.
|
| 196 |
+
|
| 197 |
+
You can write new neural network layers in Python using the torch API
|
| 198 |
+
[or your favorite NumPy-based libraries such as SciPy](https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html).
|
| 199 |
+
|
| 200 |
+
If you want to write your layers in C/C++, we provide a convenient extension API that is efficient and with minimal boilerplate.
|
| 201 |
+
No wrapper code needs to be written. You can see [a tutorial here](https://pytorch.org/tutorials/advanced/cpp_extension.html) and [an example here](https://github.com/pytorch/extension-cpp).
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
## Installation
|
| 205 |
+
|
| 206 |
+
### Binaries
|
| 207 |
+
Commands to install binaries via Conda or pip wheels are on our website: [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
#### NVIDIA Jetson Platforms
|
| 211 |
+
|
| 212 |
+
Python wheels for NVIDIA's Jetson Nano, Jetson TX1/TX2, Jetson Xavier NX/AGX, and Jetson AGX Orin are provided [here](https://forums.developer.nvidia.com/t/pytorch-for-jetson-version-1-10-now-available/72048) and the L4T container is published [here](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch)
|
| 213 |
+
|
| 214 |
+
They require JetPack 4.2 and above, and [@dusty-nv](https://github.com/dusty-nv) and [@ptrblck](https://github.com/ptrblck) are maintaining them.
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
### From Source
|
| 218 |
+
|
| 219 |
+
#### Prerequisites
|
| 220 |
+
If you are installing from source, you will need:
|
| 221 |
+
- Python 3.8 or later (for Linux, Python 3.8.1+ is needed)
|
| 222 |
+
- A compiler that fully supports C++17, such as clang or gcc (gcc 9.4.0 or newer is required, on Linux)
|
| 223 |
+
- Visual Studio or Visual Studio Build Tool on Windows
|
| 224 |
+
|
| 225 |
+
\* PyTorch CI uses Visual C++ BuildTools, which come with Visual Studio Enterprise,
|
| 226 |
+
Professional, or Community Editions. You can also install the build tools from
|
| 227 |
+
https://visualstudio.microsoft.com/visual-cpp-build-tools/. The build tools *do not*
|
| 228 |
+
come with Visual Studio Code by default.
|
| 229 |
+
|
| 230 |
+
\* We highly recommend installing an [Anaconda](https://www.anaconda.com/download) environment. You will get a high-quality BLAS library (MKL) and you get controlled dependency versions regardless of your Linux distro.
|
| 231 |
+
|
| 232 |
+
An example of environment setup is shown below:
|
| 233 |
+
|
| 234 |
+
* Linux:
|
| 235 |
+
|
| 236 |
+
```bash
|
| 237 |
+
$ source <CONDA_INSTALL_DIR>/bin/activate
|
| 238 |
+
$ conda create -y -n <CONDA_NAME>
|
| 239 |
+
$ conda activate <CONDA_NAME>
|
| 240 |
+
```
|
| 241 |
+
|
| 242 |
+
* Windows:
|
| 243 |
+
|
| 244 |
+
```bash
|
| 245 |
+
$ source <CONDA_INSTALL_DIR>\Scripts\activate.bat
|
| 246 |
+
$ conda create -y -n <CONDA_NAME>
|
| 247 |
+
$ conda activate <CONDA_NAME>
|
| 248 |
+
$ call "C:\Program Files\Microsoft Visual Studio\<VERSION>\Community\VC\Auxiliary\Build\vcvarsall.bat" x64
|
| 249 |
+
```
|
| 250 |
+
|
| 251 |
+
##### NVIDIA CUDA Support
|
| 252 |
+
If you want to compile with CUDA support, [select a supported version of CUDA from our support matrix](https://pytorch.org/get-started/locally/), then install the following:
|
| 253 |
+
- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads)
|
| 254 |
+
- [NVIDIA cuDNN](https://developer.nvidia.com/cudnn) v8.5 or above
|
| 255 |
+
- [Compiler](https://gist.github.com/ax3l/9489132) compatible with CUDA
|
| 256 |
+
|
| 257 |
+
Note: You could refer to the [cuDNN Support Matrix](https://docs.nvidia.com/deeplearning/cudnn/reference/support-matrix.html) for cuDNN versions with the various supported CUDA, CUDA driver and NVIDIA hardware
|
| 258 |
+
|
| 259 |
+
If you want to disable CUDA support, export the environment variable `USE_CUDA=0`.
|
| 260 |
+
Other potentially useful environment variables may be found in `setup.py`.
|
| 261 |
+
|
| 262 |
+
If you are building for NVIDIA's Jetson platforms (Jetson Nano, TX1, TX2, AGX Xavier), Instructions to install PyTorch for Jetson Nano are [available here](https://devtalk.nvidia.com/default/topic/1049071/jetson-nano/pytorch-for-jetson-nano/)
|
| 263 |
+
|
| 264 |
+
##### AMD ROCm Support
|
| 265 |
+
If you want to compile with ROCm support, install
|
| 266 |
+
- [AMD ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html) 4.0 and above installation
|
| 267 |
+
- ROCm is currently supported only for Linux systems.
|
| 268 |
+
|
| 269 |
+
If you want to disable ROCm support, export the environment variable `USE_ROCM=0`.
|
| 270 |
+
Other potentially useful environment variables may be found in `setup.py`.
|
| 271 |
+
|
| 272 |
+
##### Intel GPU Support
|
| 273 |
+
If you want to compile with Intel GPU support, follow these
|
| 274 |
+
- [PyTorch Prerequisites for Intel GPUs](https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html) instructions.
|
| 275 |
+
- Intel GPU is supported for Linux and Windows.
|
| 276 |
+
|
| 277 |
+
If you want to disable Intel GPU support, export the environment variable `USE_XPU=0`.
|
| 278 |
+
Other potentially useful environment variables may be found in `setup.py`.
|
| 279 |
+
|
| 280 |
+
#### Get the PyTorch Source
|
| 281 |
+
```bash
|
| 282 |
+
git clone --recursive https://github.com/pytorch/pytorch
|
| 283 |
+
cd pytorch
|
| 284 |
+
# if you are updating an existing checkout
|
| 285 |
+
git submodule sync
|
| 286 |
+
git submodule update --init --recursive
|
| 287 |
+
```
|
| 288 |
+
|
| 289 |
+
#### Install Dependencies
|
| 290 |
+
|
| 291 |
+
**Common**
|
| 292 |
+
|
| 293 |
+
```bash
|
| 294 |
+
conda install cmake ninja
|
| 295 |
+
# Run this command on native Windows
|
| 296 |
+
conda install rust
|
| 297 |
+
# Run this command from the PyTorch directory after cloning the source code using the “Get the PyTorch Source“ section below
|
| 298 |
+
pip install -r requirements.txt
|
| 299 |
+
```
|
| 300 |
+
|
| 301 |
+
**On Linux**
|
| 302 |
+
|
| 303 |
+
```bash
|
| 304 |
+
pip install mkl-static mkl-include
|
| 305 |
+
# CUDA only: Add LAPACK support for the GPU if needed
|
| 306 |
+
conda install -c pytorch magma-cuda121 # or the magma-cuda* that matches your CUDA version from https://anaconda.org/pytorch/repo
|
| 307 |
+
|
| 308 |
+
# (optional) If using torch.compile with inductor/triton, install the matching version of triton
|
| 309 |
+
# Run from the pytorch directory after cloning
|
| 310 |
+
# For Intel GPU support, please explicitly `export USE_XPU=1` before running command.
|
| 311 |
+
make triton
|
| 312 |
+
```
|
| 313 |
+
|
| 314 |
+
**On MacOS**
|
| 315 |
+
|
| 316 |
+
```bash
|
| 317 |
+
# Add this package on intel x86 processor machines only
|
| 318 |
+
pip install mkl-static mkl-include
|
| 319 |
+
# Add these packages if torch.distributed is needed
|
| 320 |
+
conda install pkg-config libuv
|
| 321 |
+
```
|
| 322 |
+
|
| 323 |
+
**On Windows**
|
| 324 |
+
|
| 325 |
+
```bash
|
| 326 |
+
pip install mkl-static mkl-include
|
| 327 |
+
# Add these packages if torch.distributed is needed.
|
| 328 |
+
# Distributed package support on Windows is a prototype feature and is subject to changes.
|
| 329 |
+
conda install -c conda-forge libuv=1.39
|
| 330 |
+
```
|
| 331 |
+
|
| 332 |
+
#### Install PyTorch
|
| 333 |
+
**On Linux**
|
| 334 |
+
|
| 335 |
+
If you would like to compile PyTorch with [new C++ ABI](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html) enabled, then first run this command:
|
| 336 |
+
```bash
|
| 337 |
+
export _GLIBCXX_USE_CXX11_ABI=1
|
| 338 |
+
```
|
| 339 |
+
|
| 340 |
+
Please **note** that starting from PyTorch 2.5, the PyTorch build with XPU supports both new and old C++ ABIs. Previously, XPU only supported the new C++ ABI. If you want to compile with Intel GPU support, please follow [Intel GPU Support](#intel-gpu-support).
|
| 341 |
+
|
| 342 |
+
If you're compiling for AMD ROCm then first run this command:
|
| 343 |
+
```bash
|
| 344 |
+
# Only run this if you're compiling for ROCm
|
| 345 |
+
python tools/amd_build/build_amd.py
|
| 346 |
+
```
|
| 347 |
+
|
| 348 |
+
Install PyTorch
|
| 349 |
+
```bash
|
| 350 |
+
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
|
| 351 |
+
python setup.py develop
|
| 352 |
+
```
|
| 353 |
+
|
| 354 |
+
> _Aside:_ If you are using [Anaconda](https://www.anaconda.com/distribution/#download-section), you may experience an error caused by the linker:
|
| 355 |
+
>
|
| 356 |
+
> ```plaintext
|
| 357 |
+
> build/temp.linux-x86_64-3.7/torch/csrc/stub.o: file not recognized: file format not recognized
|
| 358 |
+
> collect2: error: ld returned 1 exit status
|
| 359 |
+
> error: command 'g++' failed with exit status 1
|
| 360 |
+
> ```
|
| 361 |
+
>
|
| 362 |
+
> This is caused by `ld` from the Conda environment shadowing the system `ld`. You should use a newer version of Python that fixes this issue. The recommended Python version is 3.8.1+.
|
| 363 |
+
|
| 364 |
+
**On macOS**
|
| 365 |
+
|
| 366 |
+
```bash
|
| 367 |
+
python3 setup.py develop
|
| 368 |
+
```
|
| 369 |
+
|
| 370 |
+
**On Windows**
|
| 371 |
+
|
| 372 |
+
If you want to build legacy python code, please refer to [Building on legacy code and CUDA](https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md#building-on-legacy-code-and-cuda)
|
| 373 |
+
|
| 374 |
+
**CPU-only builds**
|
| 375 |
+
|
| 376 |
+
In this mode PyTorch computations will run on your CPU, not your GPU
|
| 377 |
+
|
| 378 |
+
```cmd
|
| 379 |
+
python setup.py develop
|
| 380 |
+
```
|
| 381 |
+
|
| 382 |
+
Note on OpenMP: The desired OpenMP implementation is Intel OpenMP (iomp). In order to link against iomp, you'll need to manually download the library and set up the building environment by tweaking `CMAKE_INCLUDE_PATH` and `LIB`. The instruction [here](https://github.com/pytorch/pytorch/blob/main/docs/source/notes/windows.rst#building-from-source) is an example for setting up both MKL and Intel OpenMP. Without these configurations for CMake, Microsoft Visual C OpenMP runtime (vcomp) will be used.
|
| 383 |
+
|
| 384 |
+
**CUDA based build**
|
| 385 |
+
|
| 386 |
+
In this mode PyTorch computations will leverage your GPU via CUDA for faster number crunching
|
| 387 |
+
|
| 388 |
+
[NVTX](https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm) is needed to build Pytorch with CUDA.
|
| 389 |
+
NVTX is a part of the CUDA distribution, where it is called "Nsight Compute". To install it onto an already installed CUDA, run the CUDA installation once again and check the corresponding checkbox.
|
| 390 |
+
Make sure that CUDA with Nsight Compute is installed after Visual Studio.
|
| 391 |
+
|
| 392 |
+
Currently, VS 2017 / 2019, and Ninja are supported as the generator of CMake. If `ninja.exe` is detected in `PATH`, then Ninja will be used as the default generator, otherwise, it will use VS 2017 / 2019.
|
| 393 |
+
<br/> If Ninja is selected as the generator, the latest MSVC will get selected as the underlying toolchain.
|
| 394 |
+
|
| 395 |
+
Additional libraries such as
|
| 396 |
+
[Magma](https://developer.nvidia.com/magma), [oneDNN, a.k.a. MKLDNN or DNNL](https://github.com/oneapi-src/oneDNN), and [Sccache](https://github.com/mozilla/sccache) are often needed. Please refer to the [installation-helper](https://github.com/pytorch/pytorch/tree/main/.ci/pytorch/win-test-helpers/installation-helpers) to install them.
|
| 397 |
+
|
| 398 |
+
You can refer to the [build_pytorch.bat](https://github.com/pytorch/pytorch/blob/main/.ci/pytorch/win-test-helpers/build_pytorch.bat) script for some other environment variables configurations
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
```cmd
|
| 402 |
+
cmd
|
| 403 |
+
|
| 404 |
+
:: Set the environment variables after you have downloaded and unzipped the mkl package,
|
| 405 |
+
:: else CMake would throw an error as `Could NOT find OpenMP`.
|
| 406 |
+
set CMAKE_INCLUDE_PATH={Your directory}\mkl\include
|
| 407 |
+
set LIB={Your directory}\mkl\lib;%LIB%
|
| 408 |
+
|
| 409 |
+
:: Read the content in the previous section carefully before you proceed.
|
| 410 |
+
:: [Optional] If you want to override the underlying toolset used by Ninja and Visual Studio with CUDA, please run the following script block.
|
| 411 |
+
:: "Visual Studio 2019 Developer Command Prompt" will be run automatically.
|
| 412 |
+
:: Make sure you have CMake >= 3.12 before you do this when you use the Visual Studio generator.
|
| 413 |
+
set CMAKE_GENERATOR_TOOLSET_VERSION=14.27
|
| 414 |
+
set DISTUTILS_USE_SDK=1
|
| 415 |
+
for /f "usebackq tokens=*" %i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -version [15^,17^) -products * -latest -property installationPath`) do call "%i\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%CMAKE_GENERATOR_TOOLSET_VERSION%
|
| 416 |
+
|
| 417 |
+
:: [Optional] If you want to override the CUDA host compiler
|
| 418 |
+
set CUDAHOSTCXX=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\HostX64\x64\cl.exe
|
| 419 |
+
|
| 420 |
+
python setup.py develop
|
| 421 |
+
|
| 422 |
+
```
|
| 423 |
+
|
| 424 |
+
##### Adjust Build Options (Optional)
|
| 425 |
+
|
| 426 |
+
You can adjust the configuration of cmake variables optionally (without building first), by doing
|
| 427 |
+
the following. For example, adjusting the pre-detected directories for CuDNN or BLAS can be done
|
| 428 |
+
with such a step.
|
| 429 |
+
|
| 430 |
+
On Linux
|
| 431 |
+
```bash
|
| 432 |
+
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
|
| 433 |
+
python setup.py build --cmake-only
|
| 434 |
+
ccmake build # or cmake-gui build
|
| 435 |
+
```
|
| 436 |
+
|
| 437 |
+
On macOS
|
| 438 |
+
```bash
|
| 439 |
+
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
|
| 440 |
+
MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build --cmake-only
|
| 441 |
+
ccmake build # or cmake-gui build
|
| 442 |
+
```
|
| 443 |
+
|
| 444 |
+
### Docker Image
|
| 445 |
+
|
| 446 |
+
#### Using pre-built images
|
| 447 |
+
|
| 448 |
+
You can also pull a pre-built docker image from Docker Hub and run with docker v19.03+
|
| 449 |
+
|
| 450 |
+
```bash
|
| 451 |
+
docker run --gpus all --rm -ti --ipc=host pytorch/pytorch:latest
|
| 452 |
+
```
|
| 453 |
+
|
| 454 |
+
Please note that PyTorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g.
|
| 455 |
+
for multithreaded data loaders) the default shared memory segment size that container runs with is not enough, and you
|
| 456 |
+
should increase shared memory size either with `--ipc=host` or `--shm-size` command line options to `nvidia-docker run`.
|
| 457 |
+
|
| 458 |
+
#### Building the image yourself
|
| 459 |
+
|
| 460 |
+
**NOTE:** Must be built with a docker version > 18.06
|
| 461 |
+
|
| 462 |
+
The `Dockerfile` is supplied to build images with CUDA 11.1 support and cuDNN v8.
|
| 463 |
+
You can pass `PYTHON_VERSION=x.y` make variable to specify which Python version is to be used by Miniconda, or leave it
|
| 464 |
+
unset to use the default.
|
| 465 |
+
|
| 466 |
+
```bash
|
| 467 |
+
make -f docker.Makefile
|
| 468 |
+
# images are tagged as docker.io/${your_docker_username}/pytorch
|
| 469 |
+
```
|
| 470 |
+
|
| 471 |
+
You can also pass the `CMAKE_VARS="..."` environment variable to specify additional CMake variables to be passed to CMake during the build.
|
| 472 |
+
See [setup.py](./setup.py) for the list of available variables.
|
| 473 |
+
|
| 474 |
+
```bash
|
| 475 |
+
make -f docker.Makefile
|
| 476 |
+
```
|
| 477 |
+
|
| 478 |
+
### Building the Documentation
|
| 479 |
+
|
| 480 |
+
To build documentation in various formats, you will need [Sphinx](http://www.sphinx-doc.org) and the
|
| 481 |
+
readthedocs theme.
|
| 482 |
+
|
| 483 |
+
```bash
|
| 484 |
+
cd docs/
|
| 485 |
+
pip install -r requirements.txt
|
| 486 |
+
```
|
| 487 |
+
You can then build the documentation by running `make <format>` from the
|
| 488 |
+
`docs/` folder. Run `make` to get a list of all available output formats.
|
| 489 |
+
|
| 490 |
+
If you get a katex error run `npm install katex`. If it persists, try
|
| 491 |
+
`npm install -g katex`
|
| 492 |
+
|
| 493 |
+
> Note: if you installed `nodejs` with a different package manager (e.g.,
|
| 494 |
+
`conda`) then `npm` will probably install a version of `katex` that is not
|
| 495 |
+
compatible with your version of `nodejs` and doc builds will fail.
|
| 496 |
+
A combination of versions that is known to work is `node@6.13.1` and
|
| 497 |
+
`katex@0.13.18`. To install the latter with `npm` you can run
|
| 498 |
+
```npm install -g katex@0.13.18```
|
| 499 |
+
|
| 500 |
+
### Previous Versions
|
| 501 |
+
|
| 502 |
+
Installation instructions and binaries for previous PyTorch versions may be found
|
| 503 |
+
on [our website](https://pytorch.org/previous-versions).
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
## Getting Started
|
| 507 |
+
|
| 508 |
+
Three pointers to get you started:
|
| 509 |
+
- [Tutorials: get you started with understanding and using PyTorch](https://pytorch.org/tutorials/)
|
| 510 |
+
- [Examples: easy to understand PyTorch code across all domains](https://github.com/pytorch/examples)
|
| 511 |
+
- [The API Reference](https://pytorch.org/docs/)
|
| 512 |
+
- [Glossary](https://github.com/pytorch/pytorch/blob/main/GLOSSARY.md)
|
| 513 |
+
|
| 514 |
+
## Resources
|
| 515 |
+
|
| 516 |
+
* [PyTorch.org](https://pytorch.org/)
|
| 517 |
+
* [PyTorch Tutorials](https://pytorch.org/tutorials/)
|
| 518 |
+
* [PyTorch Examples](https://github.com/pytorch/examples)
|
| 519 |
+
* [PyTorch Models](https://pytorch.org/hub/)
|
| 520 |
+
* [Intro to Deep Learning with PyTorch from Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188)
|
| 521 |
+
* [Intro to Machine Learning with PyTorch from Udacity](https://www.udacity.com/course/intro-to-machine-learning-nanodegree--nd229)
|
| 522 |
+
* [Deep Neural Networks with PyTorch from Coursera](https://www.coursera.org/learn/deep-neural-networks-with-pytorch)
|
| 523 |
+
* [PyTorch Twitter](https://twitter.com/PyTorch)
|
| 524 |
+
* [PyTorch Blog](https://pytorch.org/blog/)
|
| 525 |
+
* [PyTorch YouTube](https://www.youtube.com/channel/UCWXI5YeOsh03QvJ59PMaXFw)
|
| 526 |
+
|
| 527 |
+
## Communication
|
| 528 |
+
* Forums: Discuss implementations, research, etc. https://discuss.pytorch.org
|
| 529 |
+
* GitHub Issues: Bug reports, feature requests, install issues, RFCs, thoughts, etc.
|
| 530 |
+
* Slack: The [PyTorch Slack](https://pytorch.slack.com/) hosts a primary audience of moderate to experienced PyTorch users and developers for general chat, online discussions, collaboration, etc. If you are a beginner looking for help, the primary medium is [PyTorch Forums](https://discuss.pytorch.org). If you need a slack invite, please fill this form: https://goo.gl/forms/PP1AGvNHpSaJP8to1
|
| 531 |
+
* Newsletter: No-noise, a one-way email newsletter with important announcements about PyTorch. You can sign-up here: https://eepurl.com/cbG0rv
|
| 532 |
+
* Facebook Page: Important announcements about PyTorch. https://www.facebook.com/pytorch
|
| 533 |
+
* For brand guidelines, please visit our website at [pytorch.org](https://pytorch.org/)
|
| 534 |
+
|
| 535 |
+
## Releases and Contributing
|
| 536 |
+
|
| 537 |
+
Typically, PyTorch has three minor releases a year. Please let us know if you encounter a bug by [filing an issue](https://github.com/pytorch/pytorch/issues).
|
| 538 |
+
|
| 539 |
+
We appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion.
|
| 540 |
+
|
| 541 |
+
If you plan to contribute new features, utility functions, or extensions to the core, please first open an issue and discuss the feature with us.
|
| 542 |
+
Sending a PR without discussion might end up resulting in a rejected PR because we might be taking the core in a different direction than you might be aware of.
|
| 543 |
+
|
| 544 |
+
To learn more about making a contribution to Pytorch, please see our [Contribution page](CONTRIBUTING.md). For more information about PyTorch releases, see [Release page](RELEASE.md).
|
| 545 |
+
|
| 546 |
+
## The Team
|
| 547 |
+
|
| 548 |
+
PyTorch is a community-driven project with several skillful engineers and researchers contributing to it.
|
| 549 |
+
|
| 550 |
+
PyTorch is currently maintained by [Soumith Chintala](http://soumith.ch), [Gregory Chanan](https://github.com/gchanan), [Dmytro Dzhulgakov](https://github.com/dzhulgakov), [Edward Yang](https://github.com/ezyang), and [Nikita Shulga](https://github.com/malfet) with major contributions coming from hundreds of talented individuals in various forms and means.
|
| 551 |
+
A non-exhaustive but growing list needs to mention: [Trevor Killeen](https://github.com/killeent), [Sasank Chilamkurthy](https://github.com/chsasank), [Sergey Zagoruyko](https://github.com/szagoruyko), [Adam Lerer](https://github.com/adamlerer), [Francisco Massa](https://github.com/fmassa), [Alykhan Tejani](https://github.com/alykhantejani), [Luca Antiga](https://github.com/lantiga), [Alban Desmaison](https://github.com/albanD), [Andreas Koepf](https://github.com/andreaskoepf), [James Bradbury](https://github.com/jamesb93), [Zeming Lin](https://github.com/ebetica), [Yuandong Tian](https://github.com/yuandong-tian), [Guillaume Lample](https://github.com/glample), [Marat Dukhan](https://github.com/Maratyszcza), [Natalia Gimelshein](https://github.com/ngimel), [Christian Sarofeen](https://github.com/csarofeen), [Martin Raison](https://github.com/martinraison), [Edward Yang](https://github.com/ezyang), [Zachary Devito](https://github.com/zdevito).
|
| 552 |
+
|
| 553 |
+
Note: This project is unrelated to [hughperkins/pytorch](https://github.com/hughperkins/pytorch) with the same name. Hugh is a valuable contributor to the Torch community and has helped with many things Torch and PyTorch.
|
| 554 |
+
|
| 555 |
+
## License
|
| 556 |
+
|
| 557 |
+
PyTorch has a BSD-style license, as found in the [LICENSE](LICENSE) file.
|
deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/RECORD
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deepseek/lib/python3.10/site-packages/torch-2.5.1.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
functorch
|
| 2 |
+
torch
|
| 3 |
+
torchgen
|
deepseek/lib/python3.10/site-packages/typing_extensions.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py
ADDED
|
@@ -0,0 +1,753 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
basinhopping: The basinhopping global optimization algorithm
|
| 3 |
+
"""
|
| 4 |
+
import numpy as np
|
| 5 |
+
import math
|
| 6 |
+
import inspect
|
| 7 |
+
import scipy.optimize
|
| 8 |
+
from scipy._lib._util import check_random_state
|
| 9 |
+
|
| 10 |
+
__all__ = ['basinhopping']
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
_params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY),
|
| 14 |
+
inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY))
|
| 15 |
+
_new_accept_test_signature = inspect.Signature(parameters=_params)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Storage:
    """
    Class used to store the lowest energy structure
    """

    def __init__(self, minres):
        # Seed the storage with the first minimization result.
        self._add(minres)

    def _add(self, minres):
        # Keep the result, but copy the coordinate array so later
        # in-place edits by the caller cannot corrupt the stored best.
        self.minres = minres
        self.minres.x = np.copy(minres.x)

    def update(self, minres):
        """Replace the stored result when *minres* is a successful improvement.

        Returns True if the stored minimum was replaced, else False.
        """
        if not minres.success:
            return False
        if minres.fun < self.minres.fun or not self.minres.success:
            self._add(minres)
            return True
        return False

    def get_lowest(self):
        """Return the best minimization result stored so far."""
        return self.minres
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class BasinHoppingRunner:
    """This class implements the core of the basinhopping algorithm.

    x0 : ndarray
        The starting coordinates.
    minimizer : callable
        The local minimizer, with signature ``result = minimizer(x)``.
        The return value is an `optimize.OptimizeResult` object.
    step_taking : callable
        This function displaces the coordinates randomly. Signature should
        be ``x_new = step_taking(x)``. Note that `x` may be modified in-place.
    accept_tests : list of callables
        Each test is passed the kwargs `f_new`, `x_new`, `f_old` and
        `x_old`. These tests will be used to judge whether or not to accept
        the step. The acceptable return values are True, False, or ``"force
        accept"``. If any of the tests return False then the step is rejected.
        If ``"force accept"``, then this will override any other tests in
        order to accept the step. This can be used, for example, to forcefully
        escape from a local minimum that ``basinhopping`` is trapped in.
    disp : bool, optional
        Display status messages.

    """
    def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):
        self.x = np.copy(x0)
        self.minimizer = minimizer
        self.step_taking = step_taking
        self.accept_tests = accept_tests
        self.disp = disp

        # number of basinhopping cycles performed so far
        self.nstep = 0

        # initialize return object
        self.res = scipy.optimize.OptimizeResult()
        self.res.minimization_failures = 0

        # do initial minimization
        minres = minimizer(self.x)
        if not minres.success:
            self.res.minimization_failures += 1
            if self.disp:
                print("warning: basinhopping: local minimization failure")
        self.x = np.copy(minres.x)
        self.energy = minres.fun
        self.incumbent_minres = minres  # best minimize result found so far
        if self.disp:
            print("basinhopping step %d: f %g" % (self.nstep, self.energy))

        # initialize storage class
        self.storage = Storage(minres)

        # Seed the evaluation counters only if the chosen local minimizer
        # reports them (not all scipy minimizers set njev/nhev).
        if hasattr(minres, "nfev"):
            self.res.nfev = minres.nfev
        if hasattr(minres, "njev"):
            self.res.njev = minres.njev
        if hasattr(minres, "nhev"):
            self.res.nhev = minres.nhev

    def _monte_carlo_step(self):
        """Do one Monte Carlo iteration

        Randomly displace the coordinates, minimize, and decide whether
        or not to accept the new coordinates.
        """
        # Take a random step.  Make a copy of x because the step_taking
        # algorithm might change x in place
        x_after_step = np.copy(self.x)
        x_after_step = self.step_taking(x_after_step)

        # do a local minimization
        minres = self.minimizer(x_after_step)
        x_after_quench = minres.x
        energy_after_quench = minres.fun
        if not minres.success:
            self.res.minimization_failures += 1
            if self.disp:
                print("warning: basinhopping: local minimization failure")
        # accumulate per-call evaluation counts into the overall result
        if hasattr(minres, "nfev"):
            self.res.nfev += minres.nfev
        if hasattr(minres, "njev"):
            self.res.njev += minres.njev
        if hasattr(minres, "nhev"):
            self.res.nhev += minres.nhev

        # accept the move based on self.accept_tests. If any test is False,
        # then reject the step.  If any test returns the special string
        # 'force accept', then accept the step regardless.  This can be used
        # to forcefully escape from a local minimum if normal basin hopping
        # steps are not sufficient.
        accept = True
        for test in self.accept_tests:
            # New-style tests take keyword-only res_new/res_old
            # OptimizeResults; old-style tests take f/x pairs.
            if inspect.signature(test) == _new_accept_test_signature:
                testres = test(res_new=minres, res_old=self.incumbent_minres)
            else:
                testres = test(f_new=energy_after_quench, x_new=x_after_quench,
                               f_old=self.energy, x_old=self.x)

            if testres == 'force accept':
                accept = True
                break
            elif testres is None:
                raise ValueError("accept_tests must return True, False, or "
                                 "'force accept'")
            elif not testres:
                # Keep looping: a later test may still 'force accept'.
                accept = False

        # Report the result of the acceptance test to the take step class.
        # This is for adaptive step taking
        if hasattr(self.step_taking, "report"):
            self.step_taking.report(accept, f_new=energy_after_quench,
                                    x_new=x_after_quench, f_old=self.energy,
                                    x_old=self.x)

        return accept, minres

    def one_cycle(self):
        """Do one cycle of the basinhopping algorithm
        """
        self.nstep += 1
        new_global_min = False

        accept, minres = self._monte_carlo_step()

        if accept:
            self.energy = minres.fun
            self.x = np.copy(minres.x)
            self.incumbent_minres = minres  # best minimize result found so far
            new_global_min = self.storage.update(minres)

        # print some information
        if self.disp:
            self.print_report(minres.fun, accept)
            if new_global_min:
                print("found new global minimum on step %d with function"
                      " value %g" % (self.nstep, self.energy))

        # save some variables as BasinHoppingRunner attributes
        self.xtrial = minres.x
        self.energy_trial = minres.fun
        self.accept = accept

        return new_global_min

    def print_report(self, energy_trial, accept):
        """print a status update"""
        minres = self.storage.get_lowest()
        print("basinhopping step %d: f %g trial_f %g accepted %d "
              " lowest_f %g" % (self.nstep, self.energy, energy_trial,
                                accept, minres.fun))
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class AdaptiveStepsize:
    """
    Class to implement adaptive stepsize.

    This class wraps the step taking class and modifies the stepsize to
    ensure the true acceptance rate is as close as possible to the target.

    Parameters
    ----------
    takestep : callable
        The step taking routine.  Must contain modifiable attribute
        takestep.stepsize
    accept_rate : float, optional
        The target step acceptance rate
    interval : int, optional
        Interval for how often to update the stepsize
    factor : float, optional
        The step size is multiplied or divided by this factor upon each
        update.
    verbose : bool, optional
        Print information about each update

    """

    def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9,
                 verbose=True):
        self.takestep = takestep
        self.target_accept_rate = accept_rate
        self.interval = interval
        self.factor = factor
        self.verbose = verbose

        # counters: steps since construction, total steps, accepted steps
        self.nstep = 0
        self.nstep_tot = 0
        self.naccept = 0

    def __call__(self, x):
        # Delegate so the wrapper is itself usable as a take_step callable.
        return self.take_step(x)

    def _adjust_step_size(self):
        previous = self.takestep.stepsize
        observed_rate = self.naccept / self.nstep
        if observed_rate > self.target_accept_rate:
            # Accepting too often usually means we are stuck inside one
            # basin -- widen the steps to escape.
            self.takestep.stepsize = previous / self.factor
        else:
            # Accepting too rarely -- shrink the steps.
            self.takestep.stepsize = previous * self.factor
        if self.verbose:
            print(f"adaptive stepsize: acceptance rate {observed_rate:f} target "
                  f"{self.target_accept_rate:f} new stepsize "
                  f"{self.takestep.stepsize:g} old stepsize {previous:g}")

    def take_step(self, x):
        self.nstep += 1
        self.nstep_tot += 1
        # Re-tune the stepsize once every `interval` calls.
        if not self.nstep % self.interval:
            self._adjust_step_size()
        return self.takestep(x)

    def report(self, accept, **kwargs):
        "called by basinhopping to report the result of the step"
        if accept:
            self.naccept += 1
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
class RandomDisplacement:
    """Add a random displacement of maximum size `stepsize` to each coordinate.

    Calling this updates `x` in-place.

    Parameters
    ----------
    stepsize : float, optional
        Maximum stepsize in any dimension
    random_gen : {None, int, `numpy.random.Generator`,
                  `numpy.random.RandomState`}, optional
        Seed or generator for the uniform draws; normalized through
        `check_random_state` (None uses the global RandomState singleton,
        an int seeds a fresh RandomState, an existing generator is used
        as-is).

    """

    def __init__(self, stepsize=0.5, random_gen=None):
        self.stepsize = stepsize
        self.random_gen = check_random_state(random_gen)

    def __call__(self, x):
        bound = self.stepsize
        # Perturb every coordinate by U(-stepsize, stepsize), in place.
        x += self.random_gen.uniform(-bound, bound, np.shape(x))
        return x
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class MinimizerWrapper:
    """
    wrap a minimizer function as a minimizer class
    """

    def __init__(self, minimizer, func=None, **kwargs):
        self.minimizer = minimizer
        self.func = func
        self.kwargs = kwargs

    def __call__(self, x0):
        # With no objective stored, the minimizer is assumed to already
        # close over it (e.g. a functools.partial); otherwise pass both.
        args = (x0,) if self.func is None else (self.func, x0)
        return self.minimizer(*args, **self.kwargs)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class Metropolis:
    """Metropolis acceptance criterion.

    Parameters
    ----------
    T : float
        The "temperature" parameter for the accept or reject criterion.
    random_gen : {None, int, `numpy.random.Generator`,
                  `numpy.random.RandomState`}, optional
        Seed or generator for the acceptance draws; normalized through
        `check_random_state` (None uses the global RandomState singleton,
        an int seeds a fresh RandomState, an existing generator is used
        as-is).

    """

    def __init__(self, T, random_gen=None):
        # T == 0 corresponds to monotonic basin hopping ("MBH can be
        # regarded as a special case of the BH framework with the
        # Metropolis criterion, where temperature T = 0"): beta = +inf
        # rejects every uphill step and avoids a ZeroDivisionError.
        self.beta = float('inf') if T == 0 else 1.0 / T
        self.random_gen = check_random_state(random_gen)

    def accept_reject(self, res_new, res_old):
        """
        Assuming the local search underlying res_new was successful:
        If new energy is lower than old, it will always be accepted.
        If new is higher than old, there is a chance it will be accepted,
        less likely for larger differences.
        """
        with np.errstate(invalid='ignore'):
            # Energies may be 1-length arrays; equal energies give a
            # difference of 0, and array([0]) * float('inf') emits
            #
            #   RuntimeWarning: invalid value encountered in multiply
            #
            # Silencing it means a step on a flat plane is always accepted,
            # helping the algorithm move off the plane.
            scaled_drop = -(res_new.fun - res_old.fun) * self.beta
            w = math.exp(min(0, scaled_drop))

        threshold = self.random_gen.uniform()
        # A new failed local search is only acceptable if the old one
        # failed too.
        return w >= threshold and (res_new.success or not res_old.success)

    def __call__(self, *, res_new, res_old):
        """
        res_new and res_old are mandatory keyword arguments
        """
        return bool(self.accept_reject(res_new, res_old))
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,
                 minimizer_kwargs=None, take_step=None, accept_test=None,
                 callback=None, interval=50, disp=False, niter_success=None,
                 seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9):
    """Find the global minimum of a function using the basin-hopping algorithm.

    Basin-hopping is a two-phase method that combines a global stepping
    algorithm with local minimization at each step. Designed to mimic
    the natural process of energy minimization of clusters of atoms, it works
    well for similar problems with "funnel-like, but rugged" energy landscapes
    [5]_.

    As the step-taking, step acceptance, and minimization methods are all
    customizable, this function can also be used to implement other two-phase
    methods.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        Function to be optimized. ``args`` can be passed as an optional item
        in the dict `minimizer_kwargs`
    x0 : array_like
        Initial guess.
    niter : integer, optional
        The number of basin-hopping iterations. There will be a total of
        ``niter + 1`` runs of the local minimizer.
    T : float, optional
        The "temperature" parameter for the acceptance or rejection criterion.
        Higher "temperatures" mean that larger jumps in function value will be
        accepted. For best results `T` should be comparable to the
        separation (in function value) between local minima.
    stepsize : float, optional
        Maximum step size for use in the random displacement.
    minimizer_kwargs : dict, optional
        Extra keyword arguments to be passed to the local minimizer
        `scipy.optimize.minimize` Some important options could be:

            method : str
                The minimization method (e.g. ``"L-BFGS-B"``)
            args : tuple
                Extra arguments passed to the objective function (`func`) and
                its derivatives (Jacobian, Hessian).

    take_step : callable ``take_step(x)``, optional
        Replace the default step-taking routine with this routine. The default
        step-taking routine is a random displacement of the coordinates, but
        other step-taking algorithms may be better for some systems.
        `take_step` can optionally have the attribute ``take_step.stepsize``.
        If this attribute exists, then `basinhopping` will adjust
        ``take_step.stepsize`` in order to try to optimize the global minimum
        search.
    accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional
        Define a test which will be used to judge whether to accept the
        step. This will be used in addition to the Metropolis test based on
        "temperature" `T`. The acceptable return values are True,
        False, or ``"force accept"``. If any of the tests return False
        then the step is rejected. If the latter, then this will override any
        other tests in order to accept the step. This can be used, for example,
        to forcefully escape from a local minimum that `basinhopping` is
        trapped in.
    callback : callable, ``callback(x, f, accept)``, optional
        A callback function which will be called for all minima found. ``x``
        and ``f`` are the coordinates and function value of the trial minimum,
        and ``accept`` is whether that minimum was accepted. This can
        be used, for example, to save the lowest N minima found. Also,
        `callback` can be used to specify a user defined stop criterion by
        optionally returning True to stop the `basinhopping` routine.
    interval : integer, optional
        interval for how often to update the `stepsize`
    disp : bool, optional
        Set to True to print status messages
    niter_success : integer, optional
        Stop the run if the global minimum candidate remains the same for this
        number of iterations.
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        Specify `seed` for repeatable minimizations. The random numbers
        generated with this seed only affect the default Metropolis
        `accept_test` and the default `take_step`. If you supply your own
        `take_step` and `accept_test`, and these functions use random
        number generation, then those functions are responsible for the state
        of their random number generator.
    target_accept_rate : float, optional
        The target acceptance rate that is used to adjust the `stepsize`.
        If the current acceptance rate is greater than the target,
        then the `stepsize` is increased. Otherwise, it is decreased.
        Range is (0, 1). Default is 0.5.

        .. versionadded:: 1.8.0

    stepwise_factor : float, optional
        The `stepsize` is multiplied or divided by this stepwise factor upon
        each update. Range is (0, 1). Default is 0.9.

        .. versionadded:: 1.8.0

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a `OptimizeResult` object.
        Important attributes are: ``x`` the solution array, ``fun`` the value
        of the function at the solution, and ``message`` which describes the
        cause of the termination. The ``OptimizeResult`` object returned by the
        selected minimizer at the lowest minimum is also contained within this
        object and can be accessed through the ``lowest_optimization_result``
        attribute. See `OptimizeResult` for a description of other attributes.

    See Also
    --------
    minimize :
        The local minimization function called once for each basinhopping step.
        `minimizer_kwargs` is passed to this routine.

    Notes
    -----
    Basin-hopping is a stochastic algorithm which attempts to find the global
    minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
    [4]_. The algorithm in its current form was described by David Wales and
    Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.

    The algorithm is iterative with each cycle composed of the following
    features

    1) random perturbation of the coordinates

    2) local minimization

    3) accept or reject the new coordinates based on the minimized function
       value

    The acceptance test used here is the Metropolis criterion of standard Monte
    Carlo algorithms, although there are many other possibilities [3]_.

    This global minimization method has been shown to be extremely efficient
    for a wide variety of problems in physics and chemistry. It is
    particularly useful when the function has many minima separated by large
    barriers. See the `Cambridge Cluster Database
    <https://www-wales.ch.cam.ac.uk/CCD.html>`_ for databases of molecular
    systems that have been optimized primarily using basin-hopping. This
    database includes minimization problems exceeding 300 degrees of freedom.

    See the free software program `GMIN <https://www-wales.ch.cam.ac.uk/GMIN>`_
    for a Fortran implementation of basin-hopping. This implementation has many
    variations of the procedure described above, including more
    advanced step taking algorithms and alternate acceptance criterion.

    For stochastic global optimization there is no way to determine if the true
    global minimum has actually been found. Instead, as a consistency check,
    the algorithm can be run from a number of different random starting points
    to ensure the lowest minimum found in each example has converged to the
    global minimum. For this reason, `basinhopping` will by default simply
    run for the number of iterations `niter` and return the lowest minimum
    found. It is left to the user to ensure that this is in fact the global
    minimum.

    Choosing `stepsize`: This is a crucial parameter in `basinhopping` and
    depends on the problem being solved. The step is chosen uniformly in the
    region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it
    should be comparable to the typical separation (in argument values) between
    local minima of the function being optimized. `basinhopping` will, by
    default, adjust `stepsize` to find an optimal value, but this may take
    many iterations. You will get quicker results if you set a sensible
    initial value for ``stepsize``.

    Choosing `T`: The parameter `T` is the "temperature" used in the
    Metropolis criterion. Basinhopping steps are always accepted if
    ``func(xnew) < func(xold)``. Otherwise, they are accepted with
    probability::

        exp( -(func(xnew) - func(xold)) / T )

    So, for best results, `T` should be comparable to the typical
    difference (in function values) between local minima. (The height of
    "walls" between local minima is irrelevant.)

    If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
    steps that increase energy are rejected.

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
        Cambridge, UK.
    .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
        the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
        110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111.
    .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
        multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
        1987, 84, 6611.
    .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
        crystals, and biomolecules, Science, 1999, 285, 1368.
    .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu, A., Basin Hopping as
        a General and Versatile Optimization Framework for the Characterization
        of Biological Macromolecules, Advances in Artificial Intelligence,
        Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832`

    Examples
    --------
    The following example is a 1-D minimization problem, with many
    local minima superimposed on a parabola.

    >>> import numpy as np
    >>> from scipy.optimize import basinhopping
    >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
    >>> x0 = [1.]

    Basinhopping, internally, uses a local minimization algorithm. We will use
    the parameter `minimizer_kwargs` to tell basinhopping which algorithm to
    use and how to set up that minimizer. This parameter will be passed to
    `scipy.optimize.minimize`.

    >>> minimizer_kwargs = {"method": "BFGS"}
    >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=200)
    >>> # the global minimum is:
    >>> ret.x, ret.fun
    -0.1951, -1.0009

    Next consider a 2-D minimization problem. Also, this time, we
    will use gradient information to significantly speed up the search.

    >>> def func2d(x):
    ...     f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +
    ...                                                            0.2) * x[0]
    ...     df = np.zeros(2)
    ...     df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
    ...     df[1] = 2. * x[1] + 0.2
    ...     return f, df

    We'll also use a different local minimization algorithm. Also, we must tell
    the minimizer that our function returns both energy and gradient (Jacobian).

    >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
    >>> x0 = [1.0, 1.0]
    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=200)
    >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
    ...                                                          ret.x[1],
    ...                                                          ret.fun))
    global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109

    Here is an example using a custom step-taking routine. Imagine you want
    the first coordinate to take larger steps than the rest of the coordinates.
    This can be implemented like so:

    >>> class MyTakeStep:
    ...    def __init__(self, stepsize=0.5):
    ...        self.stepsize = stepsize
    ...        self.rng = np.random.default_rng()
    ...    def __call__(self, x):
    ...        s = self.stepsize
    ...        x[0] += self.rng.uniform(-2.*s, 2.*s)
    ...        x[1:] += self.rng.uniform(-s, s, x[1:].shape)
    ...        return x

    Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude
    of `stepsize` to optimize the search. We'll use the same 2-D function as
    before

    >>> mytakestep = MyTakeStep()
    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=200, take_step=mytakestep)
    >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
    ...                                                          ret.x[1],
    ...                                                          ret.fun))
    global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109

    Now, let's do an example using a custom callback function which prints the
    value of every minimum found

    >>> def print_fun(x, f, accepted):
    ...         print("at minimum %.4f accepted %d" % (f, int(accepted)))

    We'll run it for only 10 basinhopping steps this time.

    >>> rng = np.random.default_rng()
    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=10, callback=print_fun, seed=rng)
    at minimum 0.4159 accepted 1
    at minimum -0.4317 accepted 1
    at minimum -1.0109 accepted 1
    at minimum -0.9073 accepted 1
    at minimum -0.4317 accepted 0
    at minimum -0.1021 accepted 1
    at minimum -0.7425 accepted 1
    at minimum -0.9073 accepted 1
    at minimum -0.4317 accepted 0
    at minimum -0.7425 accepted 1
    at minimum -0.9073 accepted 1

    The minimum at -1.0109 is actually the global minimum, found already on the
    8th iteration.

    """  # numpy/numpydoc#87  # noqa: E501
    if target_accept_rate <= 0. or target_accept_rate >= 1.:
        raise ValueError('target_accept_rate has to be in range (0, 1)')
    if stepwise_factor <= 0. or stepwise_factor >= 1.:
        raise ValueError('stepwise_factor has to be in range (0, 1)')

    x0 = np.array(x0)

    # set up the np.random generator
    rng = check_random_state(seed)

    # set up minimizer
    if minimizer_kwargs is None:
        minimizer_kwargs = dict()
    wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
                                         **minimizer_kwargs)

    # set up step-taking algorithm
    if take_step is not None:
        if not callable(take_step):
            raise TypeError("take_step must be callable")
        # if take_step.stepsize exists then use AdaptiveStepsize to control
        # take_step.stepsize
        if hasattr(take_step, "stepsize"):
            take_step_wrapped = AdaptiveStepsize(
                take_step, interval=interval,
                accept_rate=target_accept_rate,
                factor=stepwise_factor,
                verbose=disp)
        else:
            take_step_wrapped = take_step
    else:
        # use default
        displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
        take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
                                             accept_rate=target_accept_rate,
                                             factor=stepwise_factor,
                                             verbose=disp)

    # set up accept tests
    accept_tests = []
    if accept_test is not None:
        if not callable(accept_test):
            raise TypeError("accept_test must be callable")
        accept_tests = [accept_test]

    # use default: the Metropolis criterion is always appended, so a
    # user-supplied accept_test runs *in addition to* it, not instead of it.
    metropolis = Metropolis(T, random_gen=rng)
    accept_tests.append(metropolis)

    if niter_success is None:
        # larger than niter, so the "same candidate for niter_success
        # iterations" stop condition can never trigger by default
        niter_success = niter + 2

    bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
                            accept_tests, disp=disp)

    # The wrapped minimizer is called once during construction of
    # BasinHoppingRunner, so run the callback
    if callable(callback):
        callback(bh.storage.minres.x, bh.storage.minres.fun, True)

    # start main iteration loop
    count, i = 0, 0
    message = ["requested number of basinhopping iterations completed"
               " successfully"]
    for i in range(niter):
        new_global_min = bh.one_cycle()

        if callable(callback):
            # should we pass a copy of x?
            val = callback(bh.xtrial, bh.energy_trial, bh.accept)
            if val is not None:
                if val:
                    # BUG FIX: the two adjacent literals used to read
                    # "...stop early by" "returning True", which concatenates
                    # to "...early byreturning True"; a trailing space was
                    # added to the first literal.
                    message = ["callback function requested stop early by "
                               "returning True"]
                    break

        count += 1
        if new_global_min:
            count = 0
        elif count > niter_success:
            message = ["success condition satisfied"]
            break

    # prepare return object
    res = bh.res
    res.lowest_optimization_result = bh.storage.get_lowest()
    res.x = np.copy(res.lowest_optimization_result.x)
    res.fun = res.lowest_optimization_result.fun
    res.message = message
    res.nit = i + 1
    res.success = res.lowest_optimization_result.success
    return res
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_cobyqa_py.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from ._optimize import _check_unknown_options
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _minimize_cobyqa(fun, x0, args=(), bounds=None, constraints=(),
                     callback=None, disp=False, maxfev=None, maxiter=None,
                     f_target=-np.inf, feasibility_tol=1e-8,
                     initial_tr_radius=1.0, final_tr_radius=1e-6, scale=False,
                     **unknown_options):
    """
    Minimize a scalar function of one or more variables using the
    Constrained Optimization BY Quadratic Approximations (COBYQA) algorithm [1]_.

    .. versionadded:: 1.14.0

    Options
    -------
    disp : bool
        Set to True to print information about the optimization procedure.
    maxfev : int
        Maximum number of function evaluations.
    maxiter : int
        Maximum number of iterations.
    f_target : float
        Target value for the objective function. The optimization procedure is
        terminated when the objective function value of a feasible point (see
        `feasibility_tol` below) is less than or equal to this target.
    feasibility_tol : float
        Absolute tolerance for the constraint violation.
    initial_tr_radius : float
        Initial trust-region radius. Typically, this value should be in the
        order of one tenth of the greatest expected change to the variables.
    final_tr_radius : float
        Final trust-region radius. It should indicate the accuracy required in
        the final values of the variables. If provided, this option overrides
        the value of `tol` in the `minimize` function.
    scale : bool
        Set to True to scale the variables according to the bounds. If True and
        if all the lower and upper bounds are finite, the variables are scaled
        to be within the range :math:`[-1, 1]`. If any of the lower or upper
        bounds is infinite, the variables are not scaled.

    References
    ----------
    .. [1] COBYQA
           https://www.cobyqa.com/stable/
    """
    from .._lib.cobyqa import minimize  # import here to avoid circular imports

    _check_unknown_options(unknown_options)

    # Fill in the evaluation/iteration budgets that depend on the problem
    # dimension when the caller did not supply them.
    if maxfev is None:
        maxfev = 500 * len(x0)
    if maxiter is None:
        maxiter = 1000 * len(x0)

    # Translate the SciPy-style option names into the ones the COBYQA
    # package expects, coercing each value to its documented type.
    options = {
        'disp': bool(disp),
        'maxfev': int(maxfev),
        'maxiter': int(maxiter),
        'target': float(f_target),
        'feasibility_tol': float(feasibility_tol),
        'radius_init': float(initial_tr_radius),
        'radius_final': float(final_tr_radius),
        'scale': bool(scale),
    }
    return minimize(fun, x0, args, bounds, constraints, callback, options)
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py
ADDED
|
@@ -0,0 +1,728 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
# 2023 - ported from minpack2.dcsrch, dcstep (Fortran) to Python
|
| 5 |
+
c MINPACK-1 Project. June 1983.
|
| 6 |
+
c Argonne National Laboratory.
|
| 7 |
+
c Jorge J. More' and David J. Thuente.
|
| 8 |
+
c
|
| 9 |
+
c MINPACK-2 Project. November 1993.
|
| 10 |
+
c Argonne National Laboratory and University of Minnesota.
|
| 11 |
+
c Brett M. Averick, Richard G. Carter, and Jorge J. More'.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
# NOTE this file was linted by black on first commit, and can be kept that way.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class DCSRCH:
|
| 18 |
+
"""
|
| 19 |
+
Parameters
|
| 20 |
+
----------
|
| 21 |
+
phi : callable phi(alpha)
|
| 22 |
+
Function at point `alpha`
|
| 23 |
+
derphi : callable phi'(alpha)
|
| 24 |
+
Objective function derivative. Returns a scalar.
|
| 25 |
+
ftol : float
|
| 26 |
+
A nonnegative tolerance for the sufficient decrease condition.
|
| 27 |
+
gtol : float
|
| 28 |
+
A nonnegative tolerance for the curvature condition.
|
| 29 |
+
xtol : float
|
| 30 |
+
A nonnegative relative tolerance for an acceptable step. The
|
| 31 |
+
subroutine exits with a warning if the relative difference between
|
| 32 |
+
sty and stx is less than xtol.
|
| 33 |
+
stpmin : float
|
| 34 |
+
A nonnegative lower bound for the step.
|
| 35 |
+
stpmax :
|
| 36 |
+
A nonnegative upper bound for the step.
|
| 37 |
+
|
| 38 |
+
Notes
|
| 39 |
+
-----
|
| 40 |
+
|
| 41 |
+
This subroutine finds a step that satisfies a sufficient
|
| 42 |
+
decrease condition and a curvature condition.
|
| 43 |
+
|
| 44 |
+
Each call of the subroutine updates an interval with
|
| 45 |
+
endpoints stx and sty. The interval is initially chosen
|
| 46 |
+
so that it contains a minimizer of the modified function
|
| 47 |
+
|
| 48 |
+
psi(stp) = f(stp) - f(0) - ftol*stp*f'(0).
|
| 49 |
+
|
| 50 |
+
If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
|
| 51 |
+
interval is chosen so that it contains a minimizer of f.
|
| 52 |
+
|
| 53 |
+
The algorithm is designed to find a step that satisfies
|
| 54 |
+
the sufficient decrease condition
|
| 55 |
+
|
| 56 |
+
f(stp) <= f(0) + ftol*stp*f'(0),
|
| 57 |
+
|
| 58 |
+
and the curvature condition
|
| 59 |
+
|
| 60 |
+
abs(f'(stp)) <= gtol*abs(f'(0)).
|
| 61 |
+
|
| 62 |
+
If ftol is less than gtol and if, for example, the function
|
| 63 |
+
is bounded below, then there is always a step which satisfies
|
| 64 |
+
both conditions.
|
| 65 |
+
|
| 66 |
+
If no step can be found that satisfies both conditions, then
|
| 67 |
+
the algorithm stops with a warning. In this case stp only
|
| 68 |
+
satisfies the sufficient decrease condition.
|
| 69 |
+
|
| 70 |
+
A typical invocation of dcsrch has the following outline:
|
| 71 |
+
|
| 72 |
+
Evaluate the function at stp = 0.0d0; store in f.
|
| 73 |
+
Evaluate the gradient at stp = 0.0d0; store in g.
|
| 74 |
+
Choose a starting step stp.
|
| 75 |
+
|
| 76 |
+
task = 'START'
|
| 77 |
+
10 continue
|
| 78 |
+
call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax,
|
| 79 |
+
isave,dsave)
|
| 80 |
+
if (task .eq. 'FG') then
|
| 81 |
+
Evaluate the function and the gradient at stp
|
| 82 |
+
go to 10
|
| 83 |
+
end if
|
| 84 |
+
|
| 85 |
+
NOTE: The user must not alter work arrays between calls.
|
| 86 |
+
|
| 87 |
+
The subroutine statement is
|
| 88 |
+
|
| 89 |
+
subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax,
|
| 90 |
+
task,isave,dsave)
|
| 91 |
+
where
|
| 92 |
+
|
| 93 |
+
stp is a double precision variable.
|
| 94 |
+
On entry stp is the current estimate of a satisfactory
|
| 95 |
+
step. On initial entry, a positive initial estimate
|
| 96 |
+
must be provided.
|
| 97 |
+
On exit stp is the current estimate of a satisfactory step
|
| 98 |
+
if task = 'FG'. If task = 'CONV' then stp satisfies
|
| 99 |
+
the sufficient decrease and curvature condition.
|
| 100 |
+
|
| 101 |
+
f is a double precision variable.
|
| 102 |
+
On initial entry f is the value of the function at 0.
|
| 103 |
+
On subsequent entries f is the value of the
|
| 104 |
+
function at stp.
|
| 105 |
+
On exit f is the value of the function at stp.
|
| 106 |
+
|
| 107 |
+
g is a double precision variable.
|
| 108 |
+
On initial entry g is the derivative of the function at 0.
|
| 109 |
+
On subsequent entries g is the derivative of the
|
| 110 |
+
function at stp.
|
| 111 |
+
On exit g is the derivative of the function at stp.
|
| 112 |
+
|
| 113 |
+
ftol is a double precision variable.
|
| 114 |
+
On entry ftol specifies a nonnegative tolerance for the
|
| 115 |
+
sufficient decrease condition.
|
| 116 |
+
On exit ftol is unchanged.
|
| 117 |
+
|
| 118 |
+
gtol is a double precision variable.
|
| 119 |
+
On entry gtol specifies a nonnegative tolerance for the
|
| 120 |
+
curvature condition.
|
| 121 |
+
On exit gtol is unchanged.
|
| 122 |
+
|
| 123 |
+
xtol is a double precision variable.
|
| 124 |
+
On entry xtol specifies a nonnegative relative tolerance
|
| 125 |
+
for an acceptable step. The subroutine exits with a
|
| 126 |
+
warning if the relative difference between sty and stx
|
| 127 |
+
is less than xtol.
|
| 128 |
+
|
| 129 |
+
On exit xtol is unchanged.
|
| 130 |
+
|
| 131 |
+
task is a character variable of length at least 60.
|
| 132 |
+
On initial entry task must be set to 'START'.
|
| 133 |
+
On exit task indicates the required action:
|
| 134 |
+
|
| 135 |
+
If task(1:2) = 'FG' then evaluate the function and
|
| 136 |
+
derivative at stp and call dcsrch again.
|
| 137 |
+
|
| 138 |
+
If task(1:4) = 'CONV' then the search is successful.
|
| 139 |
+
|
| 140 |
+
If task(1:4) = 'WARN' then the subroutine is not able
|
| 141 |
+
to satisfy the convergence conditions. The exit value of
|
| 142 |
+
stp contains the best point found during the search.
|
| 143 |
+
|
| 144 |
+
If task(1:5) = 'ERROR' then there is an error in the
|
| 145 |
+
input arguments.
|
| 146 |
+
|
| 147 |
+
On exit with convergence, a warning or an error, the
|
| 148 |
+
variable task contains additional information.
|
| 149 |
+
|
| 150 |
+
stpmin is a double precision variable.
|
| 151 |
+
On entry stpmin is a nonnegative lower bound for the step.
|
| 152 |
+
On exit stpmin is unchanged.
|
| 153 |
+
|
| 154 |
+
stpmax is a double precision variable.
|
| 155 |
+
On entry stpmax is a nonnegative upper bound for the step.
|
| 156 |
+
On exit stpmax is unchanged.
|
| 157 |
+
|
| 158 |
+
isave is an integer work array of dimension 2.
|
| 159 |
+
|
| 160 |
+
dsave is a double precision work array of dimension 13.
|
| 161 |
+
|
| 162 |
+
Subprograms called
|
| 163 |
+
|
| 164 |
+
MINPACK-2 ... dcstep
|
| 165 |
+
MINPACK-1 Project. June 1983.
|
| 166 |
+
Argonne National Laboratory.
|
| 167 |
+
Jorge J. More' and David J. Thuente.
|
| 168 |
+
|
| 169 |
+
MINPACK-2 Project. November 1993.
|
| 170 |
+
Argonne National Laboratory and University of Minnesota.
|
| 171 |
+
Brett M. Averick, Richard G. Carter, and Jorge J. More'.
|
| 172 |
+
"""
|
| 173 |
+
|
| 174 |
+
def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax):
|
| 175 |
+
self.stage = None
|
| 176 |
+
self.ginit = None
|
| 177 |
+
self.gtest = None
|
| 178 |
+
self.gx = None
|
| 179 |
+
self.gy = None
|
| 180 |
+
self.finit = None
|
| 181 |
+
self.fx = None
|
| 182 |
+
self.fy = None
|
| 183 |
+
self.stx = None
|
| 184 |
+
self.sty = None
|
| 185 |
+
self.stmin = None
|
| 186 |
+
self.stmax = None
|
| 187 |
+
self.width = None
|
| 188 |
+
self.width1 = None
|
| 189 |
+
|
| 190 |
+
# leave all assessment of tolerances/limits to the first call of
|
| 191 |
+
# this object
|
| 192 |
+
self.ftol = ftol
|
| 193 |
+
self.gtol = gtol
|
| 194 |
+
self.xtol = xtol
|
| 195 |
+
self.stpmin = stpmin
|
| 196 |
+
self.stpmax = stpmax
|
| 197 |
+
|
| 198 |
+
self.phi = phi
|
| 199 |
+
self.derphi = derphi
|
| 200 |
+
|
| 201 |
+
def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100):
|
| 202 |
+
"""
|
| 203 |
+
Parameters
|
| 204 |
+
----------
|
| 205 |
+
alpha1 : float
|
| 206 |
+
alpha1 is the current estimate of a satisfactory
|
| 207 |
+
step. A positive initial estimate must be provided.
|
| 208 |
+
phi0 : float
|
| 209 |
+
the value of `phi` at 0 (if known).
|
| 210 |
+
derphi0 : float
|
| 211 |
+
the derivative of `derphi` at 0 (if known).
|
| 212 |
+
maxiter : int
|
| 213 |
+
|
| 214 |
+
Returns
|
| 215 |
+
-------
|
| 216 |
+
alpha : float
|
| 217 |
+
Step size, or None if no suitable step was found.
|
| 218 |
+
phi : float
|
| 219 |
+
Value of `phi` at the new point `alpha`.
|
| 220 |
+
phi0 : float
|
| 221 |
+
Value of `phi` at `alpha=0`.
|
| 222 |
+
task : bytes
|
| 223 |
+
On exit task indicates status information.
|
| 224 |
+
|
| 225 |
+
If task[:4] == b'CONV' then the search is successful.
|
| 226 |
+
|
| 227 |
+
If task[:4] == b'WARN' then the subroutine is not able
|
| 228 |
+
to satisfy the convergence conditions. The exit value of
|
| 229 |
+
stp contains the best point found during the search.
|
| 230 |
+
|
| 231 |
+
If task[:5] == b'ERROR' then there is an error in the
|
| 232 |
+
input arguments.
|
| 233 |
+
"""
|
| 234 |
+
if phi0 is None:
|
| 235 |
+
phi0 = self.phi(0.0)
|
| 236 |
+
if derphi0 is None:
|
| 237 |
+
derphi0 = self.derphi(0.0)
|
| 238 |
+
|
| 239 |
+
phi1 = phi0
|
| 240 |
+
derphi1 = derphi0
|
| 241 |
+
|
| 242 |
+
task = b"START"
|
| 243 |
+
for i in range(maxiter):
|
| 244 |
+
stp, phi1, derphi1, task = self._iterate(
|
| 245 |
+
alpha1, phi1, derphi1, task
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
if not np.isfinite(stp):
|
| 249 |
+
task = b"WARN"
|
| 250 |
+
stp = None
|
| 251 |
+
break
|
| 252 |
+
|
| 253 |
+
if task[:2] == b"FG":
|
| 254 |
+
alpha1 = stp
|
| 255 |
+
phi1 = self.phi(stp)
|
| 256 |
+
derphi1 = self.derphi(stp)
|
| 257 |
+
else:
|
| 258 |
+
break
|
| 259 |
+
else:
|
| 260 |
+
# maxiter reached, the line search did not converge
|
| 261 |
+
stp = None
|
| 262 |
+
task = b"WARNING: dcsrch did not converge within max iterations"
|
| 263 |
+
|
| 264 |
+
if task[:5] == b"ERROR" or task[:4] == b"WARN":
|
| 265 |
+
stp = None # failed
|
| 266 |
+
|
| 267 |
+
return stp, phi1, phi0, task
|
| 268 |
+
|
| 269 |
+
    def _iterate(self, stp, f, g, task):
        """Perform one round of the MINPACK-2 dcsrch state machine.

        Parameters
        ----------
        stp : float
            The current estimate of a satisfactory step. On initial entry, a
            positive initial estimate must be provided.
        f : float
            On first call f is the value of the function at 0. On subsequent
            entries f should be the value of the function at stp.
        g : float
            On initial entry g is the derivative of the function at 0. On
            subsequent entries g is the derivative of the function at stp.
        task : bytes
            On initial entry task must be set to 'START'.

            On exit with convergence, a warning or an error, the
            variable task contains additional information.


        Returns
        -------
        stp, f, g, task: tuple

            stp : float
                the current estimate of a satisfactory step if task = 'FG'. If
                task = 'CONV' then stp satisfies the sufficient decrease and
                curvature condition.
            f : float
                the value of the function at stp.
            g : float
                the derivative of the function at stp.
            task : bytes
                On exit task indicates the required action:

                If task(1:2) == b'FG' then evaluate the function and
                derivative at stp and call dcsrch again.

                If task(1:4) == b'CONV' then the search is successful.

                If task(1:4) == b'WARN' then the subroutine is not able
                to satisfy the convergence conditions. The exit value of
                stp contains the best point found during the search.

                If task(1:5) == b'ERROR' then there is an error in the
                input arguments.
        """
        # Constants from the Fortran dcsrch: bisection factor, interval
        # shrink threshold, and lower/upper extrapolation factors.
        p5 = 0.5
        p66 = 0.66
        xtrapl = 1.1
        xtrapu = 4.0

        if task[:5] == b"START":
            # Validate inputs; later checks overwrite earlier messages, so
            # the *last* failing check determines the reported error.
            if stp < self.stpmin:
                task = b"ERROR: STP .LT. STPMIN"
            if stp > self.stpmax:
                task = b"ERROR: STP .GT. STPMAX"
            if g >= 0:
                task = b"ERROR: INITIAL G .GE. ZERO"
            if self.ftol < 0:
                task = b"ERROR: FTOL .LT. ZERO"
            if self.gtol < 0:
                task = b"ERROR: GTOL .LT. ZERO"
            if self.xtol < 0:
                task = b"ERROR: XTOL .LT. ZERO"
            if self.stpmin < 0:
                task = b"ERROR: STPMIN .LT. ZERO"
            if self.stpmax < self.stpmin:
                task = b"ERROR: STPMAX .LT. STPMIN"

            if task[:5] == b"ERROR":
                return stp, f, g, task

            # Initialize local variables.
            # NOTE: self.brackt is first created here (not in __init__).

            self.brackt = False
            self.stage = 1
            self.finit = f
            self.ginit = g
            # gtest is the slope of the sufficient-decrease (Armijo) line.
            self.gtest = self.ftol * self.ginit
            self.width = self.stpmax - self.stpmin
            self.width1 = self.width / p5

            # The variables stx, fx, gx contain the values of the step,
            # function, and derivative at the best step.
            # The variables sty, fy, gy contain the value of the step,
            # function, and derivative at sty.
            # The variables stp, f, g contain the values of the step,
            # function, and derivative at stp.

            self.stx = 0.0
            self.fx = self.finit
            self.gx = self.ginit
            self.sty = 0.0
            self.fy = self.finit
            self.gy = self.ginit
            self.stmin = 0
            self.stmax = stp + xtrapu * stp
            # Ask the caller to evaluate f and f' at stp.
            task = b"FG"
            return stp, f, g, task

        # in the original Fortran this was a location to restore variables
        # we don't need to do that because they're attributes.

        # If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
        # algorithm enters the second stage.
        # ftest is the sufficient-decrease threshold f(0) + ftol*stp*f'(0).
        ftest = self.finit + stp * self.gtest

        if self.stage == 1 and f <= ftest and g >= 0:
            self.stage = 2

        # test for warnings
        if self.brackt and (stp <= self.stmin or stp >= self.stmax):
            task = b"WARNING: ROUNDING ERRORS PREVENT PROGRESS"
        if self.brackt and self.stmax - self.stmin <= self.xtol * self.stmax:
            task = b"WARNING: XTOL TEST SATISFIED"
        if stp == self.stpmax and f <= ftest and g <= self.gtest:
            task = b"WARNING: STP = STPMAX"
        if stp == self.stpmin and (f > ftest or g >= self.gtest):
            task = b"WARNING: STP = STPMIN"

        # test for convergence
        # (strong Wolfe curvature condition: |f'(stp)| <= gtol*|f'(0)|;
        # -self.ginit is positive since the initial derivative is negative)
        if f <= ftest and abs(g) <= self.gtol * -self.ginit:
            task = b"CONVERGENCE"

        # test for termination
        if task[:4] == b"WARN" or task[:4] == b"CONV":
            return stp, f, g, task

        # A modified function is used to predict the step during the
        # first stage if a lower function value has been obtained but
        # the decrease is not sufficient.
        if self.stage == 1 and f <= self.fx and f > ftest:
            # Define the modified function and derivative values.
            # (psi(stp) = f(stp) - f(0) - ftol*stp*f'(0), shifted so the
            # standard dcstep update applies.)
            fm = f - stp * self.gtest
            fxm = self.fx - self.stx * self.gtest
            fym = self.fy - self.sty * self.gtest
            gm = g - self.gtest
            gxm = self.gx - self.gtest
            gym = self.gy - self.gtest

            # Call dcstep to update stx, sty, and to compute the new step.
            # dcstep can have several operations which can produce NaN
            # e.g. inf/inf. Filter these out.
            with np.errstate(invalid="ignore", over="ignore"):
                tup = dcstep(
                    self.stx,
                    fxm,
                    gxm,
                    self.sty,
                    fym,
                    gym,
                    stp,
                    fm,
                    gm,
                    self.brackt,
                    self.stmin,
                    self.stmax,
                )
                self.stx, fxm, gxm, self.sty, fym, gym, stp, self.brackt = tup

            # Reset the function and derivative values for f
            # (undo the modified-function shift).
            self.fx = fxm + self.stx * self.gtest
            self.fy = fym + self.sty * self.gtest
            self.gx = gxm + self.gtest
            self.gy = gym + self.gtest

        else:
            # Call dcstep to update stx, sty, and to compute the new step.
            # dcstep can have several operations which can produce NaN
            # e.g. inf/inf. Filter these out.

            with np.errstate(invalid="ignore", over="ignore"):
                tup = dcstep(
                    self.stx,
                    self.fx,
                    self.gx,
                    self.sty,
                    self.fy,
                    self.gy,
                    stp,
                    f,
                    g,
                    self.brackt,
                    self.stmin,
                    self.stmax,
                )
                (
                    self.stx,
                    self.fx,
                    self.gx,
                    self.sty,
                    self.fy,
                    self.gy,
                    stp,
                    self.brackt,
                ) = tup

        # Decide if a bisection step is needed
        # (if the interval is not shrinking fast enough).
        if self.brackt:
            if abs(self.sty - self.stx) >= p66 * self.width1:
                stp = self.stx + p5 * (self.sty - self.stx)
            self.width1 = self.width
            self.width = abs(self.sty - self.stx)

        # Set the minimum and maximum steps allowed for stp.
        if self.brackt:
            self.stmin = min(self.stx, self.sty)
            self.stmax = max(self.stx, self.sty)
        else:
            self.stmin = stp + xtrapl * (stp - self.stx)
            self.stmax = stp + xtrapu * (stp - self.stx)

        # Force the step to be within the bounds stpmax and stpmin.
        stp = np.clip(stp, self.stpmin, self.stpmax)

        # If further progress is not possible, let stp be the best
        # point obtained during the search.
        if (
            self.brackt
            and (stp <= self.stmin or stp >= self.stmax)
            or (
                self.brackt
                and self.stmax - self.stmin <= self.xtol * self.stmax
            )
        ):
            stp = self.stx

        # Obtain another function and derivative
        task = b"FG"
        return stp, f, g, task
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def dcstep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax):
    """Compute a safeguarded trial step and update the bracketing interval.

    Python translation of the MINPACK-2 ``dcstep`` routine (More' and
    Thuente).  ``(stx, fx, dx)`` is the best step found so far together
    with its function value and derivative and is one endpoint of the
    interval that contains the minimizer; ``(sty, fy, dy)`` is the other
    endpoint; ``(stp, fp, dp)`` is the current trial step.  If ``brackt``
    is True the minimizer has been bracketed, and on entry
    ``min(stx, sty) < stp < max(stx, sty)`` must hold.  The derivative at
    stx must be negative in the direction of the step.  ``stpmin`` and
    ``stpmax`` bound the step.

    Returns
    -------
    stx, fx, dx, sty, fy, dy, stp, brackt : tuple
        The updated interval endpoints, the new trial step, and the
        bracketing flag (set to True once a minimizer is bracketed).
    """
    # Product of the derivative signs: negative iff the derivative
    # changed sign between stx and stp (equivalent to dp * dx / |dx|).
    sgnd = np.sign(dp) * np.sign(dx)

    if fp > fx:
        # Case 1: a higher function value, so the minimum is bracketed.
        # Take the cubic step if it is closer to stx than the quadratic
        # step; otherwise take the average of the two.
        theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp
        scale = max(abs(theta), abs(dx), abs(dp))
        disc = scale * np.sqrt((theta / scale) ** 2 - (dx / scale) * (dp / scale))
        if stp < stx:
            disc = -disc
        num = (disc - dx) + theta
        den = ((disc - dx) + disc) + dp
        ratio = num / den
        cubic_step = stx + ratio * (stp - stx)
        quad_step = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx)
        if abs(cubic_step - stx) <= abs(quad_step - stx):
            next_step = cubic_step
        else:
            next_step = cubic_step + (quad_step - cubic_step) / 2.0
        brackt = True
    elif sgnd < 0.0:
        # Case 2: a lower function value but derivatives of opposite
        # sign, so the minimum is bracketed.  Take the cubic step when it
        # lands farther from stp than the secant step.
        theta = 3 * (fx - fp) / (stp - stx) + dx + dp
        scale = max(abs(theta), abs(dx), abs(dp))
        disc = scale * np.sqrt((theta / scale) ** 2 - (dx / scale) * (dp / scale))
        if stp > stx:
            disc = -disc
        num = (disc - dp) + theta
        den = ((disc - dp) + disc) + dx
        ratio = num / den
        cubic_step = stp + ratio * (stx - stp)
        secant_step = stp + (dp / (dp - dx)) * (stx - stp)
        next_step = (cubic_step
                     if abs(cubic_step - stp) > abs(secant_step - stp)
                     else secant_step)
        brackt = True
    elif abs(dp) < abs(dx):
        # Case 3: a lower function value, derivatives of the same sign,
        # and the derivative magnitude decreased.  The cubic step is used
        # only if the cubic tends to infinity in the direction of the
        # step or its minimum is beyond stp; otherwise fall back to the
        # secant step.
        theta = 3 * (fx - fp) / (stp - stx) + dx + dp
        scale = max(abs(theta), abs(dx), abs(dp))

        # disc == 0 arises only when the cubic does not tend to infinity
        # in the direction of the step.
        disc = scale * np.sqrt(max(0, (theta / scale) ** 2 - (dx / scale) * (dp / scale)))
        if stp > stx:
            disc = -disc
        num = (disc - dp) + theta
        den = (disc + (dx - dp)) + disc
        ratio = num / den
        if ratio < 0 and disc != 0:
            cubic_step = stp + ratio * (stx - stp)
        elif stp > stx:
            cubic_step = stpmax
        else:
            cubic_step = stpmin
        secant_step = stp + (dp / (dp - dx)) * (stx - stp)

        if brackt:
            # Bracketed: prefer whichever candidate is closer to stp,
            # then keep the step within 0.66 of the current interval.
            next_step = (cubic_step
                         if abs(cubic_step - stp) < abs(secant_step - stp)
                         else secant_step)
            if stp > stx:
                next_step = min(stp + 0.66 * (sty - stp), next_step)
            else:
                next_step = max(stp + 0.66 * (sty - stp), next_step)
        else:
            # Not bracketed: prefer whichever candidate is farther from
            # stp, clamped to [stpmin, stpmax].
            next_step = (cubic_step
                         if abs(cubic_step - stp) > abs(secant_step - stp)
                         else secant_step)
            next_step = np.clip(next_step, stpmin, stpmax)

    else:
        # Case 4: a lower function value, derivatives of the same sign,
        # and the derivative magnitude did not decrease.  Without a
        # bracket the step jumps to the appropriate bound; with one, take
        # the cubic step toward sty.
        if brackt:
            theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp
            scale = max(abs(theta), abs(dy), abs(dp))
            disc = scale * np.sqrt((theta / scale) ** 2 - (dy / scale) * (dp / scale))
            if stp > sty:
                disc = -disc
            num = (disc - dp) + theta
            den = ((disc - dp) + disc) + dy
            ratio = num / den
            next_step = stp + ratio * (sty - stp)
        elif stp > stx:
            next_step = stpmax
        else:
            next_step = stpmin

    # Update the interval which contains a minimizer.
    if fp > fx:
        sty, fy, dy = stp, fp, dp
    else:
        if sgnd < 0:
            sty, fy, dy = stx, fx, dx
        stx, fx, dx = stp, fp, dp

    # The safeguarded candidate becomes the new trial step.
    return stx, fx, dx, sty, fy, dy, next_step, brackt
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_differentiate.py
ADDED
|
@@ -0,0 +1,856 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: disable-error-code="attr-defined"
|
| 2 |
+
import numpy as np
|
| 3 |
+
import scipy._lib._elementwise_iterative_method as eim
|
| 4 |
+
from scipy._lib._util import _RichResult
|
| 5 |
+
|
| 6 |
+
_EERRORINCREASE = -1 # used in _differentiate
|
| 7 |
+
|
| 8 |
+
def _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
                      step_factor, step_direction, preserve_shape, callback):
    """Validate and canonicalize the arguments of `_differentiate`.

    Returns the inputs in the order `_differentiate` expects, with the
    tolerances defaulted from the working dtype, `maxiter`/`order`
    coerced to int, and `x`/`step_direction` broadcast against each
    other.  Raises ValueError for any invalid argument.
    """
    if not callable(func):
        raise ValueError('`func` must be callable.')

    # `x` has more complex validation that is handled during
    # initialization; here we only need its dtype to pick defaults.
    x = np.asarray(x)
    dtype = x.dtype if np.issubdtype(x.dtype, np.inexact) else np.float64

    if not np.iterable(args):
        args = (args,)

    # Defaults: smallest normal number / sqrt of machine epsilon.
    atol = np.finfo(dtype).tiny if atol is None else atol
    rtol = np.sqrt(np.finfo(dtype).eps) if rtol is None else rtol

    # The four scalar knobs must form a numeric, non-negative quadruple.
    tols = np.asarray([atol, rtol, initial_step, step_factor])
    invalid_tols = (not np.issubdtype(tols.dtype, np.number)
                    or np.any(tols < 0)
                    or tols.shape != (4,))
    if invalid_tols:
        raise ValueError('Tolerances and step parameters must be '
                         'non-negative scalars.')
    initial_step, step_factor = tols[2:].astype(dtype)

    maxiter_int = int(maxiter)
    if maxiter != maxiter_int or maxiter <= 0:
        raise ValueError('`maxiter` must be a positive integer.')

    order_int = int(order)
    if order_int != order or order <= 0:
        raise ValueError('`order` must be a positive integer.')

    # Reduce the direction to its sign and broadcast it against `x`;
    # the trailing `[()]` unwraps 0-d arrays back to scalars.
    step_direction = np.sign(step_direction).astype(dtype)
    x, step_direction = np.broadcast_arrays(x, step_direction)
    x, step_direction = x[()], step_direction[()]

    if preserve_shape not in {True, False}:
        raise ValueError('`preserve_shape` must be True or False.')

    if callback is not None and not callable(callback):
        raise ValueError('`callback` must be callable.')

    return (func, x, args, atol, rtol, maxiter_int, order_int, initial_step,
            step_factor, step_direction, preserve_shape, callback)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _differentiate(func, x, *, args=(), atol=None, rtol=None, maxiter=10,
                   order=8, initial_step=0.5, step_factor=2.0,
                   step_direction=0, preserve_shape=False, callback=None):
    """Evaluate the derivative of an elementwise scalar function numerically.

    Parameters
    ----------
    func : callable
        The function whose derivative is desired. The signature must be::

            func(x: ndarray, *fargs) -> ndarray

        where each element of ``x`` is a finite real number and ``fargs`` is a tuple,
        which may contain an arbitrary number of arrays that are broadcastable
        with `x`. ``func`` must be an elementwise function: each element
        ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
    x : array_like
        Abscissae at which to evaluate the derivative.
    args : tuple, optional
        Additional positional arguments to be passed to `func`. Must be arrays
        broadcastable with `x`. If the callable to be differentiated requires
        arguments that are not broadcastable with `x`, wrap that callable with
        `func`. See Examples.
    atol, rtol : float, optional
        Absolute and relative tolerances for the stopping condition: iteration
        will stop when ``res.error < atol + rtol * abs(res.df)``. The default
        `atol` is the smallest normal number of the appropriate dtype, and
        the default `rtol` is the square root of the precision of the
        appropriate dtype.
    order : int, default: 8
        The (positive integer) order of the finite difference formula to be
        used. Odd integers will be rounded up to the next even integer.
    initial_step : float, default: 0.5
        The (absolute) initial step size for the finite difference derivative
        approximation.
    step_factor : float, default: 2.0
        The factor by which the step size is *reduced* in each iteration; i.e.
        the step size in iteration 1 is ``initial_step/step_factor``. If
        ``step_factor < 1``, subsequent steps will be greater than the initial
        step; this may be useful if steps smaller than some threshold are
        undesirable (e.g. due to subtractive cancellation error).
    maxiter : int, default: 10
        The maximum number of iterations of the algorithm to perform. See
        notes.
    step_direction : array_like
        An array representing the direction of the finite difference steps (for
        use when `x` lies near to the boundary of the domain of the function.)
        Must be broadcastable with `x` and all `args`.
        Where 0 (default), central differences are used; where negative (e.g.
        -1), steps are non-positive; and where positive (e.g. 1), all steps are
        non-negative.
    preserve_shape : bool, default: False
        In the following, "arguments of `func`" refers to the array ``x`` and
        any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
        of `x` and all elements of `args` (which is conceptually
        distinct from ``fargs`` passed into `f`).

        - When ``preserve_shape=False`` (default), `f` must accept arguments
          of *any* broadcastable shapes.

        - When ``preserve_shape=True``, `f` must accept arguments of shape
          ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
          abscissae at which the function is being evaluated.

        In either case, for each scalar element ``xi`` within `x`, the array
        returned by `f` must include the scalar ``f(xi)`` at the same index.
        Consequently, the shape of the output is always the shape of the input
        ``x``.

        See Examples.
    callback : callable, optional
        An optional user-supplied function to be called before the first
        iteration and after each iteration.
        Called as ``callback(res)``, where ``res`` is a ``_RichResult``
        similar to that returned by `_differentiate` (but containing the
        current iterate's values of all variables). If `callback` raises a
        ``StopIteration``, the algorithm will terminate immediately and
        `_differentiate` will return a result.

    Returns
    -------
    res : _RichResult
        An instance of `scipy._lib._util._RichResult` with the following
        attributes. (The descriptions are written as though the values will be
        scalars; however, if `func` returns an array, the outputs will be
        arrays of the same shape.)

        success : bool
            ``True`` when the algorithm terminated successfully (status ``0``).
        status : int
            An integer representing the exit status of the algorithm.
            ``0`` : The algorithm converged to the specified tolerances.
            ``-1`` : The error estimate increased, so iteration was terminated.
            ``-2`` : The maximum number of iterations was reached.
            ``-3`` : A non-finite value was encountered.
            ``-4`` : Iteration was terminated by `callback`.
            ``1`` : The algorithm is proceeding normally (in `callback` only).
        df : float
            The derivative of `func` at `x`, if the algorithm terminated
            successfully.
        error : float
            An estimate of the error: the magnitude of the difference between
            the current estimate of the derivative and the estimate in the
            previous iteration.
        nit : int
            The number of iterations performed.
        nfev : int
            The number of points at which `func` was evaluated.
        x : float
            The value at which the derivative of `func` was evaluated
            (after broadcasting with `args` and `step_direction`).

    Notes
    -----
    The implementation was inspired by jacobi [1]_, numdifftools [2]_, and
    DERIVEST [3]_, but the implementation follows the theory of Taylor series
    more straightforwardly (and arguably naively so).
    In the first iteration, the derivative is estimated using a finite
    difference formula of order `order` with maximum step size `initial_step`.
    Each subsequent iteration, the maximum step size is reduced by
    `step_factor`, and the derivative is estimated again until a termination
    condition is reached. The error estimate is the magnitude of the difference
    between the current derivative approximation and that of the previous
    iteration.

    The stencils of the finite difference formulae are designed such that
    abscissae are "nested": after `func` is evaluated at ``order + 1``
    points in the first iteration, `func` is evaluated at only two new points
    in each subsequent iteration; ``order - 1`` previously evaluated function
    values required by the finite difference formula are reused, and two
    function values (evaluations at the points furthest from `x`) are unused.

    Step sizes are absolute. When the step size is small relative to the
    magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the
    default initial step size of ``0.5`` cannot be resolved. Accordingly,
    consider using larger initial step sizes for large magnitudes of `x`.

    The default tolerances are challenging to satisfy at points where the
    true derivative is exactly zero. If the derivative may be exactly zero,
    consider specifying an absolute tolerance (e.g. ``atol=1e-16``) to
    improve convergence.

    References
    ----------
    [1]_ Hans Dembinski (@HDembinski). jacobi.
         https://github.com/HDembinski/jacobi
    [2]_ Per A. Brodtkorb and John D'Errico. numdifftools.
         https://numdifftools.readthedocs.io/en/latest/
    [3]_ John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation.
         https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
    [4]_ Numerical Differentiation. Wikipedia.
         https://en.wikipedia.org/wiki/Numerical_differentiation

    Examples
    --------
    Evaluate the derivative of ``np.exp`` at several points ``x``.

    >>> import numpy as np
    >>> from scipy.optimize._differentiate import _differentiate
    >>> f = np.exp
    >>> df = np.exp  # true derivative
    >>> x = np.linspace(1, 2, 5)
    >>> res = _differentiate(f, x)
    >>> res.df  # approximation of the derivative
    array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ])
    >>> res.error  # estimate of the error
    array(
        [7.12940817e-12, 9.16688947e-12, 1.17594823e-11, 1.50972568e-11, 1.93942640e-11]
    )
    >>> abs(res.df - df(x))  # true error
    array(
        [3.06421555e-14, 3.01980663e-14, 5.06261699e-14, 6.30606678e-14, 8.34887715e-14]
    )

    Show the convergence of the approximation as the step size is reduced.
    Each iteration, the step size is reduced by `step_factor`, so for
    sufficiently small initial step, each iteration reduces the error by a
    factor of ``1/step_factor**order`` until finite precision arithmetic
    inhibits further improvement.

    >>> import matplotlib.pyplot as plt
    >>> iter = list(range(1, 12))  # maximum iterations
    >>> hfac = 2  # step size reduction per iteration
    >>> hdir = [-1, 0, 1]  # compare left-, central-, and right- steps
    >>> order = 4  # order of differentiation formula
    >>> x = 1
    >>> ref = df(x)
    >>> errors = []  # true error
    >>> for i in iter:
    ...     res = _differentiate(f, x, maxiter=i, step_factor=hfac,
    ...                          step_direction=hdir, order=order,
    ...                          atol=0, rtol=0)  # prevent early termination
    ...     errors.append(abs(res.df - ref))
    >>> errors = np.array(errors)
    >>> plt.semilogy(iter, errors[:, 0], label='left differences')
    >>> plt.semilogy(iter, errors[:, 1], label='central differences')
    >>> plt.semilogy(iter, errors[:, 2], label='right differences')
    >>> plt.xlabel('iteration')
    >>> plt.ylabel('error')
    >>> plt.legend()
    >>> plt.show()
    >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order)
    (0.06215223140159822, 0.0625)

    The implementation is vectorized over `x`, `step_direction`, and `args`.
    The function is evaluated once before the first iteration to perform input
    validation and standardization, and once per iteration thereafter.

    >>> def f(x, p):
    ...     print('here')
    ...     f.nit += 1
    ...     return x**p
    >>> f.nit = 0
    >>> def df(x, p):
    ...     return p*x**(p-1)
    >>> x = np.arange(1, 5)
    >>> p = np.arange(1, 6).reshape((-1, 1))
    >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1))
    >>> res = _differentiate(f, x, args=(p,), step_direction=hdir, maxiter=1)
    >>> np.allclose(res.df, df(x, p))
    True
    >>> res.df.shape
    (3, 5, 4)
    >>> f.nit
    2

    By default, `preserve_shape` is False, and therefore the callable
    `f` may be called with arrays of any broadcastable shapes.
    For example:

    >>> shapes = []
    >>> def f(x, c):
    ...     shape = np.broadcast_shapes(x.shape, c.shape)
    ...     shapes.append(shape)
    ...     return np.sin(c*x)
    >>>
    >>> c = [1, 5, 10, 20]
    >>> res = _differentiate(f, 0, args=(c,))
    >>> shapes
    [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)]

    To understand where these shapes are coming from - and to better
    understand how `_differentiate` computes accurate results - note that
    higher values of ``c`` correspond with higher frequency sinusoids.
    The higher frequency sinusoids make the function's derivative change
    faster, so more function evaluations are required to achieve the target
    accuracy:

    >>> res.nfev
    array([11, 13, 15, 17])

    The initial ``shape``, ``(4,)``, corresponds with evaluating the
    function at a single abscissa and all four frequencies; this is used
    for input validation and to determine the size and dtype of the arrays
    that store results. The next shape corresponds with evaluating the
    function at an initial grid of abscissae and all four frequencies.
    Successive calls to the function evaluate the function at two more
    abscissae, increasing the effective order of the approximation by two.
    However, in later function evaluations, the function is evaluated at
    fewer frequencies because the corresponding derivative has already
    converged to the required tolerance. This saves function evaluations to
    improve performance, but it requires the function to accept arguments of
    any shape.

    "Vector-valued" functions are unlikely to satisfy this requirement.
    For example, consider

    >>> def f(x):
    ...     return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]

    This integrand is not compatible with `_differentiate` as written; for instance,
    the shape of the output will not be the same as the shape of ``x``. Such a
    function *could* be converted to a compatible form with the introduction of
    additional parameters, but this would be inconvenient. In such cases,
    a simpler solution would be to use `preserve_shape`.

    >>> shapes = []
    >>> def f(x):
    ...     shapes.append(x.shape)
    ...     x0, x1, x2, x3 = x
    ...     return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2]
    >>>
    >>> x = np.zeros(4)
    >>> res = _differentiate(f, x, preserve_shape=True)
    >>> shapes
    [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)]

    Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the
    function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``,
    and this is what we observe.

    """
    # TODO (followup):
    # - investigate behavior at saddle points
    # - array initial_step / step_factor?
    # - multivariate functions?

    # Input validation/standardization; returns the standardized versions of
    # all arguments (see `_differentiate_iv` above).
    res = _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
                            step_factor, step_direction, preserve_shape, callback)
    (func, x, args, atol, rtol, maxiter, order,
     h0, fac, hdir, preserve_shape, callback) = res

    # Initialization
    # Since f(x) (no step) is not needed for central differences, it may be
    # possible to eliminate this function evaluation. However, it's useful for
    # input validation and standardization, and everything else is designed to
    # reduce function calls, so let's keep it simple.
    temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape)
    func, xs, fs, args, shape, dtype, xp = temp
    x, f = xs[0], fs[0]
    # Derivative estimates start as NaN and are filled in as iterations run.
    df = np.full_like(f, np.nan)
    # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but
    # it's simpler to do it here than to generalize `_elementwise_algo_init` further.
    # `hdir` and `x` are already broadcasted in `_differentiate_iv`, so we know
    # that `hdir` can be broadcasted to the final shape.
    hdir = np.broadcast_to(hdir, shape).flatten()

    status = np.full_like(x, eim._EINPROGRESS, dtype=int)  # in progress
    nit, nfev = 0, 1  # one function evaluation performed above
    # Boolean indices of left, central, right, and (all) one-sided steps
    il = hdir < 0
    ic = hdir == 0
    ir = hdir > 0
    io = il | ir

    # Most of these attributes are reasonably obvious, but:
    # - `fs` holds all the function values of all active `x`. The zeroth
    #   axis corresponds with active points `x`, the first axis corresponds
    #   with the different steps (in the order described in
    #   `_differentiate_weights`).
    # - `terms` (which could probably use a better name) is half the `order`,
    #   which is always even.
    work = _RichResult(x=x, df=df, fs=f[:, np.newaxis], error=np.nan, h=h0,
                       df_last=np.nan, error_last=np.nan, h0=h0, fac=fac,
                       atol=atol, rtol=rtol, nit=nit, nfev=nfev,
                       status=status, dtype=dtype, terms=(order+1)//2,
                       hdir=hdir, il=il, ic=ic, ir=ir, io=io)
    # This is the correspondence between terms in the `work` object and the
    # final result. In this case, the mapping is trivial. Note that `success`
    # is prepended automatically.
    res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'),
                      ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')]

    def pre_func_eval(work):
        """Determine the abscissae at which the function needs to be evaluated.

        See `_differentiate_weights` for a description of the stencil (pattern
        of the abscissae).

        In the first iteration, there is only one stored function value in
        `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In
        subsequent iterations, we evaluate at two new points. Note that
        `work.x` is always flattened into a 1D array after broadcasting with
        all `args`, so we add a new axis at the end and evaluate all points
        in one call to the function.

        For improvement:
        - Consider measuring the step size actually taken, since `(x + h) - x`
          is not identically equal to `h` with floating point arithmetic.
        - Adjust the step size automatically if `x` is too big to resolve the
          step.
        - We could probably save some work if there are no central difference
          steps or no one-sided steps.
        """
        n = work.terms  # half the order
        h = work.h  # step size
        c = work.fac  # step reduction factor
        d = c**0.5  # square root of step reduction factor (one-sided stencil)
        # Note - no need to be careful about dtypes until we allocate `x_eval`

        # Central-difference offsets: +/- h/c**k for k = 0..n-1 (first
        # iteration) or just the two innermost points thereafter.
        if work.nit == 0:
            hc = h / c**np.arange(n)
            hc = np.concatenate((-hc[::-1], hc))
        else:
            hc = np.asarray([-h, h]) / c**(n-1)

        # One-sided offsets: h/d**k for k = 0..2n-1 (first iteration) or the
        # two new innermost points thereafter.
        if work.nit == 0:
            hr = h / d**np.arange(2*n)
        else:
            hr = np.asarray([h, h/d]) / c**(n-1)

        n_new = 2*n if work.nit == 0 else 2  # number of new abscissae
        x_eval = np.zeros((len(work.hdir), n_new), dtype=work.dtype)
        il, ic, ir = work.il, work.ic, work.ir
        x_eval[ir] = work.x[ir, np.newaxis] + hr
        x_eval[ic] = work.x[ic, np.newaxis] + hc
        x_eval[il] = work.x[il, np.newaxis] - hr  # left steps mirror the right
        return x_eval

    def post_func_eval(x, f, work):
        """ Estimate the derivative and error from the function evaluations

        As in `pre_func_eval`: in the first iteration, there is only one stored
        function value in `work.fs`, `f(x)`, so we need to add the `order` new
        points. In subsequent iterations, we add two new points. The tricky
        part is getting the order to match that of the weights, which is
        described in `_differentiate_weights`.

        For improvement:
        - Change the order of the weights (and steps in `pre_func_eval`) to
          simplify `work_fc` concatenation and eliminate `fc` concatenation.
        - It would be simple to do one-step Richardson extrapolation with `df`
          and `df_last` to increase the order of the estimate and/or improve
          the error estimate.
        - Process the function evaluations in a more numerically favorable
          way. For instance, combining the pairs of central difference evals
          into a second-order approximation and using Richardson extrapolation
          to produce a higher order approximation seemed to retain accuracy up
          to very high order.
        - Alternatively, we could use `polyfit` like Jacobi. An advantage of
          fitting polynomial to more points than necessary is improved noise
          tolerance.
        """
        n = work.terms
        n_new = n if work.nit == 0 else 1
        il, ic, io = work.il, work.ic, work.io

        # Central difference
        # `work_fc` is *all* the points at which the function has been evaluated
        # `fc` is the points we're using *this iteration* to produce the estimate
        work_fc = (f[ic, :n_new], work.fs[ic, :], f[ic, -n_new:])
        work_fc = np.concatenate(work_fc, axis=-1)
        if work.nit == 0:
            fc = work_fc
        else:
            # Reuse the `order - 1` innermost stored values plus the two new
            # points; the outermost (stale) evaluations are not used.
            fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:])
            fc = np.concatenate(fc, axis=-1)

        # One-sided difference
        work_fo = np.concatenate((work.fs[io, :], f[io, :]), axis=-1)
        if work.nit == 0:
            fo = work_fo
        else:
            # f(x) plus the 2n most recent (innermost) one-sided evaluations.
            fo = np.concatenate((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1)

        # Store *all* evaluations (including unused outer points) so that
        # alternative post-processing schemes remain possible in the future.
        # NOTE(review): this allocation uses the default float64 dtype rather
        # than `work.dtype` — presumably intentional/benign, but confirm.
        work.fs = np.zeros((len(ic), work.fs.shape[-1] + 2*n_new))
        work.fs[ic] = work_fc
        work.fs[io] = work_fo

        wc, wo = _differentiate_weights(work, n)
        work.df_last = work.df.copy()
        work.df[ic] = fc @ wc / work.h
        work.df[io] = fo @ wo / work.h
        work.df[il] *= -1  # left one-sided weights are negated right weights

        work.h /= work.fac
        work.error_last = work.error
        # Simple error estimate - the difference in derivative estimates between
        # this iteration and the last. This is typically conservative because if
        # convergence has begun, the true error is much closer to the difference
        # between the current estimate and the *next* error estimate. However,
        # we could use Richardson extrapolation to produce an error estimate that
        # is one order higher, and take the difference between that and
        # `work.df` (which would just be constant factor that depends on `fac`.)
        work.error = abs(work.df - work.df_last)

    def check_termination(work):
        """Terminate due to convergence, non-finite values, or error increase"""
        stop = np.zeros_like(work.df).astype(bool)

        # Converged: error below the mixed absolute/relative tolerance.
        i = work.error < work.atol + work.rtol*abs(work.df)
        work.status[i] = eim._ECONVERGED
        stop[i] = True

        # Non-finite `x` or `df` (skipped on the zeroth call, before any
        # derivative estimate exists).
        if work.nit > 0:
            i = ~((np.isfinite(work.x) & np.isfinite(work.df)) | stop)
            work.df[i], work.status[i] = np.nan, eim._EVALUEERR
            stop[i] = True

        # With infinite precision, there is a step size below which
        # all smaller step sizes will reduce the error. But in floating point
        # arithmetic, catastrophic cancellation will begin to cause the error
        # to increase again. This heuristic tries to avoid step sizes that are
        # too small. There may be more theoretically sound approaches for
        # detecting a step size that minimizes the total error, but this
        # heuristic seems simple and effective.
        i = (work.error > work.error_last*10) & ~stop
        work.status[i] = _EERRORINCREASE
        stop[i] = True

        return stop

    def post_termination_check(work):
        # No per-iteration adjustments are needed between termination checks.
        return

    def customize_result(res, shape):
        # Output shape equals the broadcasted input shape; no customization.
        return shape

    # Delegate the iteration loop (function evaluation, masking of converged
    # elements, callback handling, result assembly) to the shared elementwise
    # iterative-method driver.
    return eim._loop(work, callback, shape, maxiter, func, args, dtype,
                     pre_func_eval, post_func_eval, check_termination,
                     post_termination_check, customize_result, res_work_pairs,
                     xp, preserve_shape)
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def _differentiate_weights(work, n):
    """Return (central, right one-sided) finite difference weights.

    The weights are cached on the function object (``central``, ``right``,
    ``fac`` attributes below) and recomputed only when the step factor or
    the number of terms ``n`` changes.
    """
    # This produces the weights of the finite difference formula for a given
    # stencil. In experiments, use of a second-order central difference formula
    # with Richardson extrapolation was more accurate numerically, but it was
    # more complicated, and it would have become even more complicated when
    # adding support for one-sided differences. However, now that all the
    # function evaluation values are stored, they can be processed in whatever
    # way is desired to produce the derivative estimate. We leave alternative
    # approaches to future work. To be more self-contained, here is the theory
    # for deriving the weights below.
    #
    # Recall that the Taylor expansion of a univariate, scalar-valued function
    # about a point `x` may be expressed as:
    #      f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
    # Suppose we evaluate f(x), f(x+h), and f(x-h). We have:
    #      f(x)     = f(x)
    #      f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
    #      f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3)
    # We can solve for weights `wi` such that:
    #   w1*f(x)      = w1*(f(x))
    # + w2*f(x + h)  = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3)
    # + w3*f(x - h)  = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3)
    #                = 0 + f'(x)*h + 0 + O(h**3)
    # Then
    #     f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h
    # is a finite difference derivative approximation with error O(h**2),
    # and so it is said to be a "second-order" approximation. Under certain
    # conditions (e.g. well-behaved function, `h` sufficiently small), the
    # error in the approximation will decrease with h**2; that is, if `h` is
    # reduced by a factor of 2, the error is reduced by a factor of 4.
    #
    # By default, we use eighth-order formulae. Our central-difference formula
    # uses abscissae:
    #   x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3
    # where `c` is the step factor. (Typically, the step factor is greater than
    # one, so the outermost points - as written above - are actually closest to
    # `x`.) This "stencil" is chosen so that each iteration, the step can be
    # reduced by the factor `c`, and most of the function evaluations can be
    # reused with the new step size. For example, in the next iteration, we
    # will have:
    #   x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4
    # We do not reuse `x-h` and `x+h` for the new derivative estimate.
    # While this would increase the order of the formula and thus the
    # theoretical convergence rate, it is also less stable numerically.
    # (As noted above, there are other ways of processing the values that are
    # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs`
    # to simplify future development of this sort of improvement.)
    #
    # The (right) one-sided formula is produced similarly using abscissae
    #   x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7
    # where `d` is the square root of `c`. (The left one-sided formula simply
    # uses -h.) When the step size is reduced by factor `c = d**2`, we have
    # abscissae:
    #   x, x+h/d**2, x+h/d**3, ..., x+h/d**8, x+h/d**9
    # `d` is chosen as the square root of `c` so that the rate of the step-size
    # reduction is the same per iteration as in the central difference case.
    # Note that because the central difference formulas are inherently of even
    # order, for simplicity, we use only even-order formulas for one-sided
    # differences, too.

    # It's possible for the user to specify `fac` in, say, double precision but
    # `x` and `args` in single precision. `fac` gets converted to single
    # precision, but we should always use double precision for the intermediate
    # calculations here to avoid additional error in the weights.
    fac = work.fac.astype(np.float64)

    # Note that if the user switches back to floating point precision with
    # `x` and `args`, then `fac` will not necessarily equal the (lower
    # precision) cached `_differentiate_weights.fac`, and the weights will
    # need to be recalculated. This could be fixed, but it's late, and of
    # low consequence.
    if fac != _differentiate_weights.fac:
        # Step factor changed: invalidate both caches.
        _differentiate_weights.central = []
        _differentiate_weights.right = []
        _differentiate_weights.fac = fac

    if len(_differentiate_weights.central) != 2*n + 1:
        # Central difference weights. Consider refactoring this; it could
        # probably be more compact.
        i = np.arange(-n, n + 1)
        p = np.abs(i) - 1.  # center point has power `p` -1, but sign `s` is 0
        s = np.sign(i)

        # Solve the Vandermonde system A @ w = b so the weighted sum of Taylor
        # expansions leaves only the first-derivative term (b[1] = 1).
        h = s / fac ** p
        A = np.vander(h, increasing=True).T
        b = np.zeros(2*n + 1)
        b[1] = 1
        weights = np.linalg.solve(A, b)

        # Enforce identities to improve accuracy
        weights[n] = 0
        for i in range(n):
            weights[-i-1] = -weights[i]  # antisymmetry of central weights

        # Cache the weights. We only need to calculate them once unless
        # the step factor changes.
        _differentiate_weights.central = weights

        # One-sided difference weights. The left one-sided weights (with
        # negative steps) are simply the negative of the right one-sided
        # weights, so no need to compute them separately.
        i = np.arange(2*n + 1)
        p = i - 1.
        s = np.sign(i)

        h = s / np.sqrt(fac) ** p
        A = np.vander(h, increasing=True).T
        b = np.zeros(2 * n + 1)
        b[1] = 1
        weights = np.linalg.solve(A, b)

        _differentiate_weights.right = weights

    # Cast cached double-precision weights to the working dtype on the way out.
    return (_differentiate_weights.central.astype(work.dtype, copy=False),
            _differentiate_weights.right.astype(work.dtype, copy=False))
# Function-attribute cache: recomputed when `fac` or the stencil size changes.
_differentiate_weights.central = []
_differentiate_weights.right = []
_differentiate_weights.fac = None
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
def _jacobian(func, x, *, atol=None, rtol=None, maxiter=10,
              order=8, initial_step=0.5, step_factor=2.0):
    r"""Evaluate the Jacobian of a function numerically.

    Parameters
    ----------
    func : callable
        The function whose Jacobian is desired. The signature must be::

            func(x: ndarray) -> ndarray

        where each element of ``x`` is a finite real. If the function to be
        differentiated accepts additional arguments, wrap it (e.g. using
        `functools.partial` or ``lambda``) and pass the wrapped callable
        into `_jacobian`. See Notes regarding vectorization and the dimensionality
        of the input and output.
    x : array_like
        Points at which to evaluate the Jacobian. Must have at least one dimension.
        See Notes regarding the dimensionality and vectorization.
    atol, rtol : float, optional
        Absolute and relative tolerances for the stopping condition: iteration
        will stop for each element of the Jacobian when
        ``res.error < atol + rtol * abs(res.df)``. The default `atol` is the
        smallest normal number of the appropriate dtype, and the default `rtol`
        is the square root of the precision of the appropriate dtype.
    order : int, default: 8
        The (positive integer) order of the finite difference formula to be
        used. Odd integers will be rounded up to the next even integer.
    initial_step : float, default: 0.5
        The (absolute) initial step size for the finite difference derivative
        approximation.
    step_factor : float, default: 2.0
        The factor by which the step size is *reduced* in each iteration; i.e.
        the step size in iteration 1 is ``initial_step/step_factor``. If
        ``step_factor < 1``, subsequent steps will be greater than the initial
        step; this may be useful if steps smaller than some threshold are
        undesirable (e.g. due to subtractive cancellation error).
    maxiter : int, default: 10
        The maximum number of iterations of the algorithm to perform.

    Returns
    -------
    res : _RichResult
        An instance of `scipy._lib._util._RichResult` with the following
        attributes.

        success : bool array
            ``True`` when the algorithm terminated successfully (status ``0``).
        status : int array
            An integer representing the exit status of the algorithm.
            ``0`` : The algorithm converged to the specified tolerances.
            ``-1`` : The error estimate increased, so iteration was terminated.
            ``-2`` : The maximum number of iterations was reached.
            ``-3`` : A non-finite value was encountered.
            ``-4`` : Iteration was terminated by `callback`.
            ``1`` : The algorithm is proceeding normally (in `callback` only).
        df : float array
            The Jacobian of `func` at `x`, if the algorithm terminated
            successfully.
        error : float array
            An estimate of the error: the magnitude of the difference between
            the current estimate of the derivative and the estimate in the
            previous iteration.
        nit : int array
            The number of iterations performed.
        nfev : int array
            The number of points at which `func` was evaluated.
        x : float array
            The value at which the derivative of `func` was evaluated.

    See Also
    --------
    _differentiate

    Notes
    -----
    Suppose we wish to evaluate the Jacobian of a function
    :math:`f: \mathbf{R^m} \rightarrow \mathbf{R^n}`, and assign to variables
    ``m`` and ``n`` the positive integer values of :math:`m` and :math:`n`,
    respectively. If we wish to evaluate the Jacobian at a single point,
    then:

    - argument `x` must be an array of shape ``(m,)``
    - argument `func` must be vectorized to accept an array of shape ``(m, p)``.
      The first axis represents the :math:`m` inputs of :math:`f`; the second
      is for evaluating the function at multiple points in a single call.
    - argument `func` must return an array of shape ``(n, p)``. The first
      axis represents the :math:`n` outputs of :math:`f`; the second
      is for the result of evaluating the function at multiple points.
    - attribute ``df`` of the result object will be an array of shape ``(n, m)``,
      the Jacobian.

    This function is also vectorized in the sense that the Jacobian can be
    evaluated at ``k`` points in a single call. In this case, `x` would be an
    array of shape ``(m, k)``, `func` would accept an array of shape
    ``(m, k, p)`` and return an array of shape ``(n, k, p)``, and the ``df``
    attribute of the result would have shape ``(n, m, k)``.

    References
    ----------
    .. [1] Jacobian matrix and determinant, *Wikipedia*,
           https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant

    Examples
    --------
    The Rosenbrock function maps from :math:`\mathbf{R}^m \rightarrow \mathbf{R}`;
    the SciPy implementation `scipy.optimize.rosen` is vectorized to accept an
    array of shape ``(m, p)`` and return an array of shape ``m``. Suppose we wish
    to evaluate the Jacobian (AKA the gradient because the function returns a scalar)
    at ``[0.5, 0.5, 0.5]``.

    >>> import numpy as np
    >>> from scipy.optimize._differentiate import _jacobian as jacobian
    >>> from scipy.optimize import rosen, rosen_der
    >>> m = 3
    >>> x = np.full(m, 0.5)
    >>> res = jacobian(rosen, x)
    >>> ref = rosen_der(x)  # reference value of the gradient
    >>> res.df, ref
    (array([-51., -1., 50.]), array([-51., -1., 50.]))

    As an example of a function with multiple outputs, consider Example 4
    from [1]_.

    >>> def f(x):
    ...     x1, x2, x3 = x
    ...     return [x1, 5*x3, 4*x2**2 - 2*x3, x3*np.sin(x1)]

    The true Jacobian is given by:

    >>> def df(x):
    ...     x1, x2, x3 = x
    ...     one = np.ones_like(x1)
    ...     return [[one, 0*one, 0*one],
    ...             [0*one, 0*one, 5*one],
    ...             [0*one, 8*x2, -2*one],
    ...             [x3*np.cos(x1), 0*one, np.sin(x1)]]

    Evaluate the Jacobian at an arbitrary point.

    >>> rng = np.random.default_rng(389252938452)
    >>> x = rng.random(size=3)
    >>> res = jacobian(f, x)
    >>> ref = df(x)
    >>> res.df.shape == (4, 3)
    True
    >>> np.allclose(res.df, ref)
    True

    Evaluate the Jacobian at 10 arbitrary points in a single call.

    >>> x = rng.random(size=(3, 10))
    >>> res = jacobian(f, x)
    >>> ref = df(x)
    >>> res.df.shape == (4, 3, 10)
    True
    >>> np.allclose(res.df, ref)
    True

    """
    x = np.asarray(x)
    int_dtype = np.issubdtype(x.dtype, np.integer)
    # Promote integer input to float so finite-difference steps are meaningful.
    x0 = np.asarray(x, dtype=float) if int_dtype else x

    if x0.ndim < 1:
        message = "Argument `x` must be at least 1-D."
        raise ValueError(message)

    # The first axis of `x` holds the m components of a single point.
    m = x0.shape[0]
    i = np.arange(m)

    def wrapped(x):
        # `_differentiate` perturbs one scalar at a time; to get the Jacobian
        # we embed each perturbed component on the diagonal of an (m, m, ...)
        # stack of copies of x0, so component j is varied in "row" j.
        p = () if x.ndim == x0.ndim else (x.shape[-1],)  # number of abscissae
        new_dims = (1,) if x.ndim == x0.ndim else (1, -1)
        new_shape = (m, m) + x0.shape[1:] + p
        xph = np.expand_dims(x0, new_dims)
        xph = np.broadcast_to(xph, new_shape).copy()
        xph[i, i] = x
        return func(xph)

    res = _differentiate(wrapped, x, atol=atol, rtol=rtol,
                         maxiter=maxiter, order=order, initial_step=initial_step,
                         step_factor=step_factor, preserve_shape=True)
    del res.x  # the user knows `x`, and the way it gets broadcasted is meaningless here
    return res
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_direct_py.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
from typing import ( # noqa: UP035
|
| 3 |
+
Any, Callable, Iterable, TYPE_CHECKING
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from scipy.optimize import OptimizeResult
|
| 8 |
+
from ._constraints import old_bound_to_new, Bounds
|
| 9 |
+
from ._direct import direct as _direct # type: ignore
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
import numpy.typing as npt
|
| 13 |
+
|
| 14 |
+
__all__ = ['direct']
|
| 15 |
+
|
| 16 |
+
# Messages for the non-success return codes of the compiled `_direct` routine.
# The first two are formatted with the corresponding limit (maxfun / maxiter);
# the remainder are selected by (shifted) negative status codes in `direct`.
ERROR_MESSAGES = (
    "Number of function evaluations done is larger than maxfun={}",
    "Number of iterations is larger than maxiter={}",
    "u[i] < l[i] for some i",
    "maxfun is too large",
    "Initialization failed",
    "There was an error in the creation of the sample points",
    "An error occurred while the function was sampled",
    "Maximum number of levels has been reached.",
    "Forced stop",
    "Invalid arguments",
    "Out of memory",
)

# Messages for the successful termination criteria (return codes > 2);
# each is formatted with the matching tolerance (f_min_rtol / vol_tol / len_tol).
SUCCESS_MESSAGES = (
    ("The best function value found is within a relative error={} "
     "of the (known) global optimum f_min"),
    ("The volume of the hyperrectangle containing the lowest function value "
     "found is below vol_tol={}"),
    ("The side length measure of the hyperrectangle containing the lowest "
     "function value found is below len_tol={}"),
)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def direct(
    func: Callable[[npt.ArrayLike, tuple[Any]], float],
    bounds: Iterable | Bounds,
    *,
    args: tuple = (),
    eps: float = 1e-4,
    maxfun: int | None = None,
    maxiter: int = 1000,
    locally_biased: bool = True,
    f_min: float = -np.inf,
    f_min_rtol: float = 1e-4,
    vol_tol: float = 1e-16,
    len_tol: float = 1e-6,
    callback: Callable[[npt.ArrayLike], None] | None = None
) -> OptimizeResult:
    """
    Finds the global minimum of a function using the
    DIRECT algorithm.

    Parameters
    ----------
    func : callable
        The objective function to be minimized.
        ``func(x, *args) -> float``
        where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of
        the fixed parameters needed to completely specify the function.
    bounds : sequence or `Bounds`
        Bounds for variables. There are two ways to specify the bounds:

        1. Instance of `Bounds` class.
        2. ``(min, max)`` pairs for each element in ``x``.

    args : tuple, optional
        Any additional fixed parameters needed to
        completely specify the objective function.
    eps : float, optional
        Minimal required difference of the objective function values
        between the current best hyperrectangle and the next potentially
        optimal hyperrectangle to be divided. In consequence, `eps` serves as a
        tradeoff between local and global search: the smaller, the more local
        the search becomes. Default is 1e-4.
    maxfun : int or None, optional
        Approximate upper bound on objective function evaluations.
        If `None`, will be automatically set to ``1000 * N`` where ``N``
        represents the number of dimensions. Will be capped if necessary to
        limit DIRECT's RAM usage to app. 1GiB. This will only occur for very
        high dimensional problems and excessive `max_fun`. Default is `None`.
    maxiter : int, optional
        Maximum number of iterations. Default is 1000.
    locally_biased : bool, optional
        If `True` (default), use the locally biased variant of the
        algorithm known as DIRECT_L. If `False`, use the original unbiased
        DIRECT algorithm. For hard problems with many local minima,
        `False` is recommended.
    f_min : float, optional
        Function value of the global optimum. Set this value only if the
        global optimum is known. Default is ``-np.inf``, so that this
        termination criterion is deactivated.
    f_min_rtol : float, optional
        Terminate the optimization once the relative error between the
        current best minimum `f` and the supplied global minimum `f_min`
        is smaller than `f_min_rtol`. This parameter is only used if
        `f_min` is also set. Must lie between 0 and 1. Default is 1e-4.
    vol_tol : float, optional
        Terminate the optimization once the volume of the hyperrectangle
        containing the lowest function value is smaller than `vol_tol`
        of the complete search space. Must lie between 0 and 1.
        Default is 1e-16.
    len_tol : float, optional
        If `locally_biased=True`, terminate the optimization once half of
        the normalized maximal side length of the hyperrectangle containing
        the lowest function value is smaller than `len_tol`.
        If `locally_biased=False`, terminate the optimization once half of
        the normalized diagonal of the hyperrectangle containing the lowest
        function value is smaller than `len_tol`. Must lie between 0 and 1.
        Default is 1e-6.
    callback : callable, optional
        A callback function with signature ``callback(xk)`` where ``xk``
        represents the best function value found so far.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.

    Notes
    -----
    DIviding RECTangles (DIRECT) is a deterministic global
    optimization algorithm capable of minimizing a black box function with
    its variables subject to lower and upper bound constraints by sampling
    potential solutions in the search space [1]_. The algorithm starts by
    normalising the search space to an n-dimensional unit hypercube.
    It samples the function at the center of this hypercube and at 2n
    (n is the number of variables) more points, 2 in each coordinate
    direction. Using these function values, DIRECT then divides the
    domain into hyperrectangles, each having exactly one of the sampling
    points as its center. In each iteration, DIRECT chooses, using the `eps`
    parameter which defaults to 1e-4, some of the existing hyperrectangles
    to be further divided. This division process continues until either the
    maximum number of iterations or maximum function evaluations allowed
    are exceeded, or the hyperrectangle containing the minimal value found
    so far becomes small enough. If `f_min` is specified, the optimization
    will stop once this function value is reached within a relative tolerance.
    The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is
    used by default. It makes the search more locally biased and more
    efficient for cases with only a few local minima.

    A note about termination criteria: `vol_tol` refers to the volume of the
    hyperrectangle containing the lowest function value found so far. This
    volume decreases exponentially with increasing dimensionality of the
    problem. Therefore `vol_tol` should be decreased to avoid premature
    termination of the algorithm for higher dimensions. This does not hold
    for `len_tol`: it refers either to half of the maximal side length
    (for ``locally_biased=True``) or half of the diagonal of the
    hyperrectangle (for ``locally_biased=False``).

    This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at
    https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz .
    This original version was initially converted via f2c and then cleaned up
    and reorganized by Steven G. Johnson, August 2007, for the NLopt project.
    The `direct` function wraps the C implementation.

    .. versionadded:: 1.9.0

    References
    ----------
    .. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian
        optimization without the Lipschitz constant. J Optim Theory Appl
        79, 157-181 (1993).
    .. [2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT
        Algorithm. Journal of Global Optimization 21, 27-37 (2001).

    Examples
    --------
    The following example is a 2-D problem with four local minima: minimizing
    the Styblinski-Tang function
    (https://en.wikipedia.org/wiki/Test_functions_for_optimization).

    >>> from scipy.optimize import direct, Bounds
    >>> def styblinski_tang(pos):
    ...     x, y = pos
    ...     return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y)
    >>> bounds = Bounds([-4., -4.], [4., 4.])
    >>> result = direct(styblinski_tang, bounds)
    >>> result.x, result.fun, result.nfev
    array([-2.90321597, -2.90321597]), -78.3323279095383, 2011

    The correct global minimum was found but with a huge number of function
    evaluations (2011). Loosening the termination tolerances `vol_tol` and
    `len_tol` can be used to stop DIRECT earlier.

    >>> result = direct(styblinski_tang, bounds, len_tol=1e-3)
    >>> result.x, result.fun, result.nfev
    array([-2.9044353, -2.9044353]), -78.33230330754142, 207

    """
    # convert bounds to new Bounds class if necessary
    if not isinstance(bounds, Bounds):
        if isinstance(bounds, (list, tuple)):
            lb, ub = old_bound_to_new(bounds)
            bounds = Bounds(lb, ub)
        else:
            message = ("bounds must be a sequence or "
                       "instance of Bounds class")
            raise ValueError(message)

    # The C wrapper expects contiguous float64 bound arrays.
    lb = np.ascontiguousarray(bounds.lb, dtype=np.float64)
    ub = np.ascontiguousarray(bounds.ub, dtype=np.float64)

    # validate bounds
    # check that lower bounds are smaller than upper bounds
    if not np.all(lb < ub):
        raise ValueError('Bounds are not consistent min < max')
    # check for infs
    if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))):
        raise ValueError("Bounds must not be inf.")

    # validate tolerances
    if (vol_tol < 0 or vol_tol > 1):
        raise ValueError("vol_tol must be between 0 and 1.")
    if (len_tol < 0 or len_tol > 1):
        raise ValueError("len_tol must be between 0 and 1.")
    if (f_min_rtol < 0 or f_min_rtol > 1):
        raise ValueError("f_min_rtol must be between 0 and 1.")

    # validate maxfun and maxiter
    if maxfun is None:
        maxfun = 1000 * lb.shape[0]
    if not isinstance(maxfun, int):
        raise ValueError("maxfun must be of type int.")
    # NOTE(review): the checks below reject only negative values while the
    # messages say "> 0", so 0 is accepted — kept as-is for backward
    # compatibility; confirm intended semantics before tightening.
    if maxfun < 0:
        raise ValueError("maxfun must be > 0.")
    if not isinstance(maxiter, int):
        raise ValueError("maxiter must be of type int.")
    if maxiter < 0:
        raise ValueError("maxiter must be > 0.")

    # validate boolean parameters
    if not isinstance(locally_biased, bool):
        raise ValueError("locally_biased must be True or False.")

    def _func_wrap(x, args=None):
        # Normalize the objective for the C layer: accept any array-like `x`
        # and always return a Python float.
        x = np.asarray(x)
        if args is None:
            f = func(x)
        else:
            f = func(x, *args)
        # always return a float
        return np.asarray(f).item()

    # TODO: fix disp argument
    x, fun, ret_code, nfev, nit = _direct(
        _func_wrap,
        np.asarray(lb), np.asarray(ub),
        args,
        False, eps, maxfun, maxiter,
        locally_biased,
        f_min, f_min_rtol,
        vol_tol, len_tol, callback
    )

    # Map the C return code to a human-readable message. Codes > 2 are
    # successful terminations; 1..2 are limit errors formatted with their
    # limit; small negative codes index ERROR_MESSAGES; codes <= -100 are
    # the remaining hard failures.
    format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol)
    if ret_code > 2:
        message = SUCCESS_MESSAGES[ret_code - 3].format(
            format_val[ret_code - 1])
    elif 0 < ret_code <= 2:
        message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1])
    elif 0 > ret_code > -100:
        message = ERROR_MESSAGES[abs(ret_code) + 1]
    else:
        message = ERROR_MESSAGES[ret_code + 99]

    return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code,
                          success=ret_code > 2, message=message,
                          nfev=nfev, nit=nit)
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (99.8 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py
ADDED
|
@@ -0,0 +1,475 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Hessian update strategies for quasi-Newton optimization methods."""
|
| 2 |
+
import numpy as np
|
| 3 |
+
from numpy.linalg import norm
|
| 4 |
+
from scipy.linalg import get_blas_funcs, issymmetric
|
| 5 |
+
from warnings import warn
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1']
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class HessianUpdateStrategy:
    """Interface for implementing Hessian update strategies.

    Quasi-Newton optimization methods (e.g. BFGS, SR1, L-BFGS) work with
    approximations of the Hessian or of its inverse. Some approximations
    never need the dense matrix at all, or can form matrix-vector products
    very cheaply. This abstract interface decouples the optimization
    algorithm from the particular update strategy, leaving each concrete
    strategy free to store and update its internal state as efficiently
    as it can. Different initializations and update rules yield different
    quasi-Newton methods.

    Derived classes are expected to implement four methods:
    ``initialize``, ``update``, ``dot`` and ``get_matrix``.

    Notes
    -----
    Any object implementing this interface can be passed to ``minimize``
    and will be used by the compatible solvers to approximate the Hessian
    (or inverse Hessian) required by the optimization algorithm.
    """

    def initialize(self, n, approx_type):
        """Allocate and initialize the internal matrix.

        Reserves whatever internal storage is needed for maintaining the
        Hessian approximation or its inverse.

        Parameters
        ----------
        n : int
            Problem dimension.
        approx_type : {'hess', 'inv_hess'}
            Selects what is stored and updated: the Hessian itself
            ('hess') or its inverse ('inv_hess').
        """
        raise NotImplementedError(
            "The method ``initialize(n, approx_type)`` is not implemented.")

    def update(self, delta_x, delta_grad):
        """Update the internal matrix from the latest step information.

        Depending on ``approx_type``, updates either the Hessian
        approximation or its inverse using the most recent pair of
        evaluated points.

        Parameters
        ----------
        delta_x : ndarray
            Difference between the two points where the gradient was
            evaluated: ``delta_x = x2 - x1``.
        delta_grad : ndarray
            Corresponding gradient difference:
            ``delta_grad = grad(x2) - grad(x1)``.
        """
        raise NotImplementedError(
            "The method ``update(delta_x, delta_grad)`` is not implemented.")

    def dot(self, p):
        """Multiply the internal matrix by the given vector.

        Parameters
        ----------
        p : array_like
            1-D array representing a vector.

        Returns
        -------
        Hp : array
            1-D array holding the product of the approximation matrix
            with ``p``.
        """
        raise NotImplementedError(
            "The method ``dot(p)`` is not implemented.")

    def get_matrix(self):
        """Return the current internal matrix as a dense array.

        Returns
        -------
        H : ndarray, shape (n, n)
            Dense matrix holding either the Hessian approximation or its
            inverse, according to ``approx_type``.
        """
        raise NotImplementedError(
            "The method ``get_matrix(p)`` is not implemented.")
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class FullHessianUpdateStrategy(HessianUpdateStrategy):
    """Hessian update strategy with full dimensional internal representation.
    """
    # Double-precision BLAS routines shared by all instances.
    _syr = get_blas_funcs('syr', dtype='d')    # Symmetric rank-1 update
    _syr2 = get_blas_funcs('syr2', dtype='d')  # Symmetric rank-2 update
    # Symmetric matrix-vector product
    _symv = get_blas_funcs('symv', dtype='d')

    def __init__(self, init_scale='auto'):
        self.init_scale = init_scale
        # Until initialize is called we can't really use the class,
        # so it makes sense to set everything to None.
        self.first_iteration = None
        self.approx_type = None
        self.B = None  # Hessian approximation (used when approx_type == 'hess')
        self.H = None  # inverse-Hessian approximation (approx_type == 'inv_hess')

    def initialize(self, n, approx_type):
        """Initialize internal matrix.

        Allocate internal memory for storing and updating
        the Hessian or its inverse.

        Parameters
        ----------
        n : int
            Problem dimension.
        approx_type : {'hess', 'inv_hess'}
            Selects either the Hessian or the inverse Hessian.
            When set to 'hess' the Hessian will be stored and updated.
            When set to 'inv_hess' its inverse will be used instead.

        Raises
        ------
        ValueError
            If `approx_type` is neither 'hess' nor 'inv_hess'.
        """
        self.first_iteration = True
        self.n = n
        self.approx_type = approx_type
        if approx_type not in ('hess', 'inv_hess'):
            raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.")
        # Create matrix: start from the identity; it is rescaled (or
        # replaced) on the first call to `update`.
        if self.approx_type == 'hess':
            self.B = np.eye(n, dtype=float)
        else:
            self.H = np.eye(n, dtype=float)

    def _auto_scale(self, delta_x, delta_grad):
        # Heuristic to scale matrix at first iteration.
        # Described in Nocedal and Wright "Numerical Optimization"
        # p.143 formula (6.20).
        s_norm2 = np.dot(delta_x, delta_x)
        y_norm2 = np.dot(delta_grad, delta_grad)
        ys = np.abs(np.dot(delta_grad, delta_x))
        # Fall back to 1 (identity scaling) on degenerate steps/gradients.
        if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0:
            return 1
        # The heuristic is inverted depending on whether the Hessian or
        # its inverse is being approximated.
        if self.approx_type == 'hess':
            return y_norm2 / ys
        else:
            return ys / y_norm2

    def _update_implementation(self, delta_x, delta_grad):
        # Hook implemented by concrete subclasses (e.g. BFGS, SR1).
        raise NotImplementedError("The method ``_update_implementation``"
                                  " is not implemented.")

    def update(self, delta_x, delta_grad):
        """Update internal matrix.

        Update Hessian matrix or its inverse (depending on how 'approx_type'
        is defined) using information about the last evaluated points.

        Parameters
        ----------
        delta_x : ndarray
            The difference between two points the gradient
            function have been evaluated at: ``delta_x = x2 - x1``.
        delta_grad : ndarray
            The difference between the gradients:
            ``delta_grad = grad(x2) - grad(x1)``.

        Raises
        ------
        TypeError
            If `init_scale` contains complex entries.
        ValueError
            If `init_scale` is an array of the wrong shape or is not
            symmetric.
        """
        # A zero step carries no curvature information; silently skip.
        if np.all(delta_x == 0.0):
            return
        if np.all(delta_grad == 0.0):
            warn('delta_grad == 0.0. Check if the approximated '
                 'function is linear. If the function is linear '
                 'better results can be obtained by defining the '
                 'Hessian as zero instead of using quasi-Newton '
                 'approximations.',
                 UserWarning, stacklevel=2)
            return
        if self.first_iteration:
            # Get user specific scale
            if isinstance(self.init_scale, str) and self.init_scale == "auto":
                scale = self._auto_scale(delta_x, delta_grad)
            else:
                scale = self.init_scale

            # Check for complex: numpy will silently cast a complex array to
            # a real one but not so for scalar as it raises a TypeError.
            # Checking here brings a consistent behavior.
            replace = False
            if np.size(scale) == 1:
                # to account for the legacy behavior having the exact same cast
                scale = float(scale)
            elif np.iscomplexobj(scale):
                raise TypeError("init_scale contains complex elements, "
                                "must be real.")
            else:  # test explicitly for allowed shapes and values
                # A full array was supplied: it will replace the identity
                # rather than scale it.
                replace = True
                if self.approx_type == 'hess':
                    shape = np.shape(self.B)
                    dtype = self.B.dtype
                else:
                    shape = np.shape(self.H)
                    dtype = self.H.dtype
                # copy, will replace the original
                scale = np.array(scale, dtype=dtype, copy=True)

                # it has to match the shape of the matrix for the multiplication,
                # no implicit broadcasting is allowed
                if shape != (init_shape := np.shape(scale)):
                    raise ValueError("If init_scale is an array, it must have the "
                                     f"dimensions of the hess/inv_hess: {shape}."
                                     f" Got {init_shape}.")
                if not issymmetric(scale):
                    raise ValueError("If init_scale is an array, it must be"
                                     " symmetric (passing scipy.linalg.issymmetric)"
                                     " to be an approximation of a hess/inv_hess.")

            # Scale initial matrix with ``scale * np.eye(n)`` or replace it
            # outright when a full array was supplied.
            if self.approx_type == 'hess':
                if replace:
                    self.B = scale
                else:
                    self.B *= scale
            else:
                if replace:
                    self.H = scale
                else:
                    self.H *= scale
            self.first_iteration = False
        self._update_implementation(delta_x, delta_grad)

    def dot(self, p):
        """Compute the product of the internal matrix with the given vector.

        Parameters
        ----------
        p : array_like
            1-D array representing a vector.

        Returns
        -------
        Hp : array
            1-D represents the result of multiplying the approximation matrix
            by vector p.
        """
        # symv only reads one triangle of B/H, which is all the BLAS
        # update routines maintain.
        if self.approx_type == 'hess':
            return self._symv(1, self.B, p)
        else:
            return self._symv(1, self.H, p)

    def get_matrix(self):
        """Return the current internal matrix.

        Returns
        -------
        M : ndarray, shape (n, n)
            Dense matrix containing either the Hessian or its inverse
            (depending on how `approx_type` was defined).
        """
        if self.approx_type == 'hess':
            M = np.copy(self.B)
        else:
            M = np.copy(self.H)
        # The BLAS routines only maintain one triangle; mirror the upper
        # triangle into the lower one to return a full symmetric matrix.
        li = np.tril_indices_from(M, k=-1)
        M[li] = M.T[li]
        return M
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class BFGS(FullHessianUpdateStrategy):
    """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.

    Parameters
    ----------
    exception_strategy : {'skip_update', 'damp_update'}, optional
        Define how to proceed when the curvature condition is violated.
        Set it to 'skip_update' to just skip the update. Or, alternatively,
        set it to 'damp_update' to interpolate between the actual BFGS
        result and the unmodified matrix. Both exceptions strategies
        are explained in [1]_, p.536-537.
    min_curvature : float
        This number, scaled by a normalization factor, defines the
        minimum curvature ``dot(delta_grad, delta_x)`` allowed to go
        unaffected by the exception strategy. By default is equal to
        1e-8 when ``exception_strategy = 'skip_update'`` and equal
        to 0.2 when ``exception_strategy = 'damp_update'``.
    init_scale : {float, np.array, 'auto'}
        This parameter can be used to initialize the Hessian or its
        inverse. When a float is given, the relevant array is initialized
        to ``np.eye(n) * init_scale``, where ``n`` is the problem dimension.
        Alternatively, if a precisely ``(n, n)`` shaped, symmetric array is given,
        this array will be used. Otherwise an error is generated.
        Set it to 'auto' in order to use an automatic heuristic for choosing
        the initial scale. The heuristic is described in [1]_, p.143.
        The default is 'auto'.

    Notes
    -----
    The update is based on the description in [1]_, p.140.

    References
    ----------
    .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
           Second Edition (2006).
    """

    def __init__(self, exception_strategy='skip_update', min_curvature=None,
                 init_scale='auto'):
        # The default min_curvature depends on the chosen exception
        # strategy (see class docstring).
        if exception_strategy == 'skip_update':
            if min_curvature is not None:
                self.min_curvature = min_curvature
            else:
                self.min_curvature = 1e-8
        elif exception_strategy == 'damp_update':
            if min_curvature is not None:
                self.min_curvature = min_curvature
            else:
                self.min_curvature = 0.2
        else:
            raise ValueError("`exception_strategy` must be 'skip_update' "
                             "or 'damp_update'.")

        super().__init__(init_scale)
        self.exception_strategy = exception_strategy

    def _update_inverse_hessian(self, ys, Hy, yHy, s):
        """Update the inverse Hessian matrix.

        BFGS update using the formula:

            ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T)
                     - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)``

        where ``s = delta_x`` and ``y = delta_grad``. This formula is
        equivalent to (6.17) in [1]_ written in a more efficient way
        for implementation.

        References
        ----------
        .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
               Second Edition (2006).
        """
        # Rank-2 correction followed by a rank-1 correction; the order of
        # these two BLAS calls matters.
        self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H)
        self.H = self._syr((ys + yHy) / ys ** 2, s, a=self.H)

    def _update_hessian(self, ys, Bs, sBs, y):
        """Update the Hessian matrix.

        BFGS update using the formula:

            ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y``

        where ``s`` is short for ``delta_x`` and ``y`` is short
        for ``delta_grad``. Formula (6.19) in [1]_.

        References
        ----------
        .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
               Second Edition (2006).
        """
        # Two symmetric rank-1 corrections implement formula (6.19).
        self.B = self._syr(1.0 / ys, y, a=self.B)
        self.B = self._syr(-1.0 / sBs, Bs, a=self.B)

    def _update_implementation(self, delta_x, delta_grad):
        # Auxiliary variables w and z: their roles swap depending on
        # whether the Hessian or its inverse is being approximated.
        if self.approx_type == 'hess':
            w = delta_x
            z = delta_grad
        else:
            w = delta_grad
            z = delta_x
        # Do some common operations
        wz = np.dot(w, z)
        Mw = self.dot(w)
        wMw = Mw.dot(w)
        # Guarantee that wMw > 0 by reinitializing matrix.
        # While this is always true in exact arithmetic,
        # indefinite matrix may appear due to roundoff errors.
        if wMw <= 0.0:
            scale = self._auto_scale(delta_x, delta_grad)
            # Reinitialize matrix
            if self.approx_type == 'hess':
                self.B = scale * np.eye(self.n, dtype=float)
            else:
                self.H = scale * np.eye(self.n, dtype=float)
            # Do common operations for new matrix
            Mw = self.dot(w)
            wMw = Mw.dot(w)
        # Check if curvature condition is violated
        if wz <= self.min_curvature * wMw:
            # If the option 'skip_update' is set
            # we just skip the update when the condition
            # is violated.
            if self.exception_strategy == 'skip_update':
                return
            # If the option 'damp_update' is set we
            # interpolate between the actual BFGS
            # result and the unmodified matrix.
            elif self.exception_strategy == 'damp_update':
                update_factor = (1-self.min_curvature) / (1 - wz/wMw)
                z = update_factor*z + (1-update_factor)*Mw
                wz = np.dot(w, z)
        # Update matrix
        if self.approx_type == 'hess':
            self._update_hessian(wz, Mw, wMw, z)
        else:
            self._update_inverse_hessian(wz, Mw, wMw, z)
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
class SR1(FullHessianUpdateStrategy):
    """Symmetric-rank-1 Hessian update strategy.

    Parameters
    ----------
    min_denominator : float
        This number, scaled by a normalization factor,
        defines the minimum denominator magnitude allowed
        in the update. When the condition is violated we skip
        the update. By default uses ``1e-8``.
    init_scale : {float, np.array, 'auto'}, optional
        This parameter can be used to initialize the Hessian or its
        inverse. When a float is given, the relevant array is initialized
        to ``np.eye(n) * init_scale``, where ``n`` is the problem dimension.
        Alternatively, if a precisely ``(n, n)`` shaped, symmetric array is given,
        this array will be used. Otherwise an error is generated.
        Set it to 'auto' in order to use an automatic heuristic for choosing
        the initial scale. The heuristic is described in [1]_, p.143.
        The default is 'auto'.

    Notes
    -----
    The update is based on the description in [1]_, p.144-146.

    References
    ----------
    .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
           Second Edition (2006).
    """

    def __init__(self, min_denominator=1e-8, init_scale='auto'):
        super().__init__(init_scale)
        self.min_denominator = min_denominator

    def _update_implementation(self, delta_x, delta_grad):
        # The roles of step and gradient difference swap depending on
        # whether the Hessian or its inverse is being approximated.
        hess_mode = self.approx_type == 'hess'
        if hess_mode:
            w, z = delta_x, delta_grad
        else:
            w, z = delta_grad, delta_x
        # Residual between the observed and predicted change.
        residual = z - self.dot(w)
        denominator = np.dot(w, residual)
        # Skip the update whenever the denominator is too small relative
        # to the vector norms (the standard SR1 safeguard).
        threshold = self.min_denominator * norm(w) * norm(residual)
        if np.abs(denominator) <= threshold:
            return
        # Apply the symmetric rank-1 correction via BLAS ``syr``.
        if hess_mode:
            self.B = self._syr(1/denominator, residual, a=self.B)
        else:
            self.H = self._syr(1/denominator, residual, a=self.H)
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d0e0cc53dba47fe455ac20e0c5588de5dcd553f4c8df5bc5b11a81d84339d015
|
| 3 |
+
size 524785
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog.py
ADDED
|
@@ -0,0 +1,716 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A top-level linear programming interface.
|
| 3 |
+
|
| 4 |
+
.. versionadded:: 0.15.0
|
| 5 |
+
|
| 6 |
+
Functions
|
| 7 |
+
---------
|
| 8 |
+
.. autosummary::
|
| 9 |
+
:toctree: generated/
|
| 10 |
+
|
| 11 |
+
linprog
|
| 12 |
+
linprog_verbose_callback
|
| 13 |
+
linprog_terse_callback
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from ._optimize import OptimizeResult, OptimizeWarning
|
| 20 |
+
from warnings import warn
|
| 21 |
+
from ._linprog_highs import _linprog_highs
|
| 22 |
+
from ._linprog_ip import _linprog_ip
|
| 23 |
+
from ._linprog_simplex import _linprog_simplex
|
| 24 |
+
from ._linprog_rs import _linprog_rs
|
| 25 |
+
from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401
|
| 26 |
+
_linprog_rs_doc, _linprog_simplex_doc,
|
| 27 |
+
_linprog_highs_ipm_doc, _linprog_highs_ds_doc)
|
| 28 |
+
from ._linprog_util import (
|
| 29 |
+
_parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
|
| 30 |
+
_postsolve, _check_result, _display_summary)
|
| 31 |
+
from copy import deepcopy
|
| 32 |
+
|
| 33 |
+
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
|
| 34 |
+
|
| 35 |
+
__docformat__ = "restructuredtext en"
|
| 36 |
+
|
| 37 |
+
LINPROG_METHODS = [
|
| 38 |
+
'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm'
|
| 39 |
+
]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def linprog_verbose_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces detailed output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables; a zero slack means the
            corresponding inequality constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            that is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the T has an additional row
            representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    x, fun = res['x'], res['fun']
    phase, status = res['phase'], res['status']
    nit, message = res['nit'], res['message']
    complete = res['complete']

    # Temporarily widen numpy's print format so the solution vector fits
    # on one line; restore the caller's settings before returning.
    saved_printoptions = np.get_printoptions()
    np.set_printoptions(linewidth=500,
                        formatter={'float': lambda v: f"{v: 12.4f}"})

    if status:
        print('--------- Simplex Early Exit -------\n')
        print(f'The simplex method exited early with status {status:d}')
        print(message)
    elif complete:
        print('--------- Simplex Complete --------\n')
        print(f'Iterations required: {nit}')
    else:
        print(f'--------- Iteration {nit:d} ---------\n')

    if nit > 0:
        header = ('Current Pseudo-Objective Value:' if phase == 1
                  else 'Current Objective Value:')
        print(header)
        print('f = ', fun)
        print()
        print('Current Solution Vector:')
        print('x = ', x)
        print()

    np.set_printoptions(**saved_printoptions)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def linprog_terse_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces brief output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables; a zero slack means the
            corresponding inequality constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            that is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the T has an additional row
            representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    nit, x = res['nit'], res['x']

    # Print the column header only once, before the first iteration.
    if nit == 0:
        print("Iter: X:")
    print(f"{nit: <5d} ", end="")
    print(x)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
|
| 171 |
+
bounds=(0, None), method='highs', callback=None,
|
| 172 |
+
options=None, x0=None, integrality=None):
|
| 173 |
+
r"""
|
| 174 |
+
Linear programming: minimize a linear objective function subject to linear
|
| 175 |
+
equality and inequality constraints.
|
| 176 |
+
|
| 177 |
+
Linear programming solves problems of the following form:
|
| 178 |
+
|
| 179 |
+
.. math::
|
| 180 |
+
|
| 181 |
+
\min_x \ & c^T x \\
|
| 182 |
+
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
|
| 183 |
+
& A_{eq} x = b_{eq},\\
|
| 184 |
+
& l \leq x \leq u ,
|
| 185 |
+
|
| 186 |
+
where :math:`x` is a vector of decision variables; :math:`c`,
|
| 187 |
+
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
|
| 188 |
+
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
|
| 189 |
+
|
| 190 |
+
Alternatively, that's:
|
| 191 |
+
|
| 192 |
+
- minimize ::
|
| 193 |
+
|
| 194 |
+
c @ x
|
| 195 |
+
|
| 196 |
+
- such that ::
|
| 197 |
+
|
| 198 |
+
A_ub @ x <= b_ub
|
| 199 |
+
A_eq @ x == b_eq
|
| 200 |
+
lb <= x <= ub
|
| 201 |
+
|
| 202 |
+
Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be
|
| 203 |
+
specified with ``bounds``.
|
| 204 |
+
|
| 205 |
+
Parameters
|
| 206 |
+
----------
|
| 207 |
+
c : 1-D array
|
| 208 |
+
The coefficients of the linear objective function to be minimized.
|
| 209 |
+
A_ub : 2-D array, optional
|
| 210 |
+
The inequality constraint matrix. Each row of ``A_ub`` specifies the
|
| 211 |
+
coefficients of a linear inequality constraint on ``x``.
|
| 212 |
+
b_ub : 1-D array, optional
|
| 213 |
+
The inequality constraint vector. Each element represents an
|
| 214 |
+
upper bound on the corresponding value of ``A_ub @ x``.
|
| 215 |
+
A_eq : 2-D array, optional
|
| 216 |
+
The equality constraint matrix. Each row of ``A_eq`` specifies the
|
| 217 |
+
coefficients of a linear equality constraint on ``x``.
|
| 218 |
+
b_eq : 1-D array, optional
|
| 219 |
+
The equality constraint vector. Each element of ``A_eq @ x`` must equal
|
| 220 |
+
the corresponding element of ``b_eq``.
|
| 221 |
+
bounds : sequence, optional
|
| 222 |
+
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
|
| 223 |
+
the minimum and maximum values of that decision variable.
|
| 224 |
+
If a single tuple ``(min, max)`` is provided, then ``min`` and ``max``
|
| 225 |
+
will serve as bounds for all decision variables.
|
| 226 |
+
Use ``None`` to indicate that there is no bound. For instance, the
|
| 227 |
+
default bound ``(0, None)`` means that all decision variables are
|
| 228 |
+
non-negative, and the pair ``(None, None)`` means no bounds at all,
|
| 229 |
+
i.e. all variables are allowed to be any real.
|
| 230 |
+
method : str, optional
|
| 231 |
+
The algorithm used to solve the standard form problem.
|
| 232 |
+
:ref:`'highs' <optimize.linprog-highs>` (default),
|
| 233 |
+
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
|
| 234 |
+
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
|
| 235 |
+
:ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
|
| 236 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
|
| 237 |
+
and
|
| 238 |
+
:ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
|
| 239 |
+
The legacy methods are deprecated and will be removed in SciPy 1.11.0.
|
| 240 |
+
callback : callable, optional
|
| 241 |
+
If a callback function is provided, it will be called at least once per
|
| 242 |
+
iteration of the algorithm. The callback function must accept a single
|
| 243 |
+
`scipy.optimize.OptimizeResult` consisting of the following fields:
|
| 244 |
+
|
| 245 |
+
x : 1-D array
|
| 246 |
+
The current solution vector.
|
| 247 |
+
fun : float
|
| 248 |
+
The current value of the objective function ``c @ x``.
|
| 249 |
+
success : bool
|
| 250 |
+
``True`` when the algorithm has completed successfully.
|
| 251 |
+
slack : 1-D array
|
| 252 |
+
The (nominally positive) values of the slack,
|
| 253 |
+
``b_ub - A_ub @ x``.
|
| 254 |
+
con : 1-D array
|
| 255 |
+
The (nominally zero) residuals of the equality constraints,
|
| 256 |
+
``b_eq - A_eq @ x``.
|
| 257 |
+
phase : int
|
| 258 |
+
The phase of the algorithm being executed.
|
| 259 |
+
status : int
|
| 260 |
+
An integer representing the status of the algorithm.
|
| 261 |
+
|
| 262 |
+
``0`` : Optimization proceeding nominally.
|
| 263 |
+
|
| 264 |
+
``1`` : Iteration limit reached.
|
| 265 |
+
|
| 266 |
+
``2`` : Problem appears to be infeasible.
|
| 267 |
+
|
| 268 |
+
``3`` : Problem appears to be unbounded.
|
| 269 |
+
|
| 270 |
+
``4`` : Numerical difficulties encountered.
|
| 271 |
+
|
| 272 |
+
nit : int
|
| 273 |
+
The current iteration number.
|
| 274 |
+
message : str
|
| 275 |
+
A string descriptor of the algorithm status.
|
| 276 |
+
|
| 277 |
+
Callback functions are not currently supported by the HiGHS methods.
|
| 278 |
+
|
| 279 |
+
options : dict, optional
|
| 280 |
+
A dictionary of solver options. All methods accept the following
|
| 281 |
+
options:
|
| 282 |
+
|
| 283 |
+
maxiter : int
|
| 284 |
+
Maximum number of iterations to perform.
|
| 285 |
+
Default: see method-specific documentation.
|
| 286 |
+
disp : bool
|
| 287 |
+
Set to ``True`` to print convergence messages.
|
| 288 |
+
Default: ``False``.
|
| 289 |
+
presolve : bool
|
| 290 |
+
Set to ``False`` to disable automatic presolve.
|
| 291 |
+
Default: ``True``.
|
| 292 |
+
|
| 293 |
+
All methods except the HiGHS solvers also accept:
|
| 294 |
+
|
| 295 |
+
tol : float
|
| 296 |
+
A tolerance which determines when a residual is "close enough" to
|
| 297 |
+
zero to be considered exactly zero.
|
| 298 |
+
autoscale : bool
|
| 299 |
+
Set to ``True`` to automatically perform equilibration.
|
| 300 |
+
Consider using this option if the numerical values in the
|
| 301 |
+
constraints are separated by several orders of magnitude.
|
| 302 |
+
Default: ``False``.
|
| 303 |
+
rr : bool
|
| 304 |
+
Set to ``False`` to disable automatic redundancy removal.
|
| 305 |
+
Default: ``True``.
|
| 306 |
+
rr_method : string
|
| 307 |
+
Method used to identify and remove redundant rows from the
|
| 308 |
+
equality constraint matrix after presolve. For problems with
|
| 309 |
+
dense input, the available methods for redundancy removal are:
|
| 310 |
+
|
| 311 |
+
"SVD":
|
| 312 |
+
Repeatedly performs singular value decomposition on
|
| 313 |
+
the matrix, detecting redundant rows based on nonzeros
|
| 314 |
+
in the left singular vectors that correspond with
|
| 315 |
+
zero singular values. May be fast when the matrix is
|
| 316 |
+
nearly full rank.
|
| 317 |
+
"pivot":
|
| 318 |
+
Uses the algorithm presented in [5]_ to identify
|
| 319 |
+
redundant rows.
|
| 320 |
+
"ID":
|
| 321 |
+
Uses a randomized interpolative decomposition.
|
| 322 |
+
Identifies columns of the matrix transpose not used in
|
| 323 |
+
a full-rank interpolative decomposition of the matrix.
|
| 324 |
+
None:
|
| 325 |
+
Uses "svd" if the matrix is nearly full rank, that is,
|
| 326 |
+
the difference between the matrix rank and the number
|
| 327 |
+
of rows is less than five. If not, uses "pivot". The
|
| 328 |
+
behavior of this default is subject to change without
|
| 329 |
+
prior notice.
|
| 330 |
+
|
| 331 |
+
Default: None.
|
| 332 |
+
For problems with sparse input, this option is ignored, and the
|
| 333 |
+
pivot-based algorithm presented in [5]_ is used.
|
| 334 |
+
|
| 335 |
+
For method-specific options, see
|
| 336 |
+
:func:`show_options('linprog') <show_options>`.
|
| 337 |
+
|
| 338 |
+
x0 : 1-D array, optional
|
| 339 |
+
Guess values of the decision variables, which will be refined by
|
| 340 |
+
the optimization algorithm. This argument is currently used only by the
|
| 341 |
+
'revised simplex' method, and can only be used if `x0` represents a
|
| 342 |
+
basic feasible solution.
|
| 343 |
+
|
| 344 |
+
integrality : 1-D array or int, optional
|
| 345 |
+
Indicates the type of integrality constraint on each decision variable.
|
| 346 |
+
|
| 347 |
+
``0`` : Continuous variable; no integrality constraint.
|
| 348 |
+
|
| 349 |
+
``1`` : Integer variable; decision variable must be an integer
|
| 350 |
+
within `bounds`.
|
| 351 |
+
|
| 352 |
+
``2`` : Semi-continuous variable; decision variable must be within
|
| 353 |
+
`bounds` or take value ``0``.
|
| 354 |
+
|
| 355 |
+
``3`` : Semi-integer variable; decision variable must be an integer
|
| 356 |
+
within `bounds` or take value ``0``.
|
| 357 |
+
|
| 358 |
+
By default, all variables are continuous.
|
| 359 |
+
|
| 360 |
+
For mixed integrality constraints, supply an array of shape `c.shape`.
|
| 361 |
+
To infer a constraint on each decision variable from shorter inputs,
|
| 362 |
+
the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
|
| 363 |
+
|
| 364 |
+
This argument is currently used only by the ``'highs'`` method and
|
| 365 |
+
ignored otherwise.
|
| 366 |
+
|
| 367 |
+
Returns
|
| 368 |
+
-------
|
| 369 |
+
res : OptimizeResult
|
| 370 |
+
A :class:`scipy.optimize.OptimizeResult` consisting of the fields
|
| 371 |
+
below. Note that the return types of the fields may depend on whether
|
| 372 |
+
the optimization was successful, therefore it is recommended to check
|
| 373 |
+
`OptimizeResult.status` before relying on the other fields:
|
| 374 |
+
|
| 375 |
+
x : 1-D array
|
| 376 |
+
The values of the decision variables that minimizes the
|
| 377 |
+
objective function while satisfying the constraints.
|
| 378 |
+
fun : float
|
| 379 |
+
The optimal value of the objective function ``c @ x``.
|
| 380 |
+
slack : 1-D array
|
| 381 |
+
The (nominally positive) values of the slack variables,
|
| 382 |
+
``b_ub - A_ub @ x``.
|
| 383 |
+
con : 1-D array
|
| 384 |
+
The (nominally zero) residuals of the equality constraints,
|
| 385 |
+
``b_eq - A_eq @ x``.
|
| 386 |
+
success : bool
|
| 387 |
+
``True`` when the algorithm succeeds in finding an optimal
|
| 388 |
+
solution.
|
| 389 |
+
status : int
|
| 390 |
+
An integer representing the exit status of the algorithm.
|
| 391 |
+
|
| 392 |
+
``0`` : Optimization terminated successfully.
|
| 393 |
+
|
| 394 |
+
``1`` : Iteration limit reached.
|
| 395 |
+
|
| 396 |
+
``2`` : Problem appears to be infeasible.
|
| 397 |
+
|
| 398 |
+
``3`` : Problem appears to be unbounded.
|
| 399 |
+
|
| 400 |
+
``4`` : Numerical difficulties encountered.
|
| 401 |
+
|
| 402 |
+
nit : int
|
| 403 |
+
The total number of iterations performed in all phases.
|
| 404 |
+
message : str
|
| 405 |
+
A string descriptor of the exit status of the algorithm.
|
| 406 |
+
|
| 407 |
+
See Also
|
| 408 |
+
--------
|
| 409 |
+
show_options : Additional options accepted by the solvers.
|
| 410 |
+
|
| 411 |
+
Notes
|
| 412 |
+
-----
|
| 413 |
+
This section describes the available solvers that can be selected by the
|
| 414 |
+
'method' parameter.
|
| 415 |
+
|
| 416 |
+
`'highs-ds'` and
|
| 417 |
+
`'highs-ipm'` are interfaces to the
|
| 418 |
+
HiGHS simplex and interior-point method solvers [13]_, respectively.
|
| 419 |
+
`'highs'` (default) chooses between
|
| 420 |
+
the two automatically. These are the fastest linear
|
| 421 |
+
programming solvers in SciPy, especially for large, sparse problems;
|
| 422 |
+
which of these two is faster is problem-dependent.
|
| 423 |
+
The other solvers (`'interior-point'`, `'revised simplex'`, and
|
| 424 |
+
`'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.
|
| 425 |
+
|
| 426 |
+
Method *highs-ds* is a wrapper of the C++ high performance dual
|
| 427 |
+
revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
|
| 428 |
+
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
|
| 429 |
+
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
|
| 430 |
+
as a simplex solver. Method *highs* chooses between the two automatically.
|
| 431 |
+
For new code involving `linprog`, we recommend explicitly choosing one of
|
| 432 |
+
these three method values.
|
| 433 |
+
|
| 434 |
+
.. versionadded:: 1.6.0
|
| 435 |
+
|
| 436 |
+
Method *interior-point* uses the primal-dual path following algorithm
|
| 437 |
+
as outlined in [4]_. This algorithm supports sparse constraint matrices and
|
| 438 |
+
is typically faster than the simplex methods, especially for large, sparse
|
| 439 |
+
problems. Note, however, that the solution returned may be slightly less
|
| 440 |
+
accurate than those of the simplex methods and will not, in general,
|
| 441 |
+
correspond with a vertex of the polytope defined by the constraints.
|
| 442 |
+
|
| 443 |
+
.. versionadded:: 1.0.0
|
| 444 |
+
|
| 445 |
+
Method *revised simplex* uses the revised simplex method as described in
|
| 446 |
+
[9]_, except that a factorization [11]_ of the basis matrix, rather than
|
| 447 |
+
its inverse, is efficiently maintained and used to solve the linear systems
|
| 448 |
+
at each iteration of the algorithm.
|
| 449 |
+
|
| 450 |
+
.. versionadded:: 1.3.0
|
| 451 |
+
|
| 452 |
+
Method *simplex* uses a traditional, full-tableau implementation of
|
| 453 |
+
Dantzig's simplex algorithm [1]_, [2]_ (*not* the
|
| 454 |
+
Nelder-Mead simplex). This algorithm is included for backwards
|
| 455 |
+
compatibility and educational purposes.
|
| 456 |
+
|
| 457 |
+
.. versionadded:: 0.15.0
|
| 458 |
+
|
| 459 |
+
Before applying *interior-point*, *revised simplex*, or *simplex*,
|
| 460 |
+
a presolve procedure based on [8]_ attempts
|
| 461 |
+
to identify trivial infeasibilities, trivial unboundedness, and potential
|
| 462 |
+
problem simplifications. Specifically, it checks for:
|
| 463 |
+
|
| 464 |
+
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
|
| 465 |
+
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
|
| 466 |
+
variables;
|
| 467 |
+
- column singletons in ``A_eq``, representing fixed variables; and
|
| 468 |
+
- column singletons in ``A_ub``, representing simple bounds.
|
| 469 |
+
|
| 470 |
+
If presolve reveals that the problem is unbounded (e.g. an unconstrained
|
| 471 |
+
and unbounded variable has negative cost) or infeasible (e.g., a row of
|
| 472 |
+
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
|
| 473 |
+
terminates with the appropriate status code. Note that presolve terminates
|
| 474 |
+
as soon as any sign of unboundedness is detected; consequently, a problem
|
| 475 |
+
may be reported as unbounded when in reality the problem is infeasible
|
| 476 |
+
(but infeasibility has not been detected yet). Therefore, if it is
|
| 477 |
+
important to know whether the problem is actually infeasible, solve the
|
| 478 |
+
problem again with option ``presolve=False``.
|
| 479 |
+
|
| 480 |
+
If neither infeasibility nor unboundedness are detected in a single pass
|
| 481 |
+
of the presolve, bounds are tightened where possible and fixed
|
| 482 |
+
variables are removed from the problem. Then, linearly dependent rows
|
| 483 |
+
of the ``A_eq`` matrix are removed, (unless they represent an
|
| 484 |
+
infeasibility) to avoid numerical difficulties in the primary solve
|
| 485 |
+
routine. Note that rows that are nearly linearly dependent (within a
|
| 486 |
+
prescribed tolerance) may also be removed, which can change the optimal
|
| 487 |
+
solution in rare cases. If this is a concern, eliminate redundancy from
|
| 488 |
+
your problem formulation and run with option ``rr=False`` or
|
| 489 |
+
``presolve=False``.
|
| 490 |
+
|
| 491 |
+
Several potential improvements can be made here: additional presolve
|
| 492 |
+
checks outlined in [8]_ should be implemented, the presolve routine should
|
| 493 |
+
be run multiple times (until no further simplifications can be made), and
|
| 494 |
+
more of the efficiency improvements from [5]_ should be implemented in the
|
| 495 |
+
redundancy removal routines.
|
| 496 |
+
|
| 497 |
+
After presolve, the problem is transformed to standard form by converting
|
| 498 |
+
the (tightened) simple bounds to upper bound constraints, introducing
|
| 499 |
+
non-negative slack variables for inequality constraints, and expressing
|
| 500 |
+
unbounded variables as the difference between two non-negative variables.
|
| 501 |
+
Optionally, the problem is automatically scaled via equilibration [12]_.
|
| 502 |
+
The selected algorithm solves the standard form problem, and a
|
| 503 |
+
postprocessing routine converts the result to a solution to the original
|
| 504 |
+
problem.
|
| 505 |
+
|
| 506 |
+
References
|
| 507 |
+
----------
|
| 508 |
+
.. [1] Dantzig, George B., Linear programming and extensions. Rand
|
| 509 |
+
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
|
| 510 |
+
1963
|
| 511 |
+
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
|
| 512 |
+
Mathematical Programming", McGraw-Hill, Chapter 4.
|
| 513 |
+
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
|
| 514 |
+
Mathematics of Operations Research (2), 1977: pp. 103-107.
|
| 515 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 516 |
+
optimizer for linear programming: an implementation of the
|
| 517 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 518 |
+
2000. 197-232.
|
| 519 |
+
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
|
| 520 |
+
large-scale linear programming." Optimization Methods and Software
|
| 521 |
+
6.3 (1995): 219-227.
|
| 522 |
+
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
|
| 523 |
+
Programming based on Newton's Method." Unpublished Course Notes,
|
| 524 |
+
March 2004. Available 2/25/2017 at
|
| 525 |
+
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
|
| 526 |
+
.. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
|
| 527 |
+
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
|
| 528 |
+
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
|
| 529 |
+
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
|
| 530 |
+
programming." Mathematical Programming 71.2 (1995): 221-245.
|
| 531 |
+
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
|
| 532 |
+
programming." Athena Scientific 1 (1997): 997.
|
| 533 |
+
.. [10] Andersen, Erling D., et al. Implementation of interior point
|
| 534 |
+
methods for large scale linear programming. HEC/Universite de
|
| 535 |
+
Geneve, 1996.
|
| 536 |
+
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
|
| 537 |
+
Journal in Numerische Mathematik 16.5 (1971): 414-434.
|
| 538 |
+
.. [12] Tomlin, J. A. "On scaling linear programming problems."
|
| 539 |
+
Mathematical Programming Study 4 (1975): 146-166.
|
| 540 |
+
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
|
| 541 |
+
"HiGHS - high performance software for linear optimization."
|
| 542 |
+
https://highs.dev/
|
| 543 |
+
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
|
| 544 |
+
simplex method." Mathematical Programming Computation, 10 (1),
|
| 545 |
+
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
|
| 546 |
+
|
| 547 |
+
Examples
|
| 548 |
+
--------
|
| 549 |
+
Consider the following problem:
|
| 550 |
+
|
| 551 |
+
.. math::
|
| 552 |
+
|
| 553 |
+
\min_{x_0, x_1} \ -x_0 + 4x_1 & \\
|
| 554 |
+
\mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
|
| 555 |
+
-x_0 - 2x_1 & \geq -4,\\
|
| 556 |
+
x_1 & \geq -3.
|
| 557 |
+
|
| 558 |
+
The problem is not presented in the form accepted by `linprog`. This is
|
| 559 |
+
easily remedied by converting the "greater than" inequality
|
| 560 |
+
constraint to a "less than" inequality constraint by
|
| 561 |
+
multiplying both sides by a factor of :math:`-1`. Note also that the last
|
| 562 |
+
constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
|
| 563 |
+
Finally, since there are no bounds on :math:`x_0`, we must explicitly
|
| 564 |
+
specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
|
| 565 |
+
default is for variables to be non-negative. After collecting coeffecients
|
| 566 |
+
into arrays and tuples, the input for this problem is:
|
| 567 |
+
|
| 568 |
+
>>> from scipy.optimize import linprog
|
| 569 |
+
>>> c = [-1, 4]
|
| 570 |
+
>>> A = [[-3, 1], [1, 2]]
|
| 571 |
+
>>> b = [6, 4]
|
| 572 |
+
>>> x0_bounds = (None, None)
|
| 573 |
+
>>> x1_bounds = (-3, None)
|
| 574 |
+
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
|
| 575 |
+
>>> res.fun
|
| 576 |
+
-22.0
|
| 577 |
+
>>> res.x
|
| 578 |
+
array([10., -3.])
|
| 579 |
+
>>> res.message
|
| 580 |
+
'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
|
| 581 |
+
|
| 582 |
+
The marginals (AKA dual values / shadow prices / Lagrange multipliers)
|
| 583 |
+
and residuals (slacks) are also available.
|
| 584 |
+
|
| 585 |
+
>>> res.ineqlin
|
| 586 |
+
residual: [ 3.900e+01 0.000e+00]
|
| 587 |
+
marginals: [-0.000e+00 -1.000e+00]
|
| 588 |
+
|
| 589 |
+
For example, because the marginal associated with the second inequality
|
| 590 |
+
constraint is -1, we expect the optimal value of the objective function
|
| 591 |
+
to decrease by ``eps`` if we add a small amount ``eps`` to the right hand
|
| 592 |
+
side of the second inequality constraint:
|
| 593 |
+
|
| 594 |
+
>>> eps = 0.05
|
| 595 |
+
>>> b[1] += eps
|
| 596 |
+
>>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
|
| 597 |
+
-22.05
|
| 598 |
+
|
| 599 |
+
Also, because the residual on the first inequality constraint is 39, we
|
| 600 |
+
can decrease the right hand side of the first constraint by 39 without
|
| 601 |
+
affecting the optimal solution.
|
| 602 |
+
|
| 603 |
+
>>> b = [6, 4] # reset to original values
|
| 604 |
+
>>> b[0] -= 39
|
| 605 |
+
>>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
|
| 606 |
+
-22.0
|
| 607 |
+
|
| 608 |
+
"""
|
| 609 |
+
|
| 610 |
+
meth = method.lower()
|
| 611 |
+
methods = {"highs", "highs-ds", "highs-ipm",
|
| 612 |
+
"simplex", "revised simplex", "interior-point"}
|
| 613 |
+
|
| 614 |
+
if meth not in methods:
|
| 615 |
+
raise ValueError(f"Unknown solver '{method}'")
|
| 616 |
+
|
| 617 |
+
if x0 is not None and meth != "revised simplex":
|
| 618 |
+
warning_message = "x0 is used only when method is 'revised simplex'. "
|
| 619 |
+
warn(warning_message, OptimizeWarning, stacklevel=2)
|
| 620 |
+
|
| 621 |
+
if np.any(integrality) and not meth == "highs":
|
| 622 |
+
integrality = None
|
| 623 |
+
warning_message = ("Only `method='highs'` supports integer "
|
| 624 |
+
"constraints. Ignoring `integrality`.")
|
| 625 |
+
warn(warning_message, OptimizeWarning, stacklevel=2)
|
| 626 |
+
elif np.any(integrality):
|
| 627 |
+
integrality = np.broadcast_to(integrality, np.shape(c))
|
| 628 |
+
else:
|
| 629 |
+
integrality = None
|
| 630 |
+
|
| 631 |
+
lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
|
| 632 |
+
lp, solver_options = _parse_linprog(lp, options, meth)
|
| 633 |
+
tol = solver_options.get('tol', 1e-9)
|
| 634 |
+
|
| 635 |
+
# Give unmodified problem to HiGHS
|
| 636 |
+
if meth.startswith('highs'):
|
| 637 |
+
if callback is not None:
|
| 638 |
+
raise NotImplementedError("HiGHS solvers do not support the "
|
| 639 |
+
"callback interface.")
|
| 640 |
+
highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
|
| 641 |
+
'highs': None}
|
| 642 |
+
|
| 643 |
+
sol = _linprog_highs(lp, solver=highs_solvers[meth],
|
| 644 |
+
**solver_options)
|
| 645 |
+
sol['status'], sol['message'] = (
|
| 646 |
+
_check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
|
| 647 |
+
sol['con'], lp.bounds, tol, sol['message'],
|
| 648 |
+
integrality))
|
| 649 |
+
sol['success'] = sol['status'] == 0
|
| 650 |
+
return OptimizeResult(sol)
|
| 651 |
+
|
| 652 |
+
warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
|
| 653 |
+
"1.11.0. Please use one of the HiGHS solvers (e.g. "
|
| 654 |
+
"`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)
|
| 655 |
+
|
| 656 |
+
iteration = 0
|
| 657 |
+
complete = False # will become True if solved in presolve
|
| 658 |
+
undo = []
|
| 659 |
+
|
| 660 |
+
# Keep the original arrays to calculate slack/residuals for original
|
| 661 |
+
# problem.
|
| 662 |
+
lp_o = deepcopy(lp)
|
| 663 |
+
|
| 664 |
+
# Solve trivial problem, eliminate variables, tighten bounds, etc.
|
| 665 |
+
rr_method = solver_options.pop('rr_method', None) # need to pop these;
|
| 666 |
+
rr = solver_options.pop('rr', True) # they're not passed to methods
|
| 667 |
+
c0 = 0 # we might get a constant term in the objective
|
| 668 |
+
if solver_options.pop('presolve', True):
|
| 669 |
+
(lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
|
| 670 |
+
rr_method,
|
| 671 |
+
tol)
|
| 672 |
+
|
| 673 |
+
C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
|
| 674 |
+
postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
|
| 675 |
+
|
| 676 |
+
if not complete:
|
| 677 |
+
A, b, c, c0, x0 = _get_Abc(lp, c0)
|
| 678 |
+
if solver_options.pop('autoscale', False):
|
| 679 |
+
A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
|
| 680 |
+
postsolve_args = postsolve_args[:-2] + (C, b_scale)
|
| 681 |
+
|
| 682 |
+
if meth == 'simplex':
|
| 683 |
+
x, status, message, iteration = _linprog_simplex(
|
| 684 |
+
c, c0=c0, A=A, b=b, callback=callback,
|
| 685 |
+
postsolve_args=postsolve_args, **solver_options)
|
| 686 |
+
elif meth == 'interior-point':
|
| 687 |
+
x, status, message, iteration = _linprog_ip(
|
| 688 |
+
c, c0=c0, A=A, b=b, callback=callback,
|
| 689 |
+
postsolve_args=postsolve_args, **solver_options)
|
| 690 |
+
elif meth == 'revised simplex':
|
| 691 |
+
x, status, message, iteration = _linprog_rs(
|
| 692 |
+
c, c0=c0, A=A, b=b, x0=x0, callback=callback,
|
| 693 |
+
postsolve_args=postsolve_args, **solver_options)
|
| 694 |
+
|
| 695 |
+
# Eliminate artificial variables, re-introduce presolved variables, etc.
|
| 696 |
+
disp = solver_options.get('disp', False)
|
| 697 |
+
|
| 698 |
+
x, fun, slack, con = _postsolve(x, postsolve_args, complete)
|
| 699 |
+
|
| 700 |
+
status, message = _check_result(x, fun, status, slack, con, lp_o.bounds,
|
| 701 |
+
tol, message, integrality)
|
| 702 |
+
|
| 703 |
+
if disp:
|
| 704 |
+
_display_summary(message, status, fun, iteration)
|
| 705 |
+
|
| 706 |
+
sol = {
|
| 707 |
+
'x': x,
|
| 708 |
+
'fun': fun,
|
| 709 |
+
'slack': slack,
|
| 710 |
+
'con': con,
|
| 711 |
+
'status': status,
|
| 712 |
+
'message': message,
|
| 713 |
+
'nit': iteration,
|
| 714 |
+
'success': status == 0}
|
| 715 |
+
|
| 716 |
+
return OptimizeResult(sol)
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""HiGHS Linear Optimization Methods
|
| 2 |
+
|
| 3 |
+
Interface to HiGHS linear optimization software.
|
| 4 |
+
https://highs.dev/
|
| 5 |
+
|
| 6 |
+
.. versionadded:: 1.5.0
|
| 7 |
+
|
| 8 |
+
References
|
| 9 |
+
----------
|
| 10 |
+
.. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex
|
| 11 |
+
method." Mathematical Programming Computation, 10 (1), 119-142,
|
| 12 |
+
2018. DOI: 10.1007/s12532-017-0130-5
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import inspect
|
| 17 |
+
import numpy as np
|
| 18 |
+
from ._optimize import OptimizeWarning, OptimizeResult
|
| 19 |
+
from warnings import warn
|
| 20 |
+
from ._highs._highs_wrapper import _highs_wrapper
|
| 21 |
+
from ._highs._highs_constants import (
|
| 22 |
+
CONST_INF,
|
| 23 |
+
MESSAGE_LEVEL_NONE,
|
| 24 |
+
HIGHS_OBJECTIVE_SENSE_MINIMIZE,
|
| 25 |
+
|
| 26 |
+
MODEL_STATUS_NOTSET,
|
| 27 |
+
MODEL_STATUS_LOAD_ERROR,
|
| 28 |
+
MODEL_STATUS_MODEL_ERROR,
|
| 29 |
+
MODEL_STATUS_PRESOLVE_ERROR,
|
| 30 |
+
MODEL_STATUS_SOLVE_ERROR,
|
| 31 |
+
MODEL_STATUS_POSTSOLVE_ERROR,
|
| 32 |
+
MODEL_STATUS_MODEL_EMPTY,
|
| 33 |
+
MODEL_STATUS_OPTIMAL,
|
| 34 |
+
MODEL_STATUS_INFEASIBLE,
|
| 35 |
+
MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE,
|
| 36 |
+
MODEL_STATUS_UNBOUNDED,
|
| 37 |
+
MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND
|
| 38 |
+
as MODEL_STATUS_RDOVUB,
|
| 39 |
+
MODEL_STATUS_REACHED_OBJECTIVE_TARGET,
|
| 40 |
+
MODEL_STATUS_REACHED_TIME_LIMIT,
|
| 41 |
+
MODEL_STATUS_REACHED_ITERATION_LIMIT,
|
| 42 |
+
|
| 43 |
+
HIGHS_SIMPLEX_STRATEGY_DUAL,
|
| 44 |
+
|
| 45 |
+
HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
|
| 46 |
+
|
| 47 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
|
| 48 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
|
| 49 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
|
| 50 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
|
| 51 |
+
)
|
| 52 |
+
from scipy.sparse import csc_matrix, vstack, issparse
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _highs_to_scipy_status_message(highs_status, highs_message):
    """Converts HiGHS status number/message to SciPy status number/message"""
    # Pairs of (scipy status code, scipy message prefix) keyed by the
    # HiGHS model-status constant.  SciPy codes: 0 success, 1 limit
    # reached, 2 infeasible, 3 unbounded, 4 solver trouble.
    status_map = {
        None: (4, "HiGHS did not provide a status code. "),
        MODEL_STATUS_NOTSET: (4, ""),
        MODEL_STATUS_LOAD_ERROR: (4, ""),
        MODEL_STATUS_MODEL_ERROR: (2, ""),
        MODEL_STATUS_PRESOLVE_ERROR: (4, ""),
        MODEL_STATUS_SOLVE_ERROR: (4, ""),
        MODEL_STATUS_POSTSOLVE_ERROR: (4, ""),
        MODEL_STATUS_MODEL_EMPTY: (4, ""),
        MODEL_STATUS_RDOVUB: (4, ""),
        MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""),
        MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "),
        MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "),
        MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "),
        MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "),
        MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "),
        MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded "
                                                  "or infeasible. "),
    }
    fallback = (4, "The HiGHS status code was not recognized. ")
    status, prefix = status_map.get(highs_status, fallback)
    # Always append the raw HiGHS status/message for debugging.
    message = prefix + f"(HiGHS Status {highs_status}: {highs_message})"
    return status, message
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _replace_inf(x):
    """Replace +/-``np.inf`` entries of `x` in place with +/-``CONST_INF``."""
    is_inf = np.isinf(x)
    with np.errstate(invalid="ignore"):
        # Keep the sign of each infinity while capping its magnitude at the
        # large finite constant HiGHS expects.
        x[is_inf] = CONST_INF * np.sign(x[is_inf])
    return x
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _convert_to_highs_enum(option, option_str, choices):
    """Map a user-supplied option value to the corresponding HiGHS enum.

    Parameters
    ----------
    option : object
        User-provided option value. Strings are matched
        case-insensitively against the keys of `choices`; non-string
        values (e.g. ``None``) are looked up directly.
    option_str : str
        Name of the option; used to look up its default from the
        signature of `_linprog_highs` and in the warning message.
    choices : dict
        Mapping from recognized option values to HiGHS enum values.

    Returns
    -------
    The HiGHS enum value for `option`, or — with an `OptimizeWarning` —
    the enum value of the default if `option` is not recognized.
    """
    # Normalize: strings are matched case-insensitively; anything without
    # a .lower() method (None, ints, ...) is used as-is.
    try:
        key = option.lower()
    except AttributeError:
        key = option
    try:
        return choices[key]
    except KeyError:
        # Fix: previously a non-string option missing from `choices`
        # raised an unhandled KeyError from inside the AttributeError
        # handler; now every unrecognized value takes this warn-and-use-
        # default path, taking the default from _linprog_highs's signature.
        sig = inspect.signature(_linprog_highs)
        default_str = sig.parameters[option_str].default
        warn(f"Option {option_str} is {option}, but only values in "
             f"{set(choices.keys())} are allowed. Using default: "
             f"{default_str}.",
             OptimizeWarning, stacklevel=3)
        return choices[default_str]
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _linprog_highs(lp, solver, time_limit=None, presolve=True,
                   disp=False, maxiter=None,
                   dual_feasibility_tolerance=None,
                   primal_feasibility_tolerance=None,
                   ipm_optimality_tolerance=None,
                   simplex_dual_edge_weight_strategy=None,
                   mip_rel_gap=None,
                   mip_max_nodes=None,
                   **unknown_options):
    r"""
    Solve the following linear programming problem using one of the HiGHS
    solvers:

    User-facing documentation is in _linprog_doc.py.

    Parameters
    ----------
    lp : _LPProblem
        A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``.
    solver : "ipm" or "simplex" or None
        Which HiGHS solver to use.  If ``None``, "simplex" will be used.

    Options
    -------
    maxiter : int
        The maximum number of iterations to perform in either phase. For
        ``solver='ipm'``, this does not include the number of crossover
        iterations.  Default is the largest possible value for an ``int``
        on the platform.
    disp : bool
        Set to ``True`` if indicators of optimization status are to be printed
        to the console each iteration; default ``False``.
    time_limit : float
        The maximum time in seconds allotted to solve the problem; default is
        the largest possible value for a ``double`` on the platform.
    presolve : bool
        Presolve attempts to identify trivial infeasibilities,
        identify trivial unboundedness, and simplify the problem before
        sending it to the main solver. It is generally recommended
        to keep the default setting ``True``; set to ``False`` if presolve is
        to be disabled.
    dual_feasibility_tolerance : double
        Dual feasibility tolerance.  Default is 1e-07.
        The minimum of this and ``primal_feasibility_tolerance``
        is used for the feasibility tolerance when ``solver='ipm'``.
    primal_feasibility_tolerance : double
        Primal feasibility tolerance.  Default is 1e-07.
        The minimum of this and ``dual_feasibility_tolerance``
        is used for the feasibility tolerance when ``solver='ipm'``.
    ipm_optimality_tolerance : double
        Optimality tolerance for ``solver='ipm'``.  Default is 1e-08.
        Minimum possible value is 1e-12 and must be smaller than the largest
        possible value for a ``double`` on the platform.
    simplex_dual_edge_weight_strategy : str (default: None)
        Strategy for simplex dual edge weights. The default, ``None``,
        automatically selects one of the following.

        ``'dantzig'`` uses Dantzig's original strategy of choosing the most
        negative reduced cost.

        ``'devex'`` uses the strategy described in [15]_.

        ``steepest`` uses the exact steepest edge strategy as described in
        [16]_.

        ``'steepest-devex'`` begins with the exact steepest edge strategy
        until the computation is too costly or inexact and then switches to
        the devex method.

        Currently, using ``None`` always selects ``'steepest-devex'``, but this
        may change as new options become available.

    mip_max_nodes : int
        The maximum number of nodes allotted to solve the problem; default is
        the largest possible value for a ``HighsInt`` on the platform.
        Ignored if not using the MIP solver.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        ``unknown_options`` is non-empty, a warning is issued listing all
        unused options.

    Returns
    -------
    sol : dict
        A dictionary consisting of the fields:

            x : 1D array
                The values of the decision variables that minimizes the
                objective function while satisfying the constraints.
            fun : float
                The optimal value of the objective function ``c @ x``.
            slack : 1D array
                The (nominally positive) values of the slack,
                ``b_ub - A_ub @ x``.
            con : 1D array
                The (nominally zero) residuals of the equality constraints,
                ``b_eq - A_eq @ x``.
            success : bool
                ``True`` when the algorithm succeeds in finding an optimal
                solution.
            status : int
                An integer representing the exit status of the algorithm.

                ``0`` : Optimization terminated successfully.

                ``1`` : Iteration or time limit reached.

                ``2`` : Problem appears to be infeasible.

                ``3`` : Problem appears to be unbounded.

                ``4`` : The HiGHS solver ran into a problem.

            message : str
                A string descriptor of the exit status of the algorithm.
            nit : int
                The total number of iterations performed.
                For ``solver='simplex'``, this includes iterations in all
                phases. For ``solver='ipm'``, this does not include
                crossover iterations.
            crossover_nit : int
                The number of primal/dual pushes performed during the
                crossover routine for ``solver='ipm'``.  This is ``0``
                for ``solver='simplex'``.
            ineqlin : OptimizeResult
                Solution and sensitivity information corresponding to the
                inequality constraints, `b_ub`. A dictionary consisting of the
                fields:

                residual : np.ndnarray
                    The (nominally positive) values of the slack variables,
                    ``b_ub - A_ub @ x``.  This quantity is also commonly
                    referred to as "slack".

                marginals : np.ndarray
                    The sensitivity (partial derivative) of the objective
                    function with respect to the right-hand side of the
                    inequality constraints, `b_ub`.

            eqlin : OptimizeResult
                Solution and sensitivity information corresponding to the
                equality constraints, `b_eq`.  A dictionary consisting of the
                fields:

                residual : np.ndarray
                    The (nominally zero) residuals of the equality constraints,
                    ``b_eq - A_eq @ x``.

                marginals : np.ndarray
                    The sensitivity (partial derivative) of the objective
                    function with respect to the right-hand side of the
                    equality constraints, `b_eq`.

            lower, upper : OptimizeResult
                Solution and sensitivity information corresponding to the
                lower and upper bounds on decision variables, `bounds`.

                residual : np.ndarray
                    The (nominally positive) values of the quantity
                    ``x - lb`` (lower) or ``ub - x`` (upper).

                marginals : np.ndarray
                    The sensitivity (partial derivative) of the objective
                    function with respect to the lower and upper
                    `bounds`.

            mip_node_count : int
                The number of subproblems or "nodes" solved by the MILP
                solver. Only present when `integrality` is not `None`.

            mip_dual_bound : float
                The MILP solver's final estimate of the lower bound on the
                optimal solution. Only present when `integrality` is not
                `None`.

            mip_gap : float
                The difference between the final objective function value
                and the final dual bound, scaled by the final objective
                function value. Only present when `integrality` is not
                `None`.

    Notes
    -----
    The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
    `marginals`, or partial derivatives of the objective function with respect
    to the right-hand side of each constraint. These partial derivatives are
    also referred to as "Lagrange multipliers", "dual values", and
    "shadow prices". The sign convention of `marginals` is opposite that
    of Lagrange multipliers produced by many nonlinear solvers.

    References
    ----------
    .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
            Mathematical programming 5.1 (1973): 1-28.
    .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
            simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
    """
    if unknown_options:
        message = (f"Unrecognized options detected: {unknown_options}. "
                   "These will be passed to HiGHS verbatim.")
        warn(message, OptimizeWarning, stacklevel=3)

    # Map options to HiGHS enum values
    simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum(
        simplex_dual_edge_weight_strategy,
        'simplex_dual_edge_weight_strategy',
        choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
                 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
                 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
                 'steepest':
                 HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
                 None: None})

    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp

    lb, ub = bounds.T.copy()  # separate bounds, copy->C-cntgs
    # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints
    with np.errstate(invalid="ignore"):
        lhs_ub = -np.ones_like(b_ub)*np.inf  # LHS of UB constraints is -inf
    rhs_ub = b_ub  # RHS of UB constraints is b_ub
    lhs_eq = b_eq  # Equality constraint is inequality
    rhs_eq = b_eq  # constraint with LHS=RHS
    # Stack inequality rows first, then equality rows; downstream slicing
    # by len(b_ub) relies on this ordering.
    lhs = np.concatenate((lhs_ub, lhs_eq))
    rhs = np.concatenate((rhs_ub, rhs_eq))

    if issparse(A_ub) or issparse(A_eq):
        A = vstack((A_ub, A_eq))
    else:
        A = np.vstack((A_ub, A_eq))
    # The wrapper consumes CSC components (indptr/indices/data) directly.
    A = csc_matrix(A)

    options = {
        'presolve': presolve,
        'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE,
        'solver': solver,
        'time_limit': time_limit,
        'highs_debug_level': MESSAGE_LEVEL_NONE,
        'dual_feasibility_tolerance': dual_feasibility_tolerance,
        'ipm_optimality_tolerance': ipm_optimality_tolerance,
        'log_to_console': disp,
        'mip_max_nodes': mip_max_nodes,
        'output_flag': disp,
        'primal_feasibility_tolerance': primal_feasibility_tolerance,
        'simplex_dual_edge_weight_strategy':
            simplex_dual_edge_weight_strategy_enum,
        'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL,
        'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
        'ipm_iteration_limit': maxiter,
        'simplex_iteration_limit': maxiter,
        'mip_rel_gap': mip_rel_gap,
    }
    # Unrecognized options are forwarded to HiGHS verbatim (warned above).
    options.update(unknown_options)

    # np.inf doesn't work; use very large constant
    rhs = _replace_inf(rhs)
    lhs = _replace_inf(lhs)
    lb = _replace_inf(lb)
    ub = _replace_inf(ub)

    # An empty integrality array tells the wrapper this is a pure LP.
    if integrality is None or np.sum(integrality) == 0:
        integrality = np.empty(0)
    else:
        integrality = np.array(integrality)

    res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs,
                         lb, ub, integrality.astype(np.uint8), options)

    # HiGHS represents constraints as lhs/rhs, so
    # Ax + s = b => Ax = b - s
    # and we need to split up s by A_ub and A_eq
    if 'slack' in res:
        slack = res['slack']
        con = np.array(slack[len(b_ub):])
        slack = np.array(slack[:len(b_ub)])
    else:
        slack, con = None, None

    # lagrange multipliers for equalities/inequalities and upper/lower bounds
    if 'lambda' in res:
        lamda = res['lambda']
        marg_ineqlin = np.array(lamda[:len(b_ub)])
        marg_eqlin = np.array(lamda[len(b_ub):])
        marg_upper = np.array(res['marg_bnds'][1, :])
        marg_lower = np.array(res['marg_bnds'][0, :])
    else:
        marg_ineqlin, marg_eqlin = None, None
        marg_upper, marg_lower = None, None

    # this needs to be updated if we start choosing the solver intelligently

    # Convert to scipy-style status and message
    highs_status = res.get('status', None)
    highs_message = res.get('message', None)
    status, message = _highs_to_scipy_status_message(highs_status,
                                                     highs_message)

    # 'x' is absent when HiGHS did not produce a solution vector.
    x = np.array(res['x']) if 'x' in res else None
    sol = {'x': x,
           'slack': slack,
           'con': con,
           'ineqlin': OptimizeResult({
               'residual': slack,
               'marginals': marg_ineqlin,
           }),
           'eqlin': OptimizeResult({
               'residual': con,
               'marginals': marg_eqlin,
           }),
           'lower': OptimizeResult({
               'residual': None if x is None else x - lb,
               'marginals': marg_lower,
           }),
           'upper': OptimizeResult({
               'residual': None if x is None else ub - x,
               'marginals': marg_upper
           }),
           'fun': res.get('fun'),
           'status': status,
           'success': res['status'] == MODEL_STATUS_OPTIMAL,
           'message': message,
           'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0),
           'crossover_nit': res.get('crossover_nit'),
           }

    # MIP-specific fields are only attached when a solution exists.
    # NOTE(review): `integrality` is never None here (it was replaced by an
    # array above), so the second condition looks vacuous — confirm intent.
    if np.any(x) and integrality is not None:
        sol.update({
            'mip_node_count': res.get('mip_node_count', 0),
            'mip_dual_bound': res.get('mip_dual_bound', 0.0),
            'mip_gap': res.get('mip_gap', 0.0),
        })

    return sol
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py
ADDED
|
@@ -0,0 +1,1126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Interior-point method for linear programming
|
| 2 |
+
|
| 3 |
+
The *interior-point* method uses the primal-dual path following algorithm
|
| 4 |
+
outlined in [1]_. This algorithm supports sparse constraint matrices and
|
| 5 |
+
is typically faster than the simplex methods, especially for large, sparse
|
| 6 |
+
problems. Note, however, that the solution returned may be slightly less
|
| 7 |
+
accurate than those of the simplex methods and will not, in general,
|
| 8 |
+
correspond with a vertex of the polytope defined by the constraints.
|
| 9 |
+
|
| 10 |
+
.. versionadded:: 1.0.0
|
| 11 |
+
|
| 12 |
+
References
|
| 13 |
+
----------
|
| 14 |
+
.. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 15 |
+
optimizer for linear programming: an implementation of the
|
| 16 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 17 |
+
2000. 197-232.
|
| 18 |
+
"""
|
| 19 |
+
# Author: Matt Haberland
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import scipy as sp
|
| 23 |
+
import scipy.sparse as sps
|
| 24 |
+
from warnings import warn
|
| 25 |
+
from scipy.linalg import LinAlgError
|
| 26 |
+
from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options
|
| 27 |
+
from ._linprog_util import _postsolve
|
| 28 |
+
has_umfpack = True
|
| 29 |
+
has_cholmod = True
|
| 30 |
+
try:
|
| 31 |
+
import sksparse # noqa: F401
|
| 32 |
+
from sksparse.cholmod import cholesky as cholmod # noqa: F401
|
| 33 |
+
from sksparse.cholmod import analyze as cholmod_analyze
|
| 34 |
+
except ImportError:
|
| 35 |
+
has_cholmod = False
|
| 36 |
+
try:
|
| 37 |
+
import scikits.umfpack # test whether to use factorized # noqa: F401
|
| 38 |
+
except ImportError:
|
| 39 |
+
has_umfpack = False
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _get_solver(M, sparse=False, lstsq=False, sym_pos=True,
                cholesky=True, permc_spec='MMD_AT_PLUS_A'):
    """
    Given solver options, return a handle to the appropriate linear system
    solver.

    Parameters
    ----------
    M : 2-D array
        As defined in [4] Equation 8.31
    sparse : bool (default = False)
        True if the system to be solved is sparse. This is typically set
        True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
    lstsq : bool (default = False)
        True if the system is ill-conditioned and/or (nearly) singular and
        thus a more robust least-squares solver is desired. This is sometimes
        needed as the solution is approached.
    sym_pos : bool (default = True)
        True if the system matrix is symmetric positive definite
        Sometimes this needs to be set false as the solution is approached,
        even when the system should be symmetric positive definite, due to
        numerical difficulties.
    cholesky : bool (default = True)
        True if the system is to be solved by Cholesky, rather than LU,
        decomposition. This is typically faster unless the problem is very
        small or prone to numerical difficulties.
    permc_spec : str (default = 'MMD_AT_PLUS_A')
        Sparsity preservation strategy used by SuperLU. Acceptable values are:

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering.

        See SuperLU documentation.

    Returns
    -------
    solve : function
        Handle to the appropriate solver function, or ``None`` if the
        matrix could not be factorized with the requested method.

    """
    try:
        if sparse:
            if lstsq:
                # Robust but slow: iterative least-squares via LSQR.
                # `sym_pos` is accepted (and ignored) so all returned
                # solvers share a compatible call signature.
                def solve(r, sym_pos=False):
                    return sps.linalg.lsqr(M, r)[0]
            elif cholesky:
                try:
                    # Will raise an exception in the first call,
                    # or when the matrix changes due to a new problem
                    # (the symbolic factorization is cached as a function
                    # attribute and reused across calls).
                    _get_solver.cholmod_factor.cholesky_inplace(M)
                except Exception:
                    _get_solver.cholmod_factor = cholmod_analyze(M)
                    _get_solver.cholmod_factor.cholesky_inplace(M)
                # The CHOLMOD factor object is itself callable as a solver.
                solve = _get_solver.cholmod_factor
            else:
                if has_umfpack and sym_pos:
                    solve = sps.linalg.factorized(M)
                else:  # factorized doesn't pass permc_spec
                    solve = sps.linalg.splu(M, permc_spec=permc_spec).solve

        else:
            if lstsq:  # sometimes necessary as solution is approached
                def solve(r):
                    return sp.linalg.lstsq(M, r)[0]
            elif cholesky:
                # Factor once here; the closure reuses the factorization
                # for every right-hand side.
                L = sp.linalg.cho_factor(M)

                def solve(r):
                    return sp.linalg.cho_solve(L, r)
            else:
                # this seems to cache the matrix factorization, so solving
                # with multiple right hand sides is much faster
                def solve(r, sym_pos=sym_pos):
                    if sym_pos:
                        return sp.linalg.solve(M, r, assume_a="pos")
                    else:
                        return sp.linalg.solve(M, r)
    # There are many things that can go wrong here, and it's hard to say
    # what all of them are. It doesn't really matter: if the matrix can't be
    # factorized, return None. get_solver will be called again with different
    # inputs, and a new routine will try to factorize the matrix.
    except KeyboardInterrupt:
        raise
    except Exception:
        return None
    return solve
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False,
               lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False,
               permc_spec='MMD_AT_PLUS_A'):
    """
    Given standard form problem defined by ``A``, ``b``, and ``c``;
    current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``;
    algorithmic parameters ``gamma`` and ``eta``;
    and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc``
    (predictor-corrector), and ``ip`` (initial point improvement),
    get the search direction for increments to the variable estimates.

    Parameters
    ----------
    As defined in [4], except:
    sparse : bool
        True if the system to be solved is sparse. This is typically set
        True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
    lstsq : bool
        True if the system is ill-conditioned and/or (nearly) singular and
        thus a more robust least-squares solver is desired. This is sometimes
        needed as the solution is approached.
    sym_pos : bool
        True if the system matrix is symmetric positive definite
        Sometimes this needs to be set false as the solution is approached,
        even when the system should be symmetric positive definite, due to
        numerical difficulties.
    cholesky : bool
        True if the system is to be solved by Cholesky, rather than LU,
        decomposition. This is typically faster unless the problem is very
        small or prone to numerical difficulties.
    pc : bool
        True if the predictor-corrector method of Mehrota is to be used. This
        is almost always (if not always) beneficial. Even though it requires
        the solution of an additional linear system, the factorization
        is typically (implicitly) reused so solution is efficient, and the
        number of algorithm iterations is typically reduced.
    ip : bool
        True if the improved initial point suggestion due to [4] section 4.3
        is desired. It's unclear whether this is beneficial.
    permc_spec : str (default = 'MMD_AT_PLUS_A')
        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
        True``.) A matrix is factorized in each iteration of the algorithm.
        This option specifies how to permute the columns of the matrix for
        sparsity preservation. Acceptable values are:

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering.

        This option can impact the convergence of the
        interior point algorithm; test different values to determine which
        performs best for your problem. For more information, refer to
        ``scipy.sparse.linalg.splu``.

    Returns
    -------
    Search directions as defined in [4]

    References
    ----------
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.

    """
    if A.shape[0] == 0:
        # If there are no constraints, some solvers fail (understandably)
        # rather than returning empty solution. This gets the job done.
        sparse, lstsq, sym_pos, cholesky = False, False, True, False
    n_x = len(x)

    # [4] Equation 8.8 -- residuals of the homogeneous self-dual system
    r_P = b * tau - A.dot(x)
    r_D = c * tau - A.T.dot(y) - z
    r_G = c.dot(x) - b.transpose().dot(y) + kappa
    mu = (x.dot(z) + tau * kappa) / (n_x + 1)

    # Assemble M from [4] Equation 8.31
    # Dinv is the diagonal of X Z^{-1} (elementwise, since both are diagonal)
    Dinv = x / z

    if sparse:
        M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T))
    else:
        M = A.dot(Dinv.reshape(-1, 1) * A.T)
    # solve is a callable r -> M^{-1} r chosen according to the options
    solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec)

    # pc: "predictor-corrector" [4] Section 4.1
    # In development this option could be turned off
    # but it always seems to improve performance substantially
    n_corrections = 1 if pc else 0

    i = 0
    alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0
    while i <= n_corrections:
        # Reference [4] Eq. 8.6
        # NOTE: eta is a callable of gamma, not a scalar
        rhatp = eta(gamma) * r_P
        rhatd = eta(gamma) * r_D
        rhatg = eta(gamma) * r_G

        # Reference [4] Eq. 8.7
        rhatxs = gamma * mu - x * z
        rhattk = gamma * mu - tau * kappa

        if i == 1:
            if ip:  # if the correction is to get "initial point"
                # Reference [4] Eq. 8.23
                rhatxs = ((1 - alpha) * gamma * mu -
                          x * z - alpha**2 * d_x * d_z)
                rhattk = ((1 - alpha) * gamma * mu -
                          tau * kappa -
                          alpha**2 * d_tau * d_kappa)
            else:  # if the correction is for "predictor-corrector"
                # Reference [4] Eq. 8.13
                rhatxs -= d_x * d_z
                rhattk -= d_tau * d_kappa

        # sometimes numerical difficulties arise as the solution is approached
        # this loop tries to solve the equations using a sequence of functions
        # for solve. For dense systems, the order is:
        # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve,
        # 2. scipy.linalg.solve w/ sym_pos = True,
        # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails
        # 4. scipy.linalg.lstsq
        # For sparse systems, the order is:
        # 1. sksparse.cholmod.cholesky (if available)
        # 2. scipy.sparse.linalg.factorized (if umfpack available)
        # 3. scipy.sparse.linalg.splu
        # 4. scipy.sparse.linalg.lsqr
        solved = False
        while not solved:
            try:
                # [4] Equation 8.28
                p, q = _sym_solve(Dinv, A, c, b, solve)
                # [4] Equation 8.29
                u, v = _sym_solve(Dinv, A, rhatd -
                                  (1 / x) * rhatxs, rhatp, solve)
                # lstsq solvers can return NaN instead of raising, so the
                # failure is converted to an exception here
                if np.any(np.isnan(p)) or np.any(np.isnan(q)):
                    raise LinAlgError
                solved = True
            except (LinAlgError, ValueError, TypeError) as e:
                # Usually this doesn't happen. If it does, it happens when
                # there are redundant constraints or when approaching the
                # solution. If so, change solver.
                if cholesky:
                    cholesky = False
                    warn(
                        "Solving system with option 'cholesky':True "
                        "failed. It is normal for this to happen "
                        "occasionally, especially as the solution is "
                        "approached. However, if you see this frequently, "
                        "consider setting option 'cholesky' to False.",
                        OptimizeWarning, stacklevel=5)
                elif sym_pos:
                    sym_pos = False
                    warn(
                        "Solving system with option 'sym_pos':True "
                        "failed. It is normal for this to happen "
                        "occasionally, especially as the solution is "
                        "approached. However, if you see this frequently, "
                        "consider setting option 'sym_pos' to False.",
                        OptimizeWarning, stacklevel=5)
                elif not lstsq:
                    lstsq = True
                    warn(
                        "Solving system with option 'sym_pos':False "
                        "failed. This may happen occasionally, "
                        "especially as the solution is "
                        "approached. However, if you see this frequently, "
                        "your problem may be numerically challenging. "
                        "If you cannot improve the formulation, consider "
                        "setting 'lstsq' to True. Consider also setting "
                        "`presolve` to True, if it is not already.",
                        OptimizeWarning, stacklevel=5)
                else:
                    # all fallbacks exhausted; let the caller handle it
                    raise e
                solve = _get_solver(M, sparse, lstsq, sym_pos,
                                    cholesky, permc_spec)
        # [4] Results after 8.29
        d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) /
                 (1 / tau * kappa + (-c.dot(p) + b.dot(q))))
        d_x = u + p * d_tau
        d_y = v + q * d_tau

        # [4] Relations between after 8.25 and 8.26
        d_z = (1 / x) * (rhatxs - z * d_x)
        d_kappa = 1 / tau * (rhattk - kappa * d_tau)

        # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23
        alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1)
        if ip:  # initial point - see [4] 4.4
            gamma = 10
        else:  # predictor-corrector, [4] definition after 8.12
            beta1 = 0.1  # [4] pg. 220 (Table 8.1)
            gamma = (1 - alpha)**2 * min(beta1, (1 - alpha))
        i += 1

    return d_x, d_y, d_z, d_tau, d_kappa
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def _sym_solve(Dinv, A, r1, r2, solve):
|
| 334 |
+
"""
|
| 335 |
+
An implementation of [4] equation 8.31 and 8.32
|
| 336 |
+
|
| 337 |
+
References
|
| 338 |
+
----------
|
| 339 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 340 |
+
optimizer for linear programming: an implementation of the
|
| 341 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 342 |
+
2000. 197-232.
|
| 343 |
+
|
| 344 |
+
"""
|
| 345 |
+
# [4] 8.31
|
| 346 |
+
r = r2 + A.dot(Dinv * r1)
|
| 347 |
+
v = solve(r)
|
| 348 |
+
# [4] 8.32
|
| 349 |
+
u = Dinv * (A.T.dot(v) - r1)
|
| 350 |
+
return u, v
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0):
|
| 354 |
+
"""
|
| 355 |
+
An implementation of [4] equation 8.21
|
| 356 |
+
|
| 357 |
+
References
|
| 358 |
+
----------
|
| 359 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 360 |
+
optimizer for linear programming: an implementation of the
|
| 361 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 362 |
+
2000. 197-232.
|
| 363 |
+
|
| 364 |
+
"""
|
| 365 |
+
# [4] 4.3 Equation 8.21, ignoring 8.20 requirement
|
| 366 |
+
# same step is taken in primal and dual spaces
|
| 367 |
+
# alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3
|
| 368 |
+
# the value 1 is used in Mehrota corrector and initial point correction
|
| 369 |
+
i_x = d_x < 0
|
| 370 |
+
i_z = d_z < 0
|
| 371 |
+
alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1
|
| 372 |
+
alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1
|
| 373 |
+
alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1
|
| 374 |
+
alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1
|
| 375 |
+
alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa])
|
| 376 |
+
return alpha
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def _get_message(status):
|
| 380 |
+
"""
|
| 381 |
+
Given problem status code, return a more detailed message.
|
| 382 |
+
|
| 383 |
+
Parameters
|
| 384 |
+
----------
|
| 385 |
+
status : int
|
| 386 |
+
An integer representing the exit status of the optimization::
|
| 387 |
+
|
| 388 |
+
0 : Optimization terminated successfully
|
| 389 |
+
1 : Iteration limit reached
|
| 390 |
+
2 : Problem appears to be infeasible
|
| 391 |
+
3 : Problem appears to be unbounded
|
| 392 |
+
4 : Serious numerical difficulties encountered
|
| 393 |
+
|
| 394 |
+
Returns
|
| 395 |
+
-------
|
| 396 |
+
message : str
|
| 397 |
+
A string descriptor of the exit status of the optimization.
|
| 398 |
+
|
| 399 |
+
"""
|
| 400 |
+
messages = (
|
| 401 |
+
["Optimization terminated successfully.",
|
| 402 |
+
"The iteration limit was reached before the algorithm converged.",
|
| 403 |
+
"The algorithm terminated successfully and determined that the "
|
| 404 |
+
"problem is infeasible.",
|
| 405 |
+
"The algorithm terminated successfully and determined that the "
|
| 406 |
+
"problem is unbounded.",
|
| 407 |
+
"Numerical difficulties were encountered before the problem "
|
| 408 |
+
"converged. Please check your problem formulation for errors, "
|
| 409 |
+
"independence of linear equality constraints, and reasonable "
|
| 410 |
+
"scaling and matrix condition numbers. If you continue to "
|
| 411 |
+
"encounter this error, please submit a bug report."
|
| 412 |
+
])
|
| 413 |
+
return messages[status]
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha):
|
| 417 |
+
"""
|
| 418 |
+
An implementation of [4] Equation 8.9
|
| 419 |
+
|
| 420 |
+
References
|
| 421 |
+
----------
|
| 422 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 423 |
+
optimizer for linear programming: an implementation of the
|
| 424 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 425 |
+
2000. 197-232.
|
| 426 |
+
|
| 427 |
+
"""
|
| 428 |
+
x = x + alpha * d_x
|
| 429 |
+
tau = tau + alpha * d_tau
|
| 430 |
+
z = z + alpha * d_z
|
| 431 |
+
kappa = kappa + alpha * d_kappa
|
| 432 |
+
y = y + alpha * d_y
|
| 433 |
+
return x, y, z, tau, kappa
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def _get_blind_start(shape):
|
| 437 |
+
"""
|
| 438 |
+
Return the starting point from [4] 4.4
|
| 439 |
+
|
| 440 |
+
References
|
| 441 |
+
----------
|
| 442 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 443 |
+
optimizer for linear programming: an implementation of the
|
| 444 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 445 |
+
2000. 197-232.
|
| 446 |
+
|
| 447 |
+
"""
|
| 448 |
+
m, n = shape
|
| 449 |
+
x0 = np.ones(n)
|
| 450 |
+
y0 = np.zeros(m)
|
| 451 |
+
z0 = np.ones(n)
|
| 452 |
+
tau0 = 1
|
| 453 |
+
kappa0 = 1
|
| 454 |
+
return x0, y0, z0, tau0, kappa0
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def _indicators(A, b, c, c0, x, y, z, tau, kappa):
    """
    Compute the optimization-status indicators of [4] Section 4.5.

    The residual norms are normalized relative to their values at the
    blind starting point, so each rho is dimensionless.

    Returns
    -------
    rho_p, rho_d, rho_A, rho_g, rho_mu : float
        Normalized primal/dual feasibility, accuracy, duality gap, and
        path parameter indicators.
    obj : float
        Objective value of the current (rescaled) iterate, including ``c0``.

    References
    ----------
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.

    """
    # residuals for termination are relative to initial values
    x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape)

    norm = np.linalg.norm

    # [4] Equation 8.8 residuals of the homogeneous system
    def primal_residual(x_, tau_):
        return b * tau_ - A.dot(x_)

    def dual_residual(y_, z_, tau_):
        return c * tau_ - A.T.dot(y_) - z_

    def gap_residual(x_, y_, kappa_):
        return kappa_ + c.dot(x_) - b.dot(y_)

    # np.dot unpacks if they are arrays of size one
    def path_parameter(x_, tau_, z_, kappa_):
        return (x_.dot(z_) + np.dot(tau_, kappa_)) / (len(x_) + 1)

    obj = c.dot(x / tau) + c0

    # See [4], Section 4.5 - The Stopping Criteria
    rho_A = (norm(c.T.dot(x) - b.T.dot(y)) /
             (tau + norm(b.T.dot(y))))
    rho_p = (norm(primal_residual(x, tau)) /
             max(1, norm(primal_residual(x0, tau0))))
    rho_d = (norm(dual_residual(y, z, tau)) /
             max(1, norm(dual_residual(y0, z0, tau0))))
    rho_g = (norm(gap_residual(x, y, kappa)) /
             max(1, norm(gap_residual(x0, y0, kappa0))))
    rho_mu = path_parameter(x, tau, z, kappa) / path_parameter(x0, tau0,
                                                               z0, kappa0)
    return rho_p, rho_d, rho_A, rho_g, rho_mu, obj
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False):
|
| 507 |
+
"""
|
| 508 |
+
Print indicators of optimization status to the console.
|
| 509 |
+
|
| 510 |
+
Parameters
|
| 511 |
+
----------
|
| 512 |
+
rho_p : float
|
| 513 |
+
The (normalized) primal feasibility, see [4] 4.5
|
| 514 |
+
rho_d : float
|
| 515 |
+
The (normalized) dual feasibility, see [4] 4.5
|
| 516 |
+
rho_g : float
|
| 517 |
+
The (normalized) duality gap, see [4] 4.5
|
| 518 |
+
alpha : float
|
| 519 |
+
The step size, see [4] 4.3
|
| 520 |
+
rho_mu : float
|
| 521 |
+
The (normalized) path parameter, see [4] 4.5
|
| 522 |
+
obj : float
|
| 523 |
+
The objective function value of the current iterate
|
| 524 |
+
header : bool
|
| 525 |
+
True if a header is to be printed
|
| 526 |
+
|
| 527 |
+
References
|
| 528 |
+
----------
|
| 529 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 530 |
+
optimizer for linear programming: an implementation of the
|
| 531 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 532 |
+
2000. 197-232.
|
| 533 |
+
|
| 534 |
+
"""
|
| 535 |
+
if header:
|
| 536 |
+
print("Primal Feasibility ",
|
| 537 |
+
"Dual Feasibility ",
|
| 538 |
+
"Duality Gap ",
|
| 539 |
+
"Step ",
|
| 540 |
+
"Path Parameter ",
|
| 541 |
+
"Objective ")
|
| 542 |
+
|
| 543 |
+
# no clue why this works
|
| 544 |
+
fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}'
|
| 545 |
+
print(fmt.format(
|
| 546 |
+
float(rho_p),
|
| 547 |
+
float(rho_d),
|
| 548 |
+
float(rho_g),
|
| 549 |
+
alpha if isinstance(alpha, str) else float(alpha),
|
| 550 |
+
float(rho_mu),
|
| 551 |
+
float(obj)))
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq,
            sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args):
    r"""
    Solve a linear programming problem in standard form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
            x >= 0

    using the interior point method of [4].

    Parameters
    ----------
    A : 2-D array
        2-D array such that ``A @ x``, gives the values of the equality
        constraints at ``x``.
    b : 1-D array
        1-D array of values representing the RHS of each equality constraint
        (row) in ``A`` (for standard form problem).
    c : 1-D array
        Coefficients of the linear objective function to be minimized (for
        standard form problem).
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables. (Purely for display.)
    alpha0 : float
        The maximal step size for Mehrota's predictor-corrector search
        direction; see :math:`\beta_3` of [4] Table 8.1
    beta : float
        The desired reduction of the path parameter :math:`\mu` (see [6]_)
    maxiter : int
        The maximum number of iterations of the algorithm.
    disp : bool
        Set to ``True`` if indicators of optimization status are to be printed
        to the console each iteration.
    tol : float
        Termination tolerance; see [4]_ Section 4.5.
    sparse : bool
        Set to ``True`` if the problem is to be treated as sparse. However,
        the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as
        (dense) arrays rather than sparse matrices.
    lstsq : bool
        Set to ``True`` if the problem is expected to be very poorly
        conditioned. This should always be left as ``False`` unless severe
        numerical difficulties are frequently encountered, and a better option
        would be to improve the formulation of the problem.
    sym_pos : bool
        Leave ``True`` if the problem is expected to yield a well conditioned
        symmetric positive definite normal equation matrix (almost always).
    cholesky : bool
        Set to ``True`` if the normal equations are to be solved by explicit
        Cholesky decomposition followed by explicit forward/backward
        substitution. This is typically faster for moderate, dense problems
        that are numerically well-behaved.
    pc : bool
        Leave ``True`` if the predictor-corrector method of Mehrota is to be
        used. This is almost always (if not always) beneficial.
    ip : bool
        Set to ``True`` if the improved initial point suggestion due to [4]_
        Section 4.3 is desired. It's unclear whether this is beneficial.
    permc_spec : str (default = 'MMD_AT_PLUS_A')
        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
        True``.) A matrix is factorized in each iteration of the algorithm.
        This option specifies how to permute the columns of the matrix for
        sparsity preservation. Acceptable values are:

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering.

        This option can impact the convergence of the
        interior point algorithm; test different values to determine which
        performs best for your problem. For more information, refer to
        ``scipy.sparse.linalg.splu``.
    callback : callable, optional
        If a callback function is provided, it will be called within each
        iteration of the algorithm. The callback function must accept a single
        `scipy.optimize.OptimizeResult` consisting of the following fields:

            x : 1-D array
                Current solution vector
            fun : float
                Current value of the objective function
            success : bool
                True only when an algorithm has completed successfully,
                so this is always False as the callback function is called
                only while the algorithm is still iterating.
            slack : 1-D array
                The values of the slack variables. Each slack variable
                corresponds to an inequality constraint. If the slack is zero,
                the corresponding constraint is active.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                that is, ``b - A_eq @ x``
            phase : int
                The phase of the algorithm being executed. This is always
                1 for the interior-point method because it has only one phase.
            status : int
                For revised simplex, this is always 0 because if a different
                status is detected, the algorithm terminates.
            nit : int
                The number of iterations performed.
            message : str
                A string descriptor of the exit status of the optimization.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the standard-form
        problem into the solution to the original problem.

    Returns
    -------
    x_hat : float
        Solution vector (for standard form problem).
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    message : str
        A string descriptor of the exit status of the optimization.
    iteration : int
        The number of iterations taken to solve the problem

    References
    ----------
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.
    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
           Programming based on Newton's Method." Unpublished Course Notes,
           March 2004. Available 2/25/2017 at:
           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf

    """

    iteration = 0

    # default initial point
    x, y, z, tau, kappa = _get_blind_start(A.shape)

    # first iteration is special improvement of initial point
    # (the ip refinement is only meaningful with predictor-corrector enabled)
    ip = ip if pc else False

    # [4] 4.5
    rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
        A, b, c, c0, x, y, z, tau, kappa)
    go = rho_p > tol or rho_d > tol or rho_A > tol  # we might get lucky : )

    if disp:
        _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True)
    if callback is not None:
        x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
        res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
                              'con': con, 'nit': iteration, 'phase': 1,
                              'complete': False, 'status': 0,
                              'message': "", 'success': False})
        callback(res)

    status = 0
    message = "Optimization terminated successfully."

    if sparse:
        A = sps.csc_matrix(A)

    while go:

        iteration += 1

        if ip:  # initial point
            # [4] Section 4.4
            gamma = 1

            def eta(g):
                return 1
        else:
            # gamma = 0 in predictor step according to [4] 4.1
            # if predictor/corrector is off, use mean of complementarity [6]
            # 5.1 / [4] Below Figure 10-4
            gamma = 0 if pc else beta * np.mean(z * x)
            # [4] Section 4.1

            def eta(g=gamma):
                return 1 - g

        try:
            # Solve [4] 8.6 and 8.7/8.13/8.23
            d_x, d_y, d_z, d_tau, d_kappa = _get_delta(
                A, b, c, x, y, z, tau, kappa, gamma, eta,
                sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec)

            if ip:  # initial point
                # [4] 4.4
                # Formula after 8.23 takes a full step regardless if this will
                # take it negative
                alpha = 1.0
                x, y, z, tau, kappa = _do_step(
                    x, y, z, tau, kappa, d_x, d_y,
                    d_z, d_tau, d_kappa, alpha)
                # clip the refined starting point back into the interior
                x[x < 1] = 1
                z[z < 1] = 1
                tau = max(1, tau)
                kappa = max(1, kappa)
                ip = False  # done with initial point
            else:
                # [4] Section 4.3
                alpha = _get_step(x, d_x, z, d_z, tau,
                                  d_tau, kappa, d_kappa, alpha0)
                # [4] Equation 8.9
                x, y, z, tau, kappa = _do_step(
                    x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha)

        except (LinAlgError, FloatingPointError,
                ValueError, ZeroDivisionError):
            # this can happen when sparse solver is used and presolve
            # is turned off. Also observed ValueError in AppVeyor Python 3.6
            # Win32 build (PR #8676). I've never seen it otherwise.
            status = 4
            message = _get_message(status)
            break

        # [4] 4.5
        rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
            A, b, c, c0, x, y, z, tau, kappa)
        go = rho_p > tol or rho_d > tol or rho_A > tol

        if disp:
            _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj)
        if callback is not None:
            x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
            res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
                                  'con': con, 'nit': iteration, 'phase': 1,
                                  'complete': False, 'status': 0,
                                  'message': "", 'success': False})
            callback(res)

        # [4] 4.5 -- infeasibility / unboundedness detection
        inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol *
                max(1, kappa))
        inf2 = rho_mu < tol and tau < tol * min(1, kappa)
        if inf1 or inf2:
            # [4] Lemma 8.4 / Theorem 8.3
            if b.transpose().dot(y) > tol:
                status = 2
            else:  # elif c.T.dot(x) < tol: ? Probably not necessary.
                status = 3
            message = _get_message(status)
            break
        elif iteration >= maxiter:
            status = 1
            message = _get_message(status)
            break

    x_hat = x / tau
    # [4] Statement after Theorem 8.2
    return x_hat, status, message, iteration
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8,
|
| 822 |
+
disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False,
|
| 823 |
+
sym_pos=True, cholesky=None, pc=True, ip=False,
|
| 824 |
+
permc_spec='MMD_AT_PLUS_A', **unknown_options):
|
| 825 |
+
r"""
|
| 826 |
+
Minimize a linear objective function subject to linear
|
| 827 |
+
equality and non-negativity constraints using the interior point method
|
| 828 |
+
of [4]_. Linear programming is intended to solve problems
|
| 829 |
+
of the following form:
|
| 830 |
+
|
| 831 |
+
Minimize::
|
| 832 |
+
|
| 833 |
+
c @ x
|
| 834 |
+
|
| 835 |
+
Subject to::
|
| 836 |
+
|
| 837 |
+
A @ x == b
|
| 838 |
+
x >= 0
|
| 839 |
+
|
| 840 |
+
User-facing documentation is in _linprog_doc.py.
|
| 841 |
+
|
| 842 |
+
Parameters
|
| 843 |
+
----------
|
| 844 |
+
c : 1-D array
|
| 845 |
+
Coefficients of the linear objective function to be minimized.
|
| 846 |
+
c0 : float
|
| 847 |
+
Constant term in objective function due to fixed (and eliminated)
|
| 848 |
+
variables. (Purely for display.)
|
| 849 |
+
A : 2-D array
|
| 850 |
+
2-D array such that ``A @ x``, gives the values of the equality
|
| 851 |
+
constraints at ``x``.
|
| 852 |
+
b : 1-D array
|
| 853 |
+
1-D array of values representing the right hand side of each equality
|
| 854 |
+
constraint (row) in ``A``.
|
| 855 |
+
callback : callable, optional
|
| 856 |
+
Callback function to be executed once per iteration.
|
| 857 |
+
postsolve_args : tuple
|
| 858 |
+
Data needed by _postsolve to convert the solution to the standard-form
|
| 859 |
+
problem into the solution to the original problem.
|
| 860 |
+
|
| 861 |
+
Options
|
| 862 |
+
-------
|
| 863 |
+
maxiter : int (default = 1000)
|
| 864 |
+
The maximum number of iterations of the algorithm.
|
| 865 |
+
tol : float (default = 1e-8)
|
| 866 |
+
Termination tolerance to be used for all termination criteria;
|
| 867 |
+
see [4]_ Section 4.5.
|
| 868 |
+
disp : bool (default = False)
|
| 869 |
+
Set to ``True`` if indicators of optimization status are to be printed
|
| 870 |
+
to the console each iteration.
|
| 871 |
+
alpha0 : float (default = 0.99995)
|
| 872 |
+
The maximal step size for Mehrota's predictor-corrector search
|
| 873 |
+
direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
|
| 874 |
+
beta : float (default = 0.1)
|
| 875 |
+
The desired reduction of the path parameter :math:`\mu` (see [6]_)
|
| 876 |
+
when Mehrota's predictor-corrector is not in use (uncommon).
|
| 877 |
+
sparse : bool (default = False)
|
| 878 |
+
Set to ``True`` if the problem is to be treated as sparse after
|
| 879 |
+
presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
|
| 880 |
+
this option will automatically be set ``True``, and the problem
|
| 881 |
+
will be treated as sparse even during presolve. If your constraint
|
| 882 |
+
matrices contain mostly zeros and the problem is not very small (less
|
| 883 |
+
than about 100 constraints or variables), consider setting ``True``
|
| 884 |
+
or providing ``A_eq`` and ``A_ub`` as sparse matrices.
|
| 885 |
+
lstsq : bool (default = False)
|
| 886 |
+
Set to ``True`` if the problem is expected to be very poorly
|
| 887 |
+
conditioned. This should always be left ``False`` unless severe
|
| 888 |
+
numerical difficulties are encountered. Leave this at the default
|
| 889 |
+
unless you receive a warning message suggesting otherwise.
|
| 890 |
+
sym_pos : bool (default = True)
|
| 891 |
+
Leave ``True`` if the problem is expected to yield a well conditioned
|
| 892 |
+
symmetric positive definite normal equation matrix
|
| 893 |
+
(almost always). Leave this at the default unless you receive
|
| 894 |
+
a warning message suggesting otherwise.
|
| 895 |
+
cholesky : bool (default = True)
|
| 896 |
+
Set to ``True`` if the normal equations are to be solved by explicit
|
| 897 |
+
Cholesky decomposition followed by explicit forward/backward
|
| 898 |
+
substitution. This is typically faster for problems
|
| 899 |
+
that are numerically well-behaved.
|
| 900 |
+
pc : bool (default = True)
|
| 901 |
+
Leave ``True`` if the predictor-corrector method of Mehrota is to be
|
| 902 |
+
used. This is almost always (if not always) beneficial.
|
| 903 |
+
ip : bool (default = False)
|
| 904 |
+
Set to ``True`` if the improved initial point suggestion due to [4]_
|
| 905 |
+
Section 4.3 is desired. Whether this is beneficial or not
|
| 906 |
+
depends on the problem.
|
| 907 |
+
permc_spec : str (default = 'MMD_AT_PLUS_A')
|
| 908 |
+
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
|
| 909 |
+
True``, and no SuiteSparse.)
|
| 910 |
+
A matrix is factorized in each iteration of the algorithm.
|
| 911 |
+
This option specifies how to permute the columns of the matrix for
|
| 912 |
+
sparsity preservation. Acceptable values are:
|
| 913 |
+
|
| 914 |
+
- ``NATURAL``: natural ordering.
|
| 915 |
+
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
|
| 916 |
+
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
|
| 917 |
+
- ``COLAMD``: approximate minimum degree column ordering.
|
| 918 |
+
|
| 919 |
+
This option can impact the convergence of the
|
| 920 |
+
interior point algorithm; test different values to determine which
|
| 921 |
+
performs best for your problem. For more information, refer to
|
| 922 |
+
``scipy.sparse.linalg.splu``.
|
| 923 |
+
unknown_options : dict
|
| 924 |
+
Optional arguments not used by this particular solver. If
|
| 925 |
+
`unknown_options` is non-empty a warning is issued listing all
|
| 926 |
+
unused options.
|
| 927 |
+
|
| 928 |
+
Returns
|
| 929 |
+
-------
|
| 930 |
+
x : 1-D array
|
| 931 |
+
Solution vector.
|
| 932 |
+
status : int
|
| 933 |
+
An integer representing the exit status of the optimization::
|
| 934 |
+
|
| 935 |
+
0 : Optimization terminated successfully
|
| 936 |
+
1 : Iteration limit reached
|
| 937 |
+
2 : Problem appears to be infeasible
|
| 938 |
+
3 : Problem appears to be unbounded
|
| 939 |
+
4 : Serious numerical difficulties encountered
|
| 940 |
+
|
| 941 |
+
message : str
|
| 942 |
+
A string descriptor of the exit status of the optimization.
|
| 943 |
+
iteration : int
|
| 944 |
+
The number of iterations taken to solve the problem.
|
| 945 |
+
|
| 946 |
+
Notes
|
| 947 |
+
-----
|
| 948 |
+
This method implements the algorithm outlined in [4]_ with ideas from [8]_
|
| 949 |
+
and a structure inspired by the simpler methods of [6]_.
|
| 950 |
+
|
| 951 |
+
The primal-dual path following method begins with initial 'guesses' of
|
| 952 |
+
the primal and dual variables of the standard form problem and iteratively
|
| 953 |
+
attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
|
| 954 |
+
problem with a gradually reduced logarithmic barrier term added to the
|
| 955 |
+
objective. This particular implementation uses a homogeneous self-dual
|
| 956 |
+
formulation, which provides certificates of infeasibility or unboundedness
|
| 957 |
+
where applicable.
|
| 958 |
+
|
| 959 |
+
The default initial point for the primal and dual variables is that
|
| 960 |
+
defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
|
| 961 |
+
point option ``ip=True``), an alternate (potentially improved) starting
|
| 962 |
+
point can be calculated according to the additional recommendations of
|
| 963 |
+
[4]_ Section 4.4.
|
| 964 |
+
|
| 965 |
+
A search direction is calculated using the predictor-corrector method
|
| 966 |
+
(single correction) proposed by Mehrota and detailed in [4]_ Section 4.1.
|
| 967 |
+
(A potential improvement would be to implement the method of multiple
|
| 968 |
+
corrections described in [4]_ Section 4.2.) In practice, this is
|
| 969 |
+
accomplished by solving the normal equations, [4]_ Section 5.1 Equations
|
| 970 |
+
8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
|
| 971 |
+
8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
|
| 972 |
+
solving the normal equations rather than 8.25 directly is that the
|
| 973 |
+
matrices involved are symmetric positive definite, so Cholesky
|
| 974 |
+
decomposition can be used rather than the more expensive LU factorization.
|
| 975 |
+
|
| 976 |
+
With default options, the solver used to perform the factorization depends
|
| 977 |
+
on third-party software availability and the conditioning of the problem.
|
| 978 |
+
|
| 979 |
+
For dense problems, solvers are tried in the following order:
|
| 980 |
+
|
| 981 |
+
1. ``scipy.linalg.cho_factor``
|
| 982 |
+
|
| 983 |
+
2. ``scipy.linalg.solve`` with option ``sym_pos=True``
|
| 984 |
+
|
| 985 |
+
3. ``scipy.linalg.solve`` with option ``sym_pos=False``
|
| 986 |
+
|
| 987 |
+
4. ``scipy.linalg.lstsq``
|
| 988 |
+
|
| 989 |
+
For sparse problems:
|
| 990 |
+
|
| 991 |
+
1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed)
|
| 992 |
+
|
| 993 |
+
2. ``scipy.sparse.linalg.factorized``
|
| 994 |
+
(if scikit-umfpack and SuiteSparse are installed)
|
| 995 |
+
|
| 996 |
+
3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
|
| 997 |
+
|
| 998 |
+
4. ``scipy.sparse.linalg.lsqr``
|
| 999 |
+
|
| 1000 |
+
If the solver fails for any reason, successively more robust (but slower)
|
| 1001 |
+
solvers are attempted in the order indicated. Attempting, failing, and
|
| 1002 |
+
re-starting factorization can be time consuming, so if the problem is
|
| 1003 |
+
numerically challenging, options can be set to bypass solvers that are
|
| 1004 |
+
failing. Setting ``cholesky=False`` skips to solver 2,
|
| 1005 |
+
``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
|
| 1006 |
+
to solver 4 for both sparse and dense problems.
|
| 1007 |
+
|
| 1008 |
+
Potential improvements for combatting issues associated with dense
|
| 1009 |
+
columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
|
| 1010 |
+
[10]_ Section 4.1-4.2; the latter also discusses the alleviation of
|
| 1011 |
+
accuracy issues associated with the substitution approach to free
|
| 1012 |
+
variables.
|
| 1013 |
+
|
| 1014 |
+
After calculating the search direction, the maximum possible step size
|
| 1015 |
+
that does not activate the non-negativity constraints is calculated, and
|
| 1016 |
+
the smaller of this step size and unity is applied (as in [4]_ Section
|
| 1017 |
+
4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size.
|
| 1018 |
+
|
| 1019 |
+
The new point is tested according to the termination conditions of [4]_
|
| 1020 |
+
Section 4.5. The same tolerance, which can be set using the ``tol`` option,
|
| 1021 |
+
is used for all checks. (A potential improvement would be to expose
|
| 1022 |
+
the different tolerances to be set independently.) If optimality,
|
| 1023 |
+
unboundedness, or infeasibility is detected, the solve procedure
|
| 1024 |
+
terminates; otherwise it repeats.
|
| 1025 |
+
|
| 1026 |
+
The expected problem formulation differs between the top level ``linprog``
|
| 1027 |
+
module and the method specific solvers. The method specific solvers expect a
|
| 1028 |
+
problem in standard form:
|
| 1029 |
+
|
| 1030 |
+
Minimize::
|
| 1031 |
+
|
| 1032 |
+
c @ x
|
| 1033 |
+
|
| 1034 |
+
Subject to::
|
| 1035 |
+
|
| 1036 |
+
A @ x == b
|
| 1037 |
+
x >= 0
|
| 1038 |
+
|
| 1039 |
+
Whereas the top level ``linprog`` module expects a problem of form:
|
| 1040 |
+
|
| 1041 |
+
Minimize::
|
| 1042 |
+
|
| 1043 |
+
c @ x
|
| 1044 |
+
|
| 1045 |
+
Subject to::
|
| 1046 |
+
|
| 1047 |
+
A_ub @ x <= b_ub
|
| 1048 |
+
A_eq @ x == b_eq
|
| 1049 |
+
lb <= x <= ub
|
| 1050 |
+
|
| 1051 |
+
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
|
| 1052 |
+
|
| 1053 |
+
The original problem contains equality, upper-bound and variable constraints
|
| 1054 |
+
whereas the method specific solver requires equality constraints and
|
| 1055 |
+
variable non-negativity.
|
| 1056 |
+
|
| 1057 |
+
``linprog`` module converts the original problem to standard form by
|
| 1058 |
+
converting the simple bounds to upper bound constraints, introducing
|
| 1059 |
+
non-negative slack variables for inequality constraints, and expressing
|
| 1060 |
+
unbounded variables as the difference between two non-negative variables.
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
References
|
| 1064 |
+
----------
|
| 1065 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 1066 |
+
optimizer for linear programming: an implementation of the
|
| 1067 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 1068 |
+
2000. 197-232.
|
| 1069 |
+
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
|
| 1070 |
+
Programming based on Newton's Method." Unpublished Course Notes,
|
| 1071 |
+
March 2004. Available 2/25/2017 at
|
| 1072 |
+
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
|
| 1073 |
+
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
|
| 1074 |
+
programming." Mathematical Programming 71.2 (1995): 221-245.
|
| 1075 |
+
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
|
| 1076 |
+
programming." Athena Scientific 1 (1997): 997.
|
| 1077 |
+
.. [10] Andersen, Erling D., et al. Implementation of interior point methods
|
| 1078 |
+
for large scale linear programming. HEC/Universite de Geneve, 1996.
|
| 1079 |
+
|
| 1080 |
+
"""
|
| 1081 |
+
|
| 1082 |
+
_check_unknown_options(unknown_options)
|
| 1083 |
+
|
| 1084 |
+
# These should be warnings, not errors
|
| 1085 |
+
if (cholesky or cholesky is None) and sparse and not has_cholmod:
|
| 1086 |
+
if cholesky:
|
| 1087 |
+
warn("Sparse cholesky is only available with scikit-sparse. "
|
| 1088 |
+
"Setting `cholesky = False`",
|
| 1089 |
+
OptimizeWarning, stacklevel=3)
|
| 1090 |
+
cholesky = False
|
| 1091 |
+
|
| 1092 |
+
if sparse and lstsq:
|
| 1093 |
+
warn("Option combination 'sparse':True and 'lstsq':True "
|
| 1094 |
+
"is not recommended.",
|
| 1095 |
+
OptimizeWarning, stacklevel=3)
|
| 1096 |
+
|
| 1097 |
+
if lstsq and cholesky:
|
| 1098 |
+
warn("Invalid option combination 'lstsq':True "
|
| 1099 |
+
"and 'cholesky':True; option 'cholesky' has no effect when "
|
| 1100 |
+
"'lstsq' is set True.",
|
| 1101 |
+
OptimizeWarning, stacklevel=3)
|
| 1102 |
+
|
| 1103 |
+
valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD')
|
| 1104 |
+
if permc_spec.upper() not in valid_permc_spec:
|
| 1105 |
+
warn("Invalid permc_spec option: '" + str(permc_spec) + "'. "
|
| 1106 |
+
"Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', "
|
| 1107 |
+
"and 'COLAMD'. Reverting to default.",
|
| 1108 |
+
OptimizeWarning, stacklevel=3)
|
| 1109 |
+
permc_spec = 'MMD_AT_PLUS_A'
|
| 1110 |
+
|
| 1111 |
+
# This can be an error
|
| 1112 |
+
if not sym_pos and cholesky:
|
| 1113 |
+
raise ValueError(
|
| 1114 |
+
"Invalid option combination 'sym_pos':False "
|
| 1115 |
+
"and 'cholesky':True: Cholesky decomposition is only possible "
|
| 1116 |
+
"for symmetric positive definite matrices.")
|
| 1117 |
+
|
| 1118 |
+
cholesky = cholesky or (cholesky is None and sym_pos and not lstsq)
|
| 1119 |
+
|
| 1120 |
+
x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta,
|
| 1121 |
+
maxiter, disp, tol, sparse,
|
| 1122 |
+
lstsq, sym_pos, cholesky,
|
| 1123 |
+
pc, ip, permc_spec, callback,
|
| 1124 |
+
postsolve_args)
|
| 1125 |
+
|
| 1126 |
+
return x, status, message, iteration
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py
ADDED
|
@@ -0,0 +1,572 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Revised simplex method for linear programming
|
| 2 |
+
|
| 3 |
+
The *revised simplex* method uses the method described in [1]_, except
|
| 4 |
+
that a factorization [2]_ of the basis matrix, rather than its inverse,
|
| 5 |
+
is efficiently maintained and used to solve the linear systems at each
|
| 6 |
+
iteration of the algorithm.
|
| 7 |
+
|
| 8 |
+
.. versionadded:: 1.3.0
|
| 9 |
+
|
| 10 |
+
References
|
| 11 |
+
----------
|
| 12 |
+
.. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
|
| 13 |
+
programming." Athena Scientific 1 (1997): 997.
|
| 14 |
+
.. [2] Bartels, Richard H. "A stabilization of the simplex method."
|
| 15 |
+
Journal in Numerische Mathematik 16.5 (1971): 414-434.
|
| 16 |
+
|
| 17 |
+
"""
|
| 18 |
+
# Author: Matt Haberland
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
from numpy.linalg import LinAlgError
|
| 22 |
+
|
| 23 |
+
from scipy.linalg import solve
|
| 24 |
+
from ._optimize import _check_unknown_options
|
| 25 |
+
from ._bglu_dense import LU
|
| 26 |
+
from ._bglu_dense import BGLU as BGLU
|
| 27 |
+
from ._linprog_util import _postsolve
|
| 28 |
+
from ._optimize import OptimizeResult
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
|
| 32 |
+
maxupdate, mast, pivot):
|
| 33 |
+
"""
|
| 34 |
+
The purpose of phase one is to find an initial basic feasible solution
|
| 35 |
+
(BFS) to the original problem.
|
| 36 |
+
|
| 37 |
+
Generates an auxiliary problem with a trivial BFS and an objective that
|
| 38 |
+
minimizes infeasibility of the original problem. Solves the auxiliary
|
| 39 |
+
problem using the main simplex routine (phase two). This either yields
|
| 40 |
+
a BFS to the original problem or determines that the original problem is
|
| 41 |
+
infeasible. If feasible, phase one detects redundant rows in the original
|
| 42 |
+
constraint matrix and removes them, then chooses additional indices as
|
| 43 |
+
necessary to complete a basis/BFS for the original problem.
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
m, n = A.shape
|
| 47 |
+
status = 0
|
| 48 |
+
|
| 49 |
+
# generate auxiliary problem to get initial BFS
|
| 50 |
+
A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)
|
| 51 |
+
|
| 52 |
+
if status == 6:
|
| 53 |
+
residual = c.dot(x)
|
| 54 |
+
iter_k = 0
|
| 55 |
+
return x, basis, A, b, residual, status, iter_k
|
| 56 |
+
|
| 57 |
+
# solve auxiliary problem
|
| 58 |
+
phase_one_n = n
|
| 59 |
+
iter_k = 0
|
| 60 |
+
x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
|
| 61 |
+
postsolve_args,
|
| 62 |
+
maxiter, tol, disp,
|
| 63 |
+
maxupdate, mast, pivot,
|
| 64 |
+
iter_k, phase_one_n)
|
| 65 |
+
|
| 66 |
+
# check for infeasibility
|
| 67 |
+
residual = c.dot(x)
|
| 68 |
+
if status == 0 and residual > tol:
|
| 69 |
+
status = 2
|
| 70 |
+
|
| 71 |
+
# drive artificial variables out of basis
|
| 72 |
+
# TODO: test redundant row removal better
|
| 73 |
+
# TODO: make solve more efficient with BGLU? This could take a while.
|
| 74 |
+
keep_rows = np.ones(m, dtype=bool)
|
| 75 |
+
for basis_column in basis[basis >= n]:
|
| 76 |
+
B = A[:, basis]
|
| 77 |
+
try:
|
| 78 |
+
basis_finder = np.abs(solve(B, A)) # inefficient
|
| 79 |
+
pertinent_row = np.argmax(basis_finder[:, basis_column])
|
| 80 |
+
eligible_columns = np.ones(n, dtype=bool)
|
| 81 |
+
eligible_columns[basis[basis < n]] = 0
|
| 82 |
+
eligible_column_indices = np.where(eligible_columns)[0]
|
| 83 |
+
index = np.argmax(basis_finder[:, :n]
|
| 84 |
+
[pertinent_row, eligible_columns])
|
| 85 |
+
new_basis_column = eligible_column_indices[index]
|
| 86 |
+
if basis_finder[pertinent_row, new_basis_column] < tol:
|
| 87 |
+
keep_rows[pertinent_row] = False
|
| 88 |
+
else:
|
| 89 |
+
basis[basis == basis_column] = new_basis_column
|
| 90 |
+
except LinAlgError:
|
| 91 |
+
status = 4
|
| 92 |
+
|
| 93 |
+
# form solution to original problem
|
| 94 |
+
A = A[keep_rows, :n]
|
| 95 |
+
basis = basis[keep_rows]
|
| 96 |
+
x = x[:n]
|
| 97 |
+
m = A.shape[0]
|
| 98 |
+
return x, basis, A, b, residual, status, iter_k
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _get_more_basis_columns(A, basis):
|
| 102 |
+
"""
|
| 103 |
+
Called when the auxiliary problem terminates with artificial columns in
|
| 104 |
+
the basis, which must be removed and replaced with non-artificial
|
| 105 |
+
columns. Finds additional columns that do not make the matrix singular.
|
| 106 |
+
"""
|
| 107 |
+
m, n = A.shape
|
| 108 |
+
|
| 109 |
+
# options for inclusion are those that aren't already in the basis
|
| 110 |
+
a = np.arange(m+n)
|
| 111 |
+
bl = np.zeros(len(a), dtype=bool)
|
| 112 |
+
bl[basis] = 1
|
| 113 |
+
options = a[~bl]
|
| 114 |
+
options = options[options < n] # and they have to be non-artificial
|
| 115 |
+
|
| 116 |
+
# form basis matrix
|
| 117 |
+
B = np.zeros((m, m))
|
| 118 |
+
B[:, 0:len(basis)] = A[:, basis]
|
| 119 |
+
|
| 120 |
+
if (basis.size > 0 and
|
| 121 |
+
np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)):
|
| 122 |
+
raise Exception("Basis has dependent columns")
|
| 123 |
+
|
| 124 |
+
rank = 0 # just enter the loop
|
| 125 |
+
for i in range(n): # somewhat arbitrary, but we need another way out
|
| 126 |
+
# permute the options, and take as many as needed
|
| 127 |
+
new_basis = np.random.permutation(options)[:m-len(basis)]
|
| 128 |
+
B[:, len(basis):] = A[:, new_basis] # update the basis matrix
|
| 129 |
+
rank = np.linalg.matrix_rank(B) # check the rank
|
| 130 |
+
if rank == m:
|
| 131 |
+
break
|
| 132 |
+
|
| 133 |
+
return np.concatenate((basis, new_basis))
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _generate_auxiliary_problem(A, b, x0, tol):
|
| 137 |
+
"""
|
| 138 |
+
Modifies original problem to create an auxiliary problem with a trivial
|
| 139 |
+
initial basic feasible solution and an objective that minimizes
|
| 140 |
+
infeasibility in the original problem.
|
| 141 |
+
|
| 142 |
+
Conceptually, this is done by stacking an identity matrix on the right of
|
| 143 |
+
the original constraint matrix, adding artificial variables to correspond
|
| 144 |
+
with each of these new columns, and generating a cost vector that is all
|
| 145 |
+
zeros except for ones corresponding with each of the new variables.
|
| 146 |
+
|
| 147 |
+
A initial basic feasible solution is trivial: all variables are zero
|
| 148 |
+
except for the artificial variables, which are set equal to the
|
| 149 |
+
corresponding element of the right hand side `b`.
|
| 150 |
+
|
| 151 |
+
Running the simplex method on this auxiliary problem drives all of the
|
| 152 |
+
artificial variables - and thus the cost - to zero if the original problem
|
| 153 |
+
is feasible. The original problem is declared infeasible otherwise.
|
| 154 |
+
|
| 155 |
+
Much of the complexity below is to improve efficiency by using singleton
|
| 156 |
+
columns in the original problem where possible, thus generating artificial
|
| 157 |
+
variables only as necessary, and using an initial 'guess' basic feasible
|
| 158 |
+
solution.
|
| 159 |
+
"""
|
| 160 |
+
status = 0
|
| 161 |
+
m, n = A.shape
|
| 162 |
+
|
| 163 |
+
if x0 is not None:
|
| 164 |
+
x = x0
|
| 165 |
+
else:
|
| 166 |
+
x = np.zeros(n)
|
| 167 |
+
|
| 168 |
+
r = b - A@x # residual; this must be all zeros for feasibility
|
| 169 |
+
|
| 170 |
+
A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS
|
| 171 |
+
b[r < 0] = -b[r < 0] # to the auxiliary problem
|
| 172 |
+
r[r < 0] *= -1
|
| 173 |
+
|
| 174 |
+
# Rows which we will need to find a trivial way to zero.
|
| 175 |
+
# This should just be the rows where there is a nonzero residual.
|
| 176 |
+
# But then we would not necessarily have a column singleton in every row.
|
| 177 |
+
# This makes it difficult to find an initial basis.
|
| 178 |
+
if x0 is None:
|
| 179 |
+
nonzero_constraints = np.arange(m)
|
| 180 |
+
else:
|
| 181 |
+
nonzero_constraints = np.where(r > tol)[0]
|
| 182 |
+
|
| 183 |
+
# these are (at least some of) the initial basis columns
|
| 184 |
+
basis = np.where(np.abs(x) > tol)[0]
|
| 185 |
+
|
| 186 |
+
if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS
|
| 187 |
+
c = np.zeros(n)
|
| 188 |
+
basis = _get_more_basis_columns(A, basis)
|
| 189 |
+
return A, b, c, basis, x, status
|
| 190 |
+
elif (len(nonzero_constraints) > m - len(basis) or
|
| 191 |
+
np.any(x < 0)): # can't get trivial BFS
|
| 192 |
+
c = np.zeros(n)
|
| 193 |
+
status = 6
|
| 194 |
+
return A, b, c, basis, x, status
|
| 195 |
+
|
| 196 |
+
# chooses existing columns appropriate for inclusion in initial basis
|
| 197 |
+
cols, rows = _select_singleton_columns(A, r)
|
| 198 |
+
|
| 199 |
+
# find the rows we need to zero that we _can_ zero with column singletons
|
| 200 |
+
i_tofix = np.isin(rows, nonzero_constraints)
|
| 201 |
+
# these columns can't already be in the basis, though
|
| 202 |
+
# we are going to add them to the basis and change the corresponding x val
|
| 203 |
+
i_notinbasis = np.logical_not(np.isin(cols, basis))
|
| 204 |
+
i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis)
|
| 205 |
+
rows = rows[i_fix_without_aux]
|
| 206 |
+
cols = cols[i_fix_without_aux]
|
| 207 |
+
|
| 208 |
+
# indices of the rows we can only zero with auxiliary variable
|
| 209 |
+
# these rows will get a one in each auxiliary column
|
| 210 |
+
arows = nonzero_constraints[np.logical_not(
|
| 211 |
+
np.isin(nonzero_constraints, rows))]
|
| 212 |
+
n_aux = len(arows)
|
| 213 |
+
acols = n + np.arange(n_aux) # indices of auxiliary columns
|
| 214 |
+
|
| 215 |
+
basis_ng = np.concatenate((cols, acols)) # basis columns not from guess
|
| 216 |
+
basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero
|
| 217 |
+
|
| 218 |
+
# add auxiliary singleton columns
|
| 219 |
+
A = np.hstack((A, np.zeros((m, n_aux))))
|
| 220 |
+
A[arows, acols] = 1
|
| 221 |
+
|
| 222 |
+
# generate initial BFS
|
| 223 |
+
x = np.concatenate((x, np.zeros(n_aux)))
|
| 224 |
+
x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng]
|
| 225 |
+
|
| 226 |
+
# generate costs to minimize infeasibility
|
| 227 |
+
c = np.zeros(n_aux + n)
|
| 228 |
+
c[acols] = 1
|
| 229 |
+
|
| 230 |
+
# basis columns correspond with nonzeros in guess, those with column
|
| 231 |
+
# singletons we used to zero remaining constraints, and any additional
|
| 232 |
+
# columns to get a full set (m columns)
|
| 233 |
+
basis = np.concatenate((basis, basis_ng))
|
| 234 |
+
basis = _get_more_basis_columns(A, basis) # add columns as needed
|
| 235 |
+
|
| 236 |
+
return A, b, c, basis, x, status
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def _select_singleton_columns(A, b):
|
| 240 |
+
"""
|
| 241 |
+
Finds singleton columns for which the singleton entry is of the same sign
|
| 242 |
+
as the right-hand side; these columns are eligible for inclusion in an
|
| 243 |
+
initial basis. Determines the rows in which the singleton entries are
|
| 244 |
+
located. For each of these rows, returns the indices of the one singleton
|
| 245 |
+
column and its corresponding row.
|
| 246 |
+
"""
|
| 247 |
+
# find indices of all singleton columns and corresponding row indices
|
| 248 |
+
column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
|
| 249 |
+
columns = A[:, column_indices] # array of singleton columns
|
| 250 |
+
row_indices = np.zeros(len(column_indices), dtype=int)
|
| 251 |
+
nonzero_rows, nonzero_columns = np.nonzero(columns)
|
| 252 |
+
row_indices[nonzero_columns] = nonzero_rows # corresponding row indices
|
| 253 |
+
|
| 254 |
+
# keep only singletons with entries that have same sign as RHS
|
| 255 |
+
# this is necessary because all elements of BFS must be non-negative
|
| 256 |
+
same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
|
| 257 |
+
column_indices = column_indices[same_sign][::-1]
|
| 258 |
+
row_indices = row_indices[same_sign][::-1]
|
| 259 |
+
# Reversing the order so that steps below select rightmost columns
|
| 260 |
+
# for initial basis, which will tend to be slack variables. (If the
|
| 261 |
+
# guess corresponds with a basic feasible solution but a constraint
|
| 262 |
+
# is not satisfied with the corresponding slack variable zero, the slack
|
| 263 |
+
# variable must be basic.)
|
| 264 |
+
|
| 265 |
+
# for each row, keep rightmost singleton column with an entry in that row
|
| 266 |
+
unique_row_indices, first_columns = np.unique(row_indices,
|
| 267 |
+
return_index=True)
|
| 268 |
+
return column_indices[first_columns], unique_row_indices
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def _find_nonzero_rows(A, tol):
|
| 272 |
+
"""
|
| 273 |
+
Returns logical array indicating the locations of rows with at least
|
| 274 |
+
one nonzero element.
|
| 275 |
+
"""
|
| 276 |
+
return np.any(np.abs(A) > tol, axis=1)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
|
| 280 |
+
"""
|
| 281 |
+
Selects a pivot to enter the basis. Currently Bland's rule - the smallest
|
| 282 |
+
index that has a negative reduced cost - is the default.
|
| 283 |
+
"""
|
| 284 |
+
if rule.lower() == "mrc": # index with minimum reduced cost
|
| 285 |
+
return a[~bl][np.argmin(c_hat)]
|
| 286 |
+
else: # smallest index w/ negative reduced cost
|
| 287 |
+
return a[~bl][c_hat < -tol][0]
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def _display_iter(phase, iteration, slack, con, fun):
|
| 291 |
+
"""
|
| 292 |
+
Print indicators of optimization status to the console.
|
| 293 |
+
"""
|
| 294 |
+
header = True if not iteration % 20 else False
|
| 295 |
+
|
| 296 |
+
if header:
|
| 297 |
+
print("Phase",
|
| 298 |
+
"Iteration",
|
| 299 |
+
"Minimum Slack ",
|
| 300 |
+
"Constraint Residual",
|
| 301 |
+
"Objective ")
|
| 302 |
+
|
| 303 |
+
# :<X.Y left aligns Y digits in X digit spaces
|
| 304 |
+
fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
|
| 305 |
+
try:
|
| 306 |
+
slack = np.min(slack)
|
| 307 |
+
except ValueError:
|
| 308 |
+
slack = "NA"
|
| 309 |
+
print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun))
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def _display_and_callback(phase_one_n, x, postsolve_args, status,
                          iteration, disp, callback):
    """Report intermediate progress via ``_display_iter`` and/or ``callback``.

    During phase one (``phase_one_n`` is not None) only the first
    ``phase_one_n`` entries of ``x`` correspond to original variables, so
    the vector is truncated before postsolving.
    """
    in_phase_one = phase_one_n is not None
    phase = 1 if in_phase_one else 2
    x_postsolve = x[:phase_one_n] if in_phase_one else x
    x_o, fun, slack, con = _postsolve(x_postsolve, postsolve_args)

    if callback is not None:
        # 'success'/'complete' are always False here: the algorithm is
        # still iterating when this reporting hook runs.
        callback(OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
                                 'con': con, 'nit': iteration,
                                 'phase': phase, 'complete': False,
                                 'status': status, 'message': "",
                                 'success': False}))
    if disp:
        _display_iter(phase, iteration, slack, con, fun)
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
               maxupdate, mast, pivot, iteration=0, phase_one_n=None):
    """
    The heart of the simplex method. Beginning with a basic feasible solution,
    moves to adjacent basic feasible solutions successively lower reduced cost.
    Terminates when there are no basic feasible solutions with lower reduced
    cost or if the problem is determined to be unbounded.

    This implementation follows the revised simplex method based on LU
    decomposition. Rather than maintaining a tableau or an inverse of the
    basis matrix, we keep a factorization of the basis matrix that allows
    efficient solution of linear systems while avoiding stability issues
    associated with inverted matrices.

    Parameters
    ----------
    c : 1-D array
        Objective coefficients of the standard-form problem.
    A : 2-D array
        Equality-constraint matrix of the standard-form problem.
    x : 1-D array
        A basic feasible solution; updated in place as steps are taken.
    b : 1-D array
        Column indices of ``A`` forming the current basis.
    callback : callable or None
        User callback invoked each iteration (via ``_display_and_callback``).
    postsolve_args : tuple
        Data forwarded to ``_postsolve`` for progress reporting.
    maxiter : int
        Iteration limit (shared across both phases).
    tol : float
        Tolerance for the reduced-cost termination and ratio tests.
    disp : bool
        Whether to print progress each iteration.
    maxupdate : int
        Maximum number of updates of the LU factorization before a full
        refactorization; 0 disables updating (plain LU is used instead).
    mast : bool
        "Minimize amortized solve time" heuristic, forwarded to BGLU.
    pivot : {"mrc", "bland"}
        Entering-variable selection rule (see ``_select_enter_pivot``).
    iteration : int
        Starting iteration count (nonzero when continuing after phase one).
    phase_one_n : int or None
        Number of original variables when this is driving phase one;
        None indicates a genuine phase-two solve.

    Returns
    -------
    x : 1-D array
        Final solution of the standard-form problem.
    b : 1-D array
        Column indices of the final basis.
    status : int
        0 success, 1 iteration limit, 3 unbounded, 4 numerical difficulties.
    iteration : int
        Total iteration count.
    """
    m, n = A.shape
    status = 0
    a = np.arange(n)                    # indices of columns of A
    ab = np.arange(m)                   # indices of columns of B
    if maxupdate:
        # basis matrix factorization object; similar to B = A[:, b]
        B = BGLU(A, b, maxupdate, mast)
    else:
        B = LU(A, b)

    for iteration in range(iteration, maxiter):

        if disp or callback is not None:
            _display_and_callback(phase_one_n, x, postsolve_args, status,
                                  iteration, disp, callback)

        # Boolean mask over all columns: True where the column is basic.
        bl = np.zeros(len(a), dtype=bool)
        bl[b] = 1

        xb = x[b]       # basic variables
        cb = c[b]       # basic costs

        try:
            # Simplex multipliers; similar to v = solve(B.T, cb).
            v = B.solve(cb, transposed=True)
        except LinAlgError:
            status = 4
            break

        # TODO: cythonize?
        c_hat = c - v.dot(A)    # reduced cost
        c_hat = c_hat[~bl]
        # Above is much faster than:
        # N = A[:, ~bl]                # slow!
        # c_hat = c[~bl] - v.T.dot(N)
        # Can we perform the multiplication only on the nonbasic columns?

        if np.all(c_hat >= -tol):  # all reduced costs positive -> terminate
            break

        # Entering column j and its representation u in the current basis;
        # u is similar to solve(B, A[:, j]).
        j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
        u = B.solve(A[:, j])

        i = u > tol                 # if none of the u are positive, unbounded
        if not np.any(i):
            status = 3
            break

        # Ratio test selects the leaving variable and the step length.
        th = xb[i]/u[i]
        l = np.argmin(th)           # implicitly selects smallest subscript
        th_star = th[l]             # step size

        x[b] = x[b] - th_star*u     # take step
        x[j] = th_star
        B.update(ab[i][l], j)       # modify basis
        b = B.b                     # similar to b[ab[i][l]] = j

    else:
        # If the end of the for loop is reached (without a break statement),
        # then another step has been taken, so the iteration counter should
        # increment, info should be displayed, and callback should be called.
        iteration += 1
        status = 1
        if disp or callback is not None:
            _display_and_callback(phase_one_n, x, postsolve_args, status,
                                  iteration, disp, callback)

    return x, b, status, iteration
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
                maxiter=5000, tol=1e-12, disp=False,
                maxupdate=10, mast=False, pivot="mrc",
                **unknown_options):
    """
    Solve the following linear programming problem via a two-phase
    revised simplex algorithm.::

        minimize:     c @ x

        subject to:  A @ x == b
                     0 <= x < oo

    User-facing documentation is in _linprog_doc.py.

    Parameters
    ----------
    c : 1-D array
        Coefficients of the linear objective function to be minimized.
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables. (Currently unused.)
    A : 2-D array
        2-D array which, when matrix-multiplied by ``x``, gives the values of
        the equality constraints at ``x``.
    b : 1-D array
        1-D array of values representing the RHS of each equality constraint
        (row) in ``A_eq``.
    x0 : 1-D array, optional
        Starting values of the independent variables, which will be refined by
        the optimization algorithm. For the revised simplex method, these must
        correspond with a basic feasible solution.
    callback : callable, optional
        If a callback function is provided, it will be called within each
        iteration of the algorithm. The callback function must accept a single
        `scipy.optimize.OptimizeResult` consisting of the following fields:

            x : 1-D array
                Current solution vector.
            fun : float
                Current value of the objective function ``c @ x``.
            success : bool
                True only when an algorithm has completed successfully,
                so this is always False as the callback function is called
                only while the algorithm is still iterating.
            slack : 1-D array
                The values of the slack variables. Each slack variable
                corresponds to an inequality constraint. If the slack is zero,
                the corresponding constraint is active.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                that is, ``b - A_eq @ x``.
            phase : int
                The phase of the algorithm being executed.
            status : int
                For revised simplex, this is always 0 because if a different
                status is detected, the algorithm terminates.
            nit : int
                The number of iterations performed.
            message : str
                A string descriptor of the exit status of the optimization.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the standard-form
        problem into the solution to the original problem.

    Options
    -------
    maxiter : int
        The maximum number of iterations to perform in either phase.
    tol : float
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
    disp : bool
        Set to ``True`` if indicators of optimization status are to be printed
        to the console each iteration.
    maxupdate : int
        The maximum number of updates performed on the LU factorization.
        After this many updates is reached, the basis matrix is factorized
        from scratch.
    mast : bool
        Minimize Amortized Solve Time. If enabled, the average time to solve
        a linear system using the basis factorization is measured. Typically,
        the average solve time will decrease with each successive solve after
        initial factorization, as factorization takes much more time than the
        solve operation (and updates). Eventually, however, the updated
        factorization becomes sufficiently complex that the average solve time
        begins to increase. When this is detected, the basis is refactorized
        from scratch. Enable this option to maximize speed at the risk of
        nondeterministic behavior. Ignored if ``maxupdate`` is 0.
    pivot : "mrc" or "bland"
        Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose
        Bland's rule if iteration limit is reached and cycling is suspected.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        `unknown_options` is non-empty a warning is issued listing all
        unused options.

    Returns
    -------
    x : 1-D array
        Solution vector.
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Numerical difficulties encountered
         5 : No constraints; turn presolve on
         6 : Guess x0 cannot be converted to a basic feasible solution

    message : str
        A string descriptor of the exit status of the optimization.
    iteration : int
        The number of iterations taken to solve the problem.
    """

    _check_unknown_options(unknown_options)

    # Messages indexed by the status codes documented above. Messages 2
    # and (implicitly) the final return are formatted with (residual, tol).
    messages = ["Optimization terminated successfully.",
                "Iteration limit reached.",
                "The problem appears infeasible, as the phase one auxiliary "
                "problem terminated successfully with a residual of {0:.1e}, "
                "greater than the tolerance {1} required for the solution to "
                "be considered feasible. Consider increasing the tolerance to "
                "be greater than {0:.1e}. If this tolerance is unacceptably "
                "large, the problem is likely infeasible.",
                "The problem is unbounded, as the simplex algorithm found "
                "a basic feasible solution from which there is a direction "
                "with negative reduced cost in which all decision variables "
                "increase.",
                "Numerical difficulties encountered; consider trying "
                "method='interior-point'.",
                "Problems with no constraints are trivially solved; please "
                "turn presolve on.",
                "The guess x0 cannot be converted to a basic feasible "
                "solution. "
                ]

    if A.size == 0:  # address test_unbounded_below_no_presolve_corrected
        return np.zeros(c.shape), 5, messages[5], 0

    # Phase one: find a basic feasible solution (and an initial basis).
    x, basis, A, b, residual, status, iteration = (
        _phase_one(A, b, x0, callback, postsolve_args,
                   maxiter, tol, disp, maxupdate, mast, pivot))

    # Phase two: optimize the actual objective from that starting point.
    if status == 0:
        x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
                                                 postsolve_args,
                                                 maxiter, tol, disp,
                                                 maxupdate, mast, pivot,
                                                 iteration)

    return x, status, messages[status].format(residual, tol), iteration
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py
ADDED
|
@@ -0,0 +1,661 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Simplex method for linear programming
|
| 2 |
+
|
| 3 |
+
The *simplex* method uses a traditional, full-tableau implementation of
|
| 4 |
+
Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex).
|
| 5 |
+
This algorithm is included for backwards compatibility and educational
|
| 6 |
+
purposes.
|
| 7 |
+
|
| 8 |
+
.. versionadded:: 0.15.0
|
| 9 |
+
|
| 10 |
+
Warnings
|
| 11 |
+
--------
|
| 12 |
+
|
| 13 |
+
The simplex method may encounter numerical difficulties when pivot
|
| 14 |
+
values are close to the specified tolerance. If encountered, try
removing any redundant constraints, changing the pivot strategy to
Bland's rule, or increasing the tolerance value.
|
| 17 |
+
|
| 18 |
+
Alternatively, more robust methods may be used. See
|
| 19 |
+
:ref:`'interior-point' <optimize.linprog-interior-point>` and
|
| 20 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`.
|
| 21 |
+
|
| 22 |
+
References
|
| 23 |
+
----------
|
| 24 |
+
.. [1] Dantzig, George B., Linear programming and extensions. Rand
|
| 25 |
+
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
|
| 26 |
+
1963
|
| 27 |
+
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
|
| 28 |
+
Mathematical Programming", McGraw-Hill, Chapter 4.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
import numpy as np
|
| 32 |
+
from warnings import warn
|
| 33 |
+
from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
|
| 34 |
+
from ._linprog_util import _postsolve
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _pivot_col(T, tol=1e-9, bland=False):
|
| 38 |
+
"""
|
| 39 |
+
Given a linear programming simplex tableau, determine the column
|
| 40 |
+
of the variable to enter the basis.
|
| 41 |
+
|
| 42 |
+
Parameters
|
| 43 |
+
----------
|
| 44 |
+
T : 2-D array
|
| 45 |
+
A 2-D array representing the simplex tableau, T, corresponding to the
|
| 46 |
+
linear programming problem. It should have the form:
|
| 47 |
+
|
| 48 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 49 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 50 |
+
.
|
| 51 |
+
.
|
| 52 |
+
.
|
| 53 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 54 |
+
[c[0], c[1], ..., c[n_total], 0]]
|
| 55 |
+
|
| 56 |
+
for a Phase 2 problem, or the form:
|
| 57 |
+
|
| 58 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 59 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 60 |
+
.
|
| 61 |
+
.
|
| 62 |
+
.
|
| 63 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 64 |
+
[c[0], c[1], ..., c[n_total], 0],
|
| 65 |
+
[c'[0], c'[1], ..., c'[n_total], 0]]
|
| 66 |
+
|
| 67 |
+
for a Phase 1 problem (a problem in which a basic feasible solution is
|
| 68 |
+
sought prior to maximizing the actual objective. ``T`` is modified in
|
| 69 |
+
place by ``_solve_simplex``.
|
| 70 |
+
tol : float
|
| 71 |
+
Elements in the objective row larger than -tol will not be considered
|
| 72 |
+
for pivoting. Nominally this value is zero, but numerical issues
|
| 73 |
+
cause a tolerance about zero to be necessary.
|
| 74 |
+
bland : bool
|
| 75 |
+
If True, use Bland's rule for selection of the column (select the
|
| 76 |
+
first column with a negative coefficient in the objective row,
|
| 77 |
+
regardless of magnitude).
|
| 78 |
+
|
| 79 |
+
Returns
|
| 80 |
+
-------
|
| 81 |
+
status: bool
|
| 82 |
+
True if a suitable pivot column was found, otherwise False.
|
| 83 |
+
A return of False indicates that the linear programming simplex
|
| 84 |
+
algorithm is complete.
|
| 85 |
+
col: int
|
| 86 |
+
The index of the column of the pivot element.
|
| 87 |
+
If status is False, col will be returned as nan.
|
| 88 |
+
"""
|
| 89 |
+
ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
|
| 90 |
+
if ma.count() == 0:
|
| 91 |
+
return False, np.nan
|
| 92 |
+
if bland:
|
| 93 |
+
# ma.mask is sometimes 0d
|
| 94 |
+
return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0]
|
| 95 |
+
return True, np.ma.nonzero(ma == ma.min())[0][0]
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False):
|
| 99 |
+
"""
|
| 100 |
+
Given a linear programming simplex tableau, determine the row for the
|
| 101 |
+
pivot operation.
|
| 102 |
+
|
| 103 |
+
Parameters
|
| 104 |
+
----------
|
| 105 |
+
T : 2-D array
|
| 106 |
+
A 2-D array representing the simplex tableau, T, corresponding to the
|
| 107 |
+
linear programming problem. It should have the form:
|
| 108 |
+
|
| 109 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 110 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 111 |
+
.
|
| 112 |
+
.
|
| 113 |
+
.
|
| 114 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 115 |
+
[c[0], c[1], ..., c[n_total], 0]]
|
| 116 |
+
|
| 117 |
+
for a Phase 2 problem, or the form:
|
| 118 |
+
|
| 119 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 120 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 121 |
+
.
|
| 122 |
+
.
|
| 123 |
+
.
|
| 124 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 125 |
+
[c[0], c[1], ..., c[n_total], 0],
|
| 126 |
+
[c'[0], c'[1], ..., c'[n_total], 0]]
|
| 127 |
+
|
| 128 |
+
for a Phase 1 problem (a Problem in which a basic feasible solution is
|
| 129 |
+
sought prior to maximizing the actual objective. ``T`` is modified in
|
| 130 |
+
place by ``_solve_simplex``.
|
| 131 |
+
basis : array
|
| 132 |
+
A list of the current basic variables.
|
| 133 |
+
pivcol : int
|
| 134 |
+
The index of the pivot column.
|
| 135 |
+
phase : int
|
| 136 |
+
The phase of the simplex algorithm (1 or 2).
|
| 137 |
+
tol : float
|
| 138 |
+
Elements in the pivot column smaller than tol will not be considered
|
| 139 |
+
for pivoting. Nominally this value is zero, but numerical issues
|
| 140 |
+
cause a tolerance about zero to be necessary.
|
| 141 |
+
bland : bool
|
| 142 |
+
If True, use Bland's rule for selection of the row (if more than one
|
| 143 |
+
row can be used, choose the one with the lowest variable index).
|
| 144 |
+
|
| 145 |
+
Returns
|
| 146 |
+
-------
|
| 147 |
+
status: bool
|
| 148 |
+
True if a suitable pivot row was found, otherwise False. A return
|
| 149 |
+
of False indicates that the linear programming problem is unbounded.
|
| 150 |
+
row: int
|
| 151 |
+
The index of the row of the pivot element. If status is False, row
|
| 152 |
+
will be returned as nan.
|
| 153 |
+
"""
|
| 154 |
+
if phase == 1:
|
| 155 |
+
k = 2
|
| 156 |
+
else:
|
| 157 |
+
k = 1
|
| 158 |
+
ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
|
| 159 |
+
if ma.count() == 0:
|
| 160 |
+
return False, np.nan
|
| 161 |
+
mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
|
| 162 |
+
q = mb / ma
|
| 163 |
+
min_rows = np.ma.nonzero(q == q.min())[0]
|
| 164 |
+
if bland:
|
| 165 |
+
return True, min_rows[np.argmin(np.take(basis, min_rows))]
|
| 166 |
+
return True, min_rows[0]
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9):
|
| 170 |
+
"""
|
| 171 |
+
Pivot the simplex tableau inplace on the element given by (pivrow, pivol).
|
| 172 |
+
The entering variable corresponds to the column given by pivcol forcing
|
| 173 |
+
the variable basis[pivrow] to leave the basis.
|
| 174 |
+
|
| 175 |
+
Parameters
|
| 176 |
+
----------
|
| 177 |
+
T : 2-D array
|
| 178 |
+
A 2-D array representing the simplex tableau, T, corresponding to the
|
| 179 |
+
linear programming problem. It should have the form:
|
| 180 |
+
|
| 181 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 182 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 183 |
+
.
|
| 184 |
+
.
|
| 185 |
+
.
|
| 186 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 187 |
+
[c[0], c[1], ..., c[n_total], 0]]
|
| 188 |
+
|
| 189 |
+
for a Phase 2 problem, or the form:
|
| 190 |
+
|
| 191 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 192 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 193 |
+
.
|
| 194 |
+
.
|
| 195 |
+
.
|
| 196 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 197 |
+
[c[0], c[1], ..., c[n_total], 0],
|
| 198 |
+
[c'[0], c'[1], ..., c'[n_total], 0]]
|
| 199 |
+
|
| 200 |
+
for a Phase 1 problem (a problem in which a basic feasible solution is
|
| 201 |
+
sought prior to maximizing the actual objective. ``T`` is modified in
|
| 202 |
+
place by ``_solve_simplex``.
|
| 203 |
+
basis : 1-D array
|
| 204 |
+
An array of the indices of the basic variables, such that basis[i]
|
| 205 |
+
contains the column corresponding to the basic variable for row i.
|
| 206 |
+
Basis is modified in place by _apply_pivot.
|
| 207 |
+
pivrow : int
|
| 208 |
+
Row index of the pivot.
|
| 209 |
+
pivcol : int
|
| 210 |
+
Column index of the pivot.
|
| 211 |
+
"""
|
| 212 |
+
basis[pivrow] = pivcol
|
| 213 |
+
pivval = T[pivrow, pivcol]
|
| 214 |
+
T[pivrow] = T[pivrow] / pivval
|
| 215 |
+
for irow in range(T.shape[0]):
|
| 216 |
+
if irow != pivrow:
|
| 217 |
+
T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]
|
| 218 |
+
|
| 219 |
+
# The selected pivot should never lead to a pivot value less than the tol.
|
| 220 |
+
if np.isclose(pivval, tol, atol=0, rtol=1e4):
|
| 221 |
+
message = (
|
| 222 |
+
f"The pivot operation produces a pivot value of:{pivval: .1e}, "
|
| 223 |
+
"which is only slightly greater than the specified "
|
| 224 |
+
f"tolerance{tol: .1e}. This may lead to issues regarding the "
|
| 225 |
+
"numerical stability of the simplex method. "
|
| 226 |
+
"Removing redundant constraints, changing the pivot strategy "
|
| 227 |
+
"via Bland's rule or increasing the tolerance may "
|
| 228 |
+
"help reduce the issue.")
|
| 229 |
+
warn(message, OptimizeWarning, stacklevel=5)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def _solve_simplex(T, n, basis, callback, postsolve_args,
                   maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0,
                   ):
    """
    Solve a linear programming problem in "standard form" using the Simplex
    Method. Linear Programming is intended to solve the following problem form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
        x >= 0

    Parameters
    ----------
    T : 2-D array
        A 2-D array representing the simplex tableau, T, corresponding to the
        linear programming problem. It should have the form:

        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
         .
         .
         .
         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
         [c[0], c[1], ..., c[n_total], 0]]

        for a Phase 2 problem, or the form:

        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
         .
         .
         .
         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
         [c[0], c[1], ..., c[n_total], 0],
         [c'[0], c'[1], ..., c'[n_total], 0]]

        for a Phase 1 problem (a problem in which a basic feasible solution is
        sought prior to maximizing the actual objective. ``T`` is modified in
        place by ``_solve_simplex``.
    n : int
        The number of true variables in the problem.
    basis : 1-D array
        An array of the indices of the basic variables, such that basis[i]
        contains the column corresponding to the basic variable for row i.
        Basis is modified in place by _solve_simplex
    callback : callable, optional
        If a callback function is provided, it will be called within each
        iteration of the algorithm. The callback must accept a
        `scipy.optimize.OptimizeResult` consisting of the following fields:

            x : 1-D array
                Current solution vector
            fun : float
                Current value of the objective function
            success : bool
                True only when a phase has completed successfully. This
                will be False for most iterations.
            slack : 1-D array
                The values of the slack variables. Each slack variable
                corresponds to an inequality constraint. If the slack is zero,
                the corresponding constraint is active.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                that is, ``b - A_eq @ x``
            phase : int
                The phase of the optimization being executed. In phase 1 a basic
                feasible solution is sought and the T has an additional row
                representing an alternate objective function.
            status : int
                An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

            nit : int
                The number of iterations performed.
            message : str
                A string descriptor of the exit status of the optimization.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the standard-form
        problem into the solution to the original problem.
    maxiter : int
        The maximum number of iterations to perform before aborting the
        optimization.
    tol : float
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
    phase : int
        The phase of the optimization being executed. In phase 1 a basic
        feasible solution is sought and the T has an additional row
        representing an alternate objective function.
    bland : bool
        If True, choose pivots using Bland's rule [3]_. In problems which
        fail to converge due to cycling, using Bland's rule can provide
        convergence at the expense of a less optimal path about the simplex.
    nit0 : int
        The initial iteration number used to keep an accurate iteration total
        in a two-phase problem.

    Returns
    -------
    nit : int
        The number of iterations. Used to keep an accurate iteration total
        in the two-phase problem.
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    """
    nit = nit0
    status = 0
    message = ''
    complete = False

    # m is the number of decision+slack columns (excludes the RHS column,
    # and in Phase 1 also excludes the auxiliary objective's extra column).
    if phase == 1:
        m = T.shape[1]-2
    elif phase == 2:
        m = T.shape[1]-1
    else:
        raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")

    if phase == 2:
        # Check if any artificial variables are still in the basis.
        # If yes, check if any coefficients from this row and a column
        # corresponding to one of the non-artificial variable is non-zero.
        # If found, pivot at this term. If not, start phase 2.
        # Do this for all artificial variables in the basis.
        # Ref: "An Introduction to Linear Programming and Game Theory"
        # by Paul R. Thie, Gerard E. Keough, 3rd Ed,
        # Chapter 3.7 Redundant Systems (pag 102)
        for pivrow in [row for row in range(basis.size)
                       if basis[row] > T.shape[1] - 2]:
            non_zero_row = [col for col in range(T.shape[1] - 1)
                            if abs(T[pivrow, col]) > tol]
            if len(non_zero_row) > 0:
                pivcol = non_zero_row[0]
                _apply_pivot(T, basis, pivrow, pivcol, tol)
                nit += 1

    # Preallocate the buffer used to assemble the solution for callbacks;
    # sized to accommodate the largest basic-variable index present.
    if len(basis[:m]) == 0:
        solution = np.empty(T.shape[1] - 1, dtype=np.float64)
    else:
        solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1),
                            dtype=np.float64)

    while not complete:
        # Find the pivot column
        pivcol_found, pivcol = _pivot_col(T, tol, bland)
        if not pivcol_found:
            # No eligible entering column: current solution is optimal.
            pivcol = np.nan
            pivrow = np.nan
            status = 0
            complete = True
        else:
            # Find the pivot row
            pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
            if not pivrow_found:
                # Unbounded in the entering direction.
                status = 3
                complete = True

        if callback is not None:
            # Assemble the current solution vector from the tableau's RHS
            # and report progress before (possibly) pivoting.
            solution[:] = 0
            solution[basis[:n]] = T[:n, -1]
            x = solution[:m]
            x, fun, slack, con = _postsolve(
                x, postsolve_args
            )
            res = OptimizeResult({
                'x': x,
                'fun': fun,
                'slack': slack,
                'con': con,
                'status': status,
                'message': message,
                'nit': nit,
                'success': status == 0 and complete,
                'phase': phase,
                'complete': complete,
            })
            callback(res)

        if not complete:
            if nit >= maxiter:
                # Iteration limit exceeded
                status = 1
                complete = True
            else:
                _apply_pivot(T, basis, pivrow, pivcol, tol)
                nit += 1
    return nit, status
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
def _linprog_simplex(c, c0, A, b, callback, postsolve_args,
                     maxiter=1000, tol=1e-9, disp=False, bland=False,
                     **unknown_options):
    """
    Minimize a linear objective function subject to linear equality and
    non-negativity constraints using the two phase simplex method.
    Linear programming is intended to solve problems of the following form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
        x >= 0

    User-facing documentation is in _linprog_doc.py.

    Parameters
    ----------
    c : 1-D array
        Coefficients of the linear objective function to be minimized.
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables. (Purely for display.)
    A : 2-D array
        2-D array such that ``A @ x``, gives the values of the equality
        constraints at ``x``.
    b : 1-D array
        1-D array of values representing the right hand side of each equality
        constraint (row) in ``A``.
    callback : callable, optional
        If a callback function is provided, it will be called within each
        iteration of the algorithm. The callback function must accept a single
        `scipy.optimize.OptimizeResult` consisting of the following fields:

            x : 1-D array
                Current solution vector
            fun : float
                Current value of the objective function
            success : bool
                True when an algorithm has completed successfully.
            slack : 1-D array
                The values of the slack variables. Each slack variable
                corresponds to an inequality constraint. If the slack is zero,
                the corresponding constraint is active.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                that is, ``b - A_eq @ x``
            phase : int
                The phase of the algorithm being executed.
            status : int
                An integer representing the status of the optimization::

                     0 : Algorithm proceeding nominally
                     1 : Iteration limit reached
                     2 : Problem appears to be infeasible
                     3 : Problem appears to be unbounded
                     4 : Serious numerical difficulties encountered
            nit : int
                The number of iterations performed.
            message : str
                A string descriptor of the exit status of the optimization.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the standard-form
        problem into the solution to the original problem.

    Options
    -------
    maxiter : int
       The maximum number of iterations to perform.
    disp : bool
        If True, print exit status message to sys.stdout
    tol : float
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
    bland : bool
        If True, use Bland's anti-cycling rule [3]_ to choose pivots to
        prevent cycling. If False, choose pivots which should lead to a
        converged solution more quickly. The latter method is subject to
        cycling (non-convergence) in rare instances.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        `unknown_options` is non-empty a warning is issued listing all
        unused options.

    Returns
    -------
    x : 1-D array
        Solution vector.
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    message : str
        A string descriptor of the exit status of the optimization.
    iteration : int
        The number of iterations taken to solve the problem.

    References
    ----------
    .. [1] Dantzig, George B., Linear programming and extensions. Rand
           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
           1963
    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
           Mathematical Programming", McGraw-Hill, Chapter 4.
    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
           Mathematics of Operations Research (2), 1977: pp. 103-107.


    Notes
    -----
    The expected problem formulation differs between the top level ``linprog``
    module and the method specific solvers. The method specific solvers expect a
    problem in standard form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
        x >= 0

    Whereas the top level ``linprog`` module expects a problem of form:

    Minimize::

        c @ x

    Subject to::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.

    The original problem contains equality, upper-bound and variable constraints
    whereas the method specific solver requires equality constraints and
    variable non-negativity.

    ``linprog`` module converts the original problem to standard form by
    converting the simple bounds to upper bound constraints, introducing
    non-negative slack variables for inequality constraints, and expressing
    unbounded variables as the difference between two non-negative variables.
    """
    _check_unknown_options(unknown_options)

    status = 0
    messages = {0: "Optimization terminated successfully.",
                1: "Iteration limit reached.",
                2: "Optimization failed. Unable to find a feasible"
                   " starting point.",
                3: "Optimization failed. The problem appears to be unbounded.",
                4: "Optimization failed. Singular matrix encountered."}

    # A has one row per equality constraint (n) and one column per
    # original variable (m).
    n, m = A.shape

    # All constraints must have b >= 0.
    # Flip the sign of any constraint row whose RHS is negative so the
    # phase-1 tableau starts from a valid basic feasible point.
    is_negative_constraint = np.less(b, 0)
    A[is_negative_constraint] *= -1
    b[is_negative_constraint] *= -1

    # As all constraints are equality constraints the artificial variables
    # will also be basic variables.
    # `av` holds the tableau column indices of the n artificial variables,
    # which occupy the columns immediately after the m original variables.
    av = np.arange(n) + m
    basis = av.copy()

    # Format the phase one tableau by adding artificial variables and stacking
    # the constraints, the objective row and pseudo-objective row.
    # Tableau layout: rows 0..n-1 are constraints, row n is the objective,
    # row n+1 is the phase-1 pseudo-objective; the last column is the RHS.
    row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
    row_objective = np.hstack((c, np.zeros(n), c0))
    row_pseudo_objective = -row_constraints.sum(axis=0)
    row_pseudo_objective[av] = 0
    T = np.vstack((row_constraints, row_objective, row_pseudo_objective))

    nit1, status = _solve_simplex(T, n, basis, callback=callback,
                                  postsolve_args=postsolve_args,
                                  maxiter=maxiter, tol=tol, phase=1,
                                  bland=bland
                                  )
    # if pseudo objective is zero, remove the last row from the tableau and
    # proceed to phase 2
    nit2 = nit1
    if abs(T[-1, -1]) < tol:
        # Remove the pseudo-objective row from the tableau
        T = T[:-1, :]
        # Remove the artificial variable columns from the tableau
        T = np.delete(T, av, 1)
    else:
        # Failure to find a feasible starting point
        status = 2
        messages[status] = (
            "Phase 1 of the simplex method failed to find a feasible "
            "solution. The pseudo-objective function evaluates to {0:.1e} "
            "which exceeds the required tolerance of {1} for a solution to be "
            "considered 'close enough' to zero to be a basic solution. "
            "Consider increasing the tolerance to be greater than {0:.1e}. "
            "If this tolerance is unacceptably large the problem may be "
            "infeasible.".format(abs(T[-1, -1]), tol)
        )

    if status == 0:
        # Phase 2
        # nit0=nit1 makes the phase-2 iteration count continue from phase 1.
        nit2, status = _solve_simplex(T, n, basis, callback=callback,
                                      postsolve_args=postsolve_args,
                                      maxiter=maxiter, tol=tol, phase=2,
                                      bland=bland, nit0=nit1
                                      )

    # Basic variables take their RHS values; all non-basic variables are zero.
    # Only the first m entries (the original variables) are returned.
    solution = np.zeros(n + m)
    solution[basis[:n]] = T[:n, -1]
    x = solution[:m]

    return x, status, messages[status], int(nit2)
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py
ADDED
|
@@ -0,0 +1,1522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Method agnostic utility functions for linear programming
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import scipy.sparse as sps
|
| 7 |
+
from warnings import warn
|
| 8 |
+
from ._optimize import OptimizeWarning
|
| 9 |
+
from scipy.optimize._remove_redundancy import (
|
| 10 |
+
_remove_redundancy_svd, _remove_redundancy_pivot_sparse,
|
| 11 |
+
_remove_redundancy_pivot_dense, _remove_redundancy_id
|
| 12 |
+
)
|
| 13 |
+
from collections import namedtuple
|
| 14 |
+
|
| 15 |
+
# Lightweight container bundling every piece of a linear-programming
# problem.  Only ``c`` is required; the remaining seven fields default to
# None via ``__new__.__defaults__`` below.
_LPProblem = namedtuple('_LPProblem',
                        'c A_ub b_ub A_eq b_eq bounds x0 integrality')
_LPProblem.__new__.__defaults__ = (None,) * 7  # make c the only required arg
_LPProblem.__doc__ = \
    """ Represents a linear-programming problem.

    Attributes
    ----------
    c : 1D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must equal
        the corresponding element of ``b_eq``.
    bounds : various valid formats, optional
        The bounds of ``x``, as ``min`` and ``max`` pairs.
        If bounds are specified for all N variables separately, valid formats
        are:
        * a 2D array (N x 2);
        * a sequence of N sequences, each with 2 values.
        If all variables have the same bounds, the bounds can be specified as
        a 1-D or 2-D array or sequence with 2 scalar values.
        If all variables have a lower bound of 0 and no upper bound, the bounds
        parameter can be omitted (or given as None).
        Absent lower and/or upper bounds can be specified as -numpy.inf (no
        lower bound), numpy.inf (no upper bound) or None (both).
    x0 : 1D array, optional
        Guess values of the decision variables, which will be refined by
        the optimization algorithm. This argument is currently used only by the
        'revised simplex' method, and can only be used if `x0` represents a
        basic feasible solution.
    integrality : 1-D array or int, optional
        Indicates the type of integrality constraint on each decision variable.

        ``0`` : Continuous variable; no integrality constraint.

        ``1`` : Integer variable; decision variable must be an integer
        within `bounds`.

        ``2`` : Semi-continuous variable; decision variable must be within
        `bounds` or take value ``0``.

        ``3`` : Semi-integer variable; decision variable must be an integer
        within `bounds` or take value ``0``.

        By default, all variables are continuous.

        For mixed integrality constraints, supply an array of shape `c.shape`.
        To infer a constraint on each decision variable from shorter inputs,
        the argument will be broadcasted to `c.shape` using `np.broadcast_to`.

        This argument is currently used only by the ``'highs'`` method and
        ignored otherwise.

    Notes
    -----
    This namedtuple supports 2 ways of initialization:
    >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4])
    >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4])

    Note that only ``c`` is a required argument here, whereas all other arguments
    ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with
    default values of None.
    For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``:
    >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10])
    """
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _check_sparse_inputs(options, meth, A_ub, A_eq):
|
| 92 |
+
"""
|
| 93 |
+
Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified
|
| 94 |
+
optional sparsity variables.
|
| 95 |
+
|
| 96 |
+
Parameters
|
| 97 |
+
----------
|
| 98 |
+
A_ub : 2-D array, optional
|
| 99 |
+
2-D array such that ``A_ub @ x`` gives the values of the upper-bound
|
| 100 |
+
inequality constraints at ``x``.
|
| 101 |
+
A_eq : 2-D array, optional
|
| 102 |
+
2-D array such that ``A_eq @ x`` gives the values of the equality
|
| 103 |
+
constraints at ``x``.
|
| 104 |
+
options : dict
|
| 105 |
+
A dictionary of solver options. All methods accept the following
|
| 106 |
+
generic options:
|
| 107 |
+
|
| 108 |
+
maxiter : int
|
| 109 |
+
Maximum number of iterations to perform.
|
| 110 |
+
disp : bool
|
| 111 |
+
Set to True to print convergence messages.
|
| 112 |
+
|
| 113 |
+
For method-specific options, see :func:`show_options('linprog')`.
|
| 114 |
+
method : str, optional
|
| 115 |
+
The algorithm used to solve the standard form problem.
|
| 116 |
+
|
| 117 |
+
Returns
|
| 118 |
+
-------
|
| 119 |
+
A_ub : 2-D array, optional
|
| 120 |
+
2-D array such that ``A_ub @ x`` gives the values of the upper-bound
|
| 121 |
+
inequality constraints at ``x``.
|
| 122 |
+
A_eq : 2-D array, optional
|
| 123 |
+
2-D array such that ``A_eq @ x`` gives the values of the equality
|
| 124 |
+
constraints at ``x``.
|
| 125 |
+
options : dict
|
| 126 |
+
A dictionary of solver options. All methods accept the following
|
| 127 |
+
generic options:
|
| 128 |
+
|
| 129 |
+
maxiter : int
|
| 130 |
+
Maximum number of iterations to perform.
|
| 131 |
+
disp : bool
|
| 132 |
+
Set to True to print convergence messages.
|
| 133 |
+
|
| 134 |
+
For method-specific options, see :func:`show_options('linprog')`.
|
| 135 |
+
"""
|
| 136 |
+
# This is an undocumented option for unit testing sparse presolve
|
| 137 |
+
_sparse_presolve = options.pop('_sparse_presolve', False)
|
| 138 |
+
if _sparse_presolve and A_eq is not None:
|
| 139 |
+
A_eq = sps.coo_matrix(A_eq)
|
| 140 |
+
if _sparse_presolve and A_ub is not None:
|
| 141 |
+
A_ub = sps.coo_matrix(A_ub)
|
| 142 |
+
|
| 143 |
+
sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub)
|
| 144 |
+
|
| 145 |
+
preferred_methods = {"highs", "highs-ds", "highs-ipm"}
|
| 146 |
+
dense_methods = {"simplex", "revised simplex"}
|
| 147 |
+
if meth in dense_methods and sparse_constraint:
|
| 148 |
+
raise ValueError(f"Method '{meth}' does not support sparse "
|
| 149 |
+
"constraint matrices. Please consider using one of "
|
| 150 |
+
f"{preferred_methods}.")
|
| 151 |
+
|
| 152 |
+
sparse = options.get('sparse', False)
|
| 153 |
+
if not sparse and sparse_constraint and meth == 'interior-point':
|
| 154 |
+
options['sparse'] = True
|
| 155 |
+
warn("Sparse constraint matrix detected; setting 'sparse':True.",
|
| 156 |
+
OptimizeWarning, stacklevel=4)
|
| 157 |
+
return options, A_ub, A_eq
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def _format_A_constraints(A, n_x, sparse_lhs=False):
|
| 161 |
+
"""Format the left hand side of the constraints to a 2-D array
|
| 162 |
+
|
| 163 |
+
Parameters
|
| 164 |
+
----------
|
| 165 |
+
A : 2-D array
|
| 166 |
+
2-D array such that ``A @ x`` gives the values of the upper-bound
|
| 167 |
+
(in)equality constraints at ``x``.
|
| 168 |
+
n_x : int
|
| 169 |
+
The number of variables in the linear programming problem.
|
| 170 |
+
sparse_lhs : bool
|
| 171 |
+
Whether either of `A_ub` or `A_eq` are sparse. If true return a
|
| 172 |
+
coo_matrix instead of a numpy array.
|
| 173 |
+
|
| 174 |
+
Returns
|
| 175 |
+
-------
|
| 176 |
+
np.ndarray or sparse.coo_matrix
|
| 177 |
+
2-D array such that ``A @ x`` gives the values of the upper-bound
|
| 178 |
+
(in)equality constraints at ``x``.
|
| 179 |
+
|
| 180 |
+
"""
|
| 181 |
+
if sparse_lhs:
|
| 182 |
+
return sps.coo_matrix(
|
| 183 |
+
(0, n_x) if A is None else A, dtype=float, copy=True
|
| 184 |
+
)
|
| 185 |
+
elif A is None:
|
| 186 |
+
return np.zeros((0, n_x), dtype=float)
|
| 187 |
+
else:
|
| 188 |
+
return np.array(A, dtype=float, copy=True)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _format_b_constraints(b):
|
| 192 |
+
"""Format the upper bounds of the constraints to a 1-D array
|
| 193 |
+
|
| 194 |
+
Parameters
|
| 195 |
+
----------
|
| 196 |
+
b : 1-D array
|
| 197 |
+
1-D array of values representing the upper-bound of each (in)equality
|
| 198 |
+
constraint (row) in ``A``.
|
| 199 |
+
|
| 200 |
+
Returns
|
| 201 |
+
-------
|
| 202 |
+
1-D np.array
|
| 203 |
+
1-D array of values representing the upper-bound of each (in)equality
|
| 204 |
+
constraint (row) in ``A``.
|
| 205 |
+
|
| 206 |
+
"""
|
| 207 |
+
if b is None:
|
| 208 |
+
return np.array([], dtype=float)
|
| 209 |
+
b = np.array(b, dtype=float, copy=True).squeeze()
|
| 210 |
+
return b if b.size != 1 else b.reshape(-1)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def _clean_inputs(lp):
    """
    Given user inputs for a linear programming problem, return the
    objective vector, upper bound constraints, equality constraints,
    and simple bounds in a preferred format.

    Parameters
    ----------
    lp : `scipy.optimize._linprog_util._LPProblem`
        Problem as supplied by the user, with fields ``c`` (objective
        coefficients), ``A_ub``/``b_ub`` (inequality constraints
        ``A_ub @ x <= b_ub``), ``A_eq``/``b_eq`` (equality constraints
        ``A_eq @ x == b_eq``), ``bounds`` (any of the formats accepted by
        `linprog`), ``x0`` (optional initial guess) and ``integrality``.

    Returns
    -------
    lp : `scipy.optimize._linprog_util._LPProblem`
        The same problem with ``c`` (and ``x0`` if given) as 1-D float
        arrays, ``A_ub``/``A_eq`` as 2-D float arrays (sparse coo if either
        input was sparse), ``b_ub``/``b_eq`` as 1-D float arrays, and
        ``bounds`` as an N x 2 float array with ``-np.inf``/``np.inf``
        marking unbounded variables.

    Raises
    ------
    TypeError
        If an input cannot be converted to a numeric array of the
        required dimensionality.
    ValueError
        If shapes are mutually inconsistent or contain nonfinite values
        where finite values are required.
    """
    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp

    if c is None:
        # Bug fix: this previously raised a bare ``TypeError`` with no
        # message, which gives the user no hint about what went wrong.
        raise TypeError("Invalid input for linprog: c must not be None")

    try:
        c = np.array(c, dtype=np.float64, copy=True).squeeze()
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: c must be a 1-D array of numerical "
            "coefficients") from e
    else:
        # If c is a single value, convert it to a 1-D array.
        if c.size == 1:
            c = c.reshape(-1)

        n_x = len(c)
        if n_x == 0 or len(c.shape) != 1:
            raise ValueError(
                "Invalid input for linprog: c must be a 1-D array and must "
                "not have more than one non-singleton dimension")
        if not np.isfinite(c).all():
            raise ValueError(
                "Invalid input for linprog: c must not contain values "
                "inf, nan, or None")

    # If either constraint matrix is sparse, keep both in sparse format.
    sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
    try:
        A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: A_ub must be a 2-D array "
            "of numerical values") from e
    else:
        n_ub = A_ub.shape[0]
        if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
            raise ValueError(
                "Invalid input for linprog: A_ub must have exactly two "
                "dimensions, and the number of columns in A_ub must be "
                "equal to the size of c")
        # For sparse matrices only the stored entries need the finiteness check.
        if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
                or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
            raise ValueError(
                "Invalid input for linprog: A_ub must not contain values "
                "inf, nan, or None")

    try:
        b_ub = _format_b_constraints(b_ub)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: b_ub must be a 1-D array of "
            "numerical values, each representing the upper bound of an "
            "inequality constraint (row) in A_ub") from e
    else:
        if b_ub.shape != (n_ub,):
            raise ValueError(
                "Invalid input for linprog: b_ub must be a 1-D array; b_ub "
                "must not have more than one non-singleton dimension and "
                "the number of rows in A_ub must equal the number of values "
                "in b_ub")
        if not np.isfinite(b_ub).all():
            raise ValueError(
                "Invalid input for linprog: b_ub must not contain values "
                "inf, nan, or None")

    try:
        A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: A_eq must be a 2-D array "
            "of numerical values") from e
    else:
        n_eq = A_eq.shape[0]
        if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
            raise ValueError(
                "Invalid input for linprog: A_eq must have exactly two "
                "dimensions, and the number of columns in A_eq must be "
                "equal to the size of c")

        if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
                or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
            raise ValueError(
                "Invalid input for linprog: A_eq must not contain values "
                "inf, nan, or None")

    try:
        b_eq = _format_b_constraints(b_eq)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: b_eq must be a dense, 1-D array of "
            "numerical values, each representing the right hand side of an "
            "equality constraint (row) in A_eq") from e
    else:
        if b_eq.shape != (n_eq,):
            raise ValueError(
                "Invalid input for linprog: b_eq must be a 1-D array; b_eq "
                "must not have more than one non-singleton dimension and "
                "the number of rows in A_eq must equal the number of values "
                "in b_eq")
        if not np.isfinite(b_eq).all():
            raise ValueError(
                "Invalid input for linprog: b_eq must not contain values "
                "inf, nan, or None")

    # x0 gives a (optional) starting solution to the solver. If x0 is None,
    # skip the checks. Initial solution will be generated automatically.
    if x0 is not None:
        try:
            x0 = np.array(x0, dtype=float, copy=True).squeeze()
        except ValueError as e:
            raise TypeError(
                "Invalid input for linprog: x0 must be a 1-D array of "
                "numerical coefficients") from e
        if x0.ndim == 0:
            x0 = x0.reshape(-1)
        if len(x0) == 0 or x0.ndim != 1:
            raise ValueError(
                "Invalid input for linprog: x0 should be a 1-D array; it "
                "must not have more than one non-singleton dimension")
        if not x0.size == c.size:
            raise ValueError(
                "Invalid input for linprog: x0 and c should contain the "
                "same number of elements")
        if not np.isfinite(x0).all():
            raise ValueError(
                "Invalid input for linprog: x0 must not contain values "
                "inf, nan, or None")

    # Bounds can be one of these formats:
    # (1) a 2-D array or sequence, with shape N x 2
    # (2) a 1-D or 2-D sequence or array with 2 scalars
    # (3) None (or an empty sequence or array)
    # Unspecified bounds can be represented by None or (-)np.inf.
    # All formats are converted into a N x 2 np.array with (-)np.inf where
    # bounds are unspecified.

    # Prepare clean bounds array
    bounds_clean = np.zeros((n_x, 2), dtype=float)

    # Convert to a numpy array.
    # np.array(..., dtype=float) raises an error if dimensions are
    # inconsistent or if there are invalid data types in bounds. Just add a
    # linprog prefix to the error and re-raise.
    # Creating at least a 2-D array simplifies the cases to distinguish below.
    if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]):
        # Default: each variable is non-negative and unbounded above.
        bounds = (0, np.inf)
    try:
        bounds_conv = np.atleast_2d(np.array(bounds, dtype=float))
    except ValueError as e:
        raise ValueError(
            "Invalid input for linprog: unable to interpret bounds, "
            "check values and dimensions: " + e.args[0]) from e
    except TypeError as e:
        raise TypeError(
            "Invalid input for linprog: unable to interpret bounds, "
            "check values and dimensions: " + e.args[0]) from e

    # Check bounds options
    bsh = bounds_conv.shape
    if len(bsh) > 2:
        # Do not try to handle multidimensional bounds input
        raise ValueError(
            "Invalid input for linprog: provide a 2-D array for bounds, "
            f"not a {len(bsh):d}-D array.")
    elif np.all(bsh == (n_x, 2)):
        # Regular N x 2 array
        bounds_clean = bounds_conv
    elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))):
        # 2 values: interpret as overall lower and upper bound
        bounds_flat = bounds_conv.flatten()
        bounds_clean[:, 0] = bounds_flat[0]
        bounds_clean[:, 1] = bounds_flat[1]
    elif np.all(bsh == (2, n_x)):
        # Reject a 2 x N array
        raise ValueError(
            f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, "
            f"not a 2 x {n_x:d} array.")
    else:
        raise ValueError(
            "Invalid input for linprog: unable to interpret bounds with this "
            f"dimension tuple: {bsh}.")

    # The process above creates nan-s where the input specified None.
    # Convert the nan-s in the 1st column to -np.inf and in the 2nd column
    # to np.inf.
    i_none = np.isnan(bounds_clean[:, 0])
    bounds_clean[i_none, 0] = -np.inf
    i_none = np.isnan(bounds_clean[:, 1])
    bounds_clean[i_none, 1] = np.inf

    return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality)
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def _presolve(lp, rr, rr_method, tol=1e-9):
    """
    Given inputs for a linear programming problem in preferred format,
    presolve the problem: identify trivial infeasibilities, redundancies,
    and unboundedness, tighten bounds where possible, and eliminate fixed
    variables.

    Parameters
    ----------
    lp : `scipy.optimize._linprog_util._LPProblem`
        Problem in the clean format produced by `_clean_inputs`: ``c``
        1-D, ``A_ub``/``A_eq`` 2-D (possibly sparse), ``b_ub``/``b_eq``
        1-D, ``bounds`` an N x 2 array, ``x0`` an optional 1-D guess.
    rr : bool
        If ``True`` attempts to eliminate any redundant rows in ``A_eq``.
        Set False if ``A_eq`` is known to be of full row rank, or if you are
        looking for a potential speedup (at the expense of reliability).
    rr_method : string or None
        Method used to identify and remove redundant rows from the
        equality constraint matrix after presolve: ``None`` (automatic),
        "SVD", "pivot", or "ID" (case-insensitive).
    tol : float
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.

    Returns
    -------
    lp : `scipy.optimize._linprog_util._LPProblem`
        The possibly reduced problem; ``bounds`` may be tightened.
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables.
    x : 1-D array
        Solution vector (when the solution is trivial and can be determined
        in presolve).
    revstack : list of functions
        The functions in the list reverse the operations of _presolve():
        ``x_org = f(x_mod)``, where ``x_mod`` is the result of a presolve
        step and ``x_org`` the value at the start of the step.
    complete : bool
        Whether the solution is complete (solved or determined to be
        infeasible or unbounded in presolve).
    status : int
        Exit status: 0 success, 2 infeasible, 3 unbounded, 4 serious
        numerical difficulties.
    message : str
        A string descriptor of the exit status of the optimization.

    References
    ----------
    .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
           large-scale linear programming." Optimization Methods and Software
           6.3 (1995): 219-227.
    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
           programming." Mathematical Programming 71.2 (1995): 221-245.
    """
    # ideas from Reference [5] by Andersen and Andersen
    # however, unlike the reference, this is performed before converting
    # problem to standard form
    # There are a few advantages:
    #  * artificial variables have not been added, so matrices are smaller
    #  * bounds have not been converted to constraints yet. (It is better to
    #    do that after presolve because presolve may adjust the simple bounds.)
    # There are many improvements that can be made, namely:
    #  * implement remaining checks from [5]
    #  * loop presolve until no additional changes are made
    #  * implement additional efficiency improvements in redundancy removal

    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp

    revstack = []  # record of variables eliminated from problem
    # constant term in cost function may be added if variables are eliminated
    c0 = 0
    complete = False  # complete is True if detected infeasible/unbounded
    x = np.zeros(c.shape)  # this is solution vector if completed in presolve

    status = 0  # all OK unless determined otherwise
    message = ""

    # Lower and upper bounds. Copy to prevent feedback.
    lb = bounds[:, 0].copy()
    ub = bounds[:, 1].copy()

    m_eq, n = A_eq.shape
    m_ub, n = A_ub.shape

    if (rr_method is not None
            and rr_method.lower() not in {"svd", "pivot", "id"}):
        message = ("'" + str(rr_method) + "' is not a valid option "
                   "for redundancy removal. Valid options are 'SVD', "
                   "'pivot', and 'ID'.")
        raise ValueError(message)

    if sps.issparse(A_eq):
        # CSR supports the row slicing used throughout presolve.
        A_eq = A_eq.tocsr()
        A_ub = A_ub.tocsr()

        def where(A):
            return A.nonzero()

        vstack = sps.vstack
    else:
        where = np.where
        vstack = np.vstack

    # upper bounds > lower bounds
    if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf):
        status = 2
        message = ("The problem is (trivially) infeasible since one "
                   "or more upper bounds are smaller than the corresponding "
                   "lower bounds, a lower bound is np.inf or an upper bound "
                   "is -np.inf.")
        complete = True
        return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                c0, x, revstack, complete, status, message)

    # zero row in equality constraints
    zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
    if np.any(zero_row):
        if np.any(
                np.logical_and(
                    zero_row,
                    np.abs(b_eq) > tol)):  # test_zero_row_1
            # infeasible if RHS is not zero
            status = 2
            message = ("The problem is (trivially) infeasible due to a row "
                       "of zeros in the equality constraint matrix with a "
                       "nonzero corresponding constraint value.")
            complete = True
            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                    c0, x, revstack, complete, status, message)
        else:  # test_zero_row_2
            # if RHS is zero, we can eliminate this equation entirely
            A_eq = A_eq[np.logical_not(zero_row), :]
            b_eq = b_eq[np.logical_not(zero_row)]

    # zero row in inequality constraints
    zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
    if np.any(zero_row):
        if np.any(np.logical_and(zero_row, b_ub < -tol)):  # test_zero_row_1
            # infeasible if RHS is less than zero (because LHS is zero)
            status = 2
            # Bug fix: this message previously referred to the *equality*
            # constraint matrix with a "nonzero" value (copy-paste from the
            # A_eq branch); this branch concerns A_ub rows with negative RHS.
            message = ("The problem is (trivially) infeasible due to a row "
                       "of zeros in the inequality constraint matrix with a "
                       "negative corresponding constraint value.")
            complete = True
            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                    c0, x, revstack, complete, status, message)
        else:  # test_zero_row_2
            # if LHS is >= 0, we can eliminate this constraint entirely
            A_ub = A_ub[np.logical_not(zero_row), :]
            b_ub = b_ub[np.logical_not(zero_row)]

    # zero column in (both) constraints
    # this indicates that a variable isn't constrained and can be removed
    A = vstack((A_eq, A_ub))
    if A.shape[0] > 0:
        zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
        # variable will be at upper or lower bound, depending on objective
        x[np.logical_and(zero_col, c < 0)] = ub[
            np.logical_and(zero_col, c < 0)]
        x[np.logical_and(zero_col, c > 0)] = lb[
            np.logical_and(zero_col, c > 0)]
        if np.any(np.isinf(x)):  # if an unconstrained variable has no bound
            status = 3
            message = ("If feasible, the problem is (trivially) unbounded "
                       "due to a zero column in the constraint matrices. If "
                       "you wish to check whether the problem is infeasible, "
                       "turn presolve off.")
            complete = True
            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                    c0, x, revstack, complete, status, message)
        # variables will equal upper/lower bounds will be removed later
        lb[np.logical_and(zero_col, c < 0)] = ub[
            np.logical_and(zero_col, c < 0)]
        ub[np.logical_and(zero_col, c > 0)] = lb[
            np.logical_and(zero_col, c > 0)]

    # row singleton in equality constraints
    # this fixes a variable and removes the constraint
    singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()
    rows = where(singleton_row)[0]
    cols = where(A_eq[rows, :])[1]
    if len(rows) > 0:
        for row, col in zip(rows, cols):
            val = b_eq[row] / A_eq[row, col]
            if not lb[col] - tol <= val <= ub[col] + tol:
                # infeasible if fixed value is not within bounds
                status = 2
                message = ("The problem is (trivially) infeasible because a "
                           "singleton row in the equality constraints is "
                           "inconsistent with the bounds.")
                complete = True
                return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                        c0, x, revstack, complete, status, message)
            else:
                # sets upper and lower bounds at that fixed value - variable
                # will be removed later
                lb[col] = val
                ub[col] = val
        A_eq = A_eq[np.logical_not(singleton_row), :]
        b_eq = b_eq[np.logical_not(singleton_row)]

    # row singleton in inequality constraints
    # this indicates a simple bound and the constraint can be removed
    # simple bounds may be adjusted here
    # After all of the simple bound information is combined here, get_Abc will
    # turn the simple bounds into constraints
    singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()
    cols = where(A_ub[singleton_row, :])[1]
    rows = where(singleton_row)[0]
    if len(rows) > 0:
        for row, col in zip(rows, cols):
            val = b_ub[row] / A_ub[row, col]
            if A_ub[row, col] > 0:  # upper bound
                if val < lb[col] - tol:  # infeasible
                    complete = True
                elif val < ub[col]:  # new upper bound
                    ub[col] = val
            else:  # lower bound
                if val > ub[col] + tol:  # infeasible
                    complete = True
                elif val > lb[col]:  # new lower bound
                    lb[col] = val
            if complete:
                status = 2
                message = ("The problem is (trivially) infeasible because a "
                           "singleton row in the upper bound constraints is "
                           "inconsistent with the bounds.")
                return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                        c0, x, revstack, complete, status, message)
        A_ub = A_ub[np.logical_not(singleton_row), :]
        b_ub = b_ub[np.logical_not(singleton_row)]

    # identical bounds indicate that variable can be removed
    i_f = np.abs(lb - ub) < tol  # indices of "fixed" variables
    i_nf = np.logical_not(i_f)  # indices of "not fixed" variables

    # test_bounds_equal_but_infeasible
    if np.all(i_f):  # if bounds define solution, check for consistency
        residual = b_eq - A_eq.dot(lb)
        slack = b_ub - A_ub.dot(lb)
        if ((A_ub.size > 0 and np.any(slack < 0)) or
                (A_eq.size > 0 and not np.allclose(residual, 0))):
            status = 2
            message = ("The problem is (trivially) infeasible because the "
                       "bounds fix all variables to values inconsistent with "
                       "the constraints")
            complete = True
            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                    c0, x, revstack, complete, status, message)

    ub_mod = ub
    lb_mod = lb
    if np.any(i_f):
        # Eliminate fixed variables: fold their contribution into the
        # objective constant and the constraint right-hand sides.
        c0 += c[i_f].dot(lb[i_f])
        b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])
        b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])
        c = c[i_nf]
        x_undo = lb[i_f]  # not x[i_f], x is just zeroes
        x = x[i_nf]
        # user guess x0 stays separate from presolve solution x
        if x0 is not None:
            x0 = x0[i_nf]
        A_eq = A_eq[:, i_nf]
        A_ub = A_ub[:, i_nf]
        # modify bounds
        lb_mod = lb[i_nf]
        ub_mod = ub[i_nf]

        def rev(x_mod):
            # Function to restore x: insert x_undo into x_mod.
            # When elements have been removed at positions k1, k2, k3, ...
            # then these must be replaced at (after) positions k1-1, k2-2,
            # k3-3, ... in the modified array to recreate the original.
            i = np.flatnonzero(i_f)
            # Number of variables to restore
            N = len(i)
            index_offset = np.arange(N)
            # Create insert indices
            insert_indices = i - index_offset
            x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo)
            return x_rev

        # Use revstack as a list of functions, currently just this one.
        revstack.append(rev)

    # no constraints indicates that problem is trivial
    if A_eq.size == 0 and A_ub.size == 0:
        b_eq = np.array([])
        b_ub = np.array([])
        # test_empty_constraint_1
        if c.size == 0:
            status = 0
            message = ("The solution was determined in presolve as there are "
                       "no non-trivial constraints.")
        elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
              np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
            # test_no_constraints()
            # test_unbounded_no_nontrivial_constraints_1
            # test_unbounded_no_nontrivial_constraints_2
            status = 3
            message = ("The problem is (trivially) unbounded "
                       "because there are no non-trivial constraints and "
                       "a) at least one decision variable is unbounded "
                       "above and its corresponding cost is negative, or "
                       "b) at least one decision variable is unbounded below "
                       "and its corresponding cost is positive. ")
        else:  # test_empty_constraint_2
            status = 0
            message = ("The solution was determined in presolve as there are "
                       "no non-trivial constraints.")
        complete = True
        x[c < 0] = ub_mod[c < 0]
        x[c > 0] = lb_mod[c > 0]
        # where c is zero, set x to a finite bound or zero
        x_zero_c = ub_mod[c == 0]
        # Bug fix: where the upper bound is infinite, fall back to the
        # *lower* bound (the previous code reassigned the same infinite
        # upper-bound values, making the statement a no-op and losing any
        # finite lower bounds).
        x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
        x_zero_c[np.isinf(x_zero_c)] = 0
        x[c == 0] = x_zero_c
        # if this is not the last step of presolve, should convert bounds back
        # to array and return here

    # Convert modified lb and ub back into N x 2 bounds
    bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis]))

    # remove redundant (linearly dependent) rows from equality constraints
    n_rows_A = A_eq.shape[0]
    redundancy_warning = ("A_eq does not appear to be of full row rank. To "
                          "improve performance, check the problem formulation "
                          "for redundant equality constraints.")
    if sps.issparse(A_eq):
        if rr and A_eq.size > 0:  # TODO: Fast sparse rank check?
            rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq)
            A_eq, b_eq, status, message = rr_res
            if A_eq.shape[0] < n_rows_A:
                warn(redundancy_warning, OptimizeWarning, stacklevel=1)
            if status != 0:
                complete = True
        return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
                c0, x, revstack, complete, status, message)

    # This is a wild guess for which redundancy removal algorithm will be
    # faster. More testing would be good.
    small_nullspace = 5
    if rr and A_eq.size > 0:
        try:  # TODO: use results of first SVD in _remove_redundancy_svd
            rank = np.linalg.matrix_rank(A_eq)
        # oh well, we'll have to go with _remove_redundancy_pivot_dense
        except Exception:
            rank = 0
    if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
        warn(redundancy_warning, OptimizeWarning, stacklevel=3)
        dim_row_nullspace = A_eq.shape[0] - rank
        if rr_method is None:
            if dim_row_nullspace <= small_nullspace:
                rr_res = _remove_redundancy_svd(A_eq, b_eq)
                A_eq, b_eq, status, message = rr_res
            if dim_row_nullspace > small_nullspace or status == 4:
                rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
                A_eq, b_eq, status, message = rr_res

        else:
            rr_method = rr_method.lower()
            if rr_method == "svd":
                rr_res = _remove_redundancy_svd(A_eq, b_eq)
                A_eq, b_eq, status, message = rr_res
            elif rr_method == "pivot":
                rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
                A_eq, b_eq, status, message = rr_res
            elif rr_method == "id":
                rr_res = _remove_redundancy_id(A_eq, b_eq, rank)
                A_eq, b_eq, status, message = rr_res
            else:  # shouldn't get here; option validity checked above
                pass
        if A_eq.shape[0] < rank:
            message = ("Due to numerical issues, redundant equality "
                       "constraints could not be removed automatically. "
                       "Try providing your constraint matrices as sparse "
                       "matrices to activate sparse presolve, try turning "
                       "off redundancy removal, or try turning off presolve "
                       "altogether.")
            status = 4
    if status != 0:
        complete = True
    return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
            c0, x, revstack, complete, status, message)
|
| 917 |
+
|
| 918 |
+
|
| 919 |
+
def _parse_linprog(lp, options, meth):
    """
    Parse the provided linear programming problem.

    ``_parse_linprog`` employs two main steps, ``_check_sparse_inputs`` and
    ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
    provided constraints (``A_ub`` and ``A_eq``) and whether these match the
    provided sparsity-related options. ``_clean_inputs`` validates the
    provided inputs; if no violations are identified, the objective vector,
    upper bound constraints, equality constraints, and simple bounds are
    returned in the expected format.

    Parameters
    ----------
    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:

        c : 1D array
            The coefficients of the linear objective function to be minimized.
        A_ub : 2D array, optional
            The inequality constraint matrix. Each row of ``A_ub`` specifies
            the coefficients of a linear inequality constraint on ``x``.
        b_ub : 1D array, optional
            The inequality constraint vector. Each element represents an
            upper bound on the corresponding value of ``A_ub @ x``.
        A_eq : 2D array, optional
            The equality constraint matrix. Each row of ``A_eq`` specifies
            the coefficients of a linear equality constraint on ``x``.
        b_eq : 1D array, optional
            The equality constraint vector. Each element of ``A_eq @ x`` must
            equal the corresponding element of ``b_eq``.
        bounds : various valid formats, optional
            The bounds of ``x``, as ``min`` and ``max`` pairs. Accepted
            formats: a 2D array (2 x N or N x 2); a sequence of N sequences,
            each with 2 values; a single pair applied to all variables; or
            omitted/None for the default (lower bound 0, no upper bound).
        x0 : 1D array, optional
            Guess values of the decision variables, refined by the
            optimization algorithm. Currently used only by the
            'revised simplex' method, and only if `x0` represents a basic
            feasible solution.

    options : dict or None
        A dictionary of solver options (e.g. ``maxiter``, ``disp``); ``None``
        is treated as an empty dictionary. The caller's dictionary is never
        mutated. For method-specific options, see
        :func:`show_options('linprog')`.
    meth : str
        The solver method name; used when validating sparse inputs.

    Returns
    -------
    lp : A `scipy.optimize._linprog_util._LPProblem`
        The cleaned problem, with the same fields as above except that
        ``bounds`` is normalized to an N x 2 array with lower bounds in the
        first column and upper bounds in the second (unbounded variables
        have lower bound ``-np.inf`` and/or upper bound ``np.inf``).
    solver_options : dict
        A (possibly adjusted) copy of `options`.
    """
    if options is None:
        options = {}

    # Work on a shallow copy so the caller's options dict is never mutated.
    solver_options = dict(options)
    solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth,
                                                      lp.A_ub, lp.A_eq)
    # Convert lists to numpy arrays, etc...
    lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))
    return lp, solver_options
|
| 1028 |
+
|
| 1029 |
+
|
| 1030 |
+
def _get_Abc(lp, c0):
    """
    Given a linear programming problem of the form:

    Minimize::

        c @ x

    Subject to::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
         lb <= x <= ub

    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.

    Return the problem in standard form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
        x >= 0

    by adding slack variables and making variable substitutions as necessary.

    Parameters
    ----------
    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:

        c : 1D array
            The coefficients of the linear objective function to be minimized.
        A_ub : 2D array, optional
            The inequality constraint matrix. Each row of ``A_ub`` specifies
            the coefficients of a linear inequality constraint on ``x``.
        b_ub : 1D array, optional
            The inequality constraint vector. Each element represents an
            upper bound on the corresponding value of ``A_ub @ x``.
        A_eq : 2D array, optional
            The equality constraint matrix. Each row of ``A_eq`` specifies
            the coefficients of a linear equality constraint on ``x``.
        b_eq : 1D array, optional
            The equality constraint vector. Each element of ``A_eq @ x`` must
            equal the corresponding element of ``b_eq``.
        bounds : 2D array
            The bounds of ``x``, lower bounds in the 1st column, upper
            bounds in the 2nd column. The bounds are possibly tightened
            by the presolve procedure.
        x0 : 1D array, optional
            Guess values of the decision variables, refined by the
            optimization algorithm. Currently used only by the
            'revised simplex' method, and only if `x0` represents a basic
            feasible solution.

    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables.

    Returns
    -------
    A : 2-D array
        2-D array such that ``A`` @ ``x``, gives the values of the equality
        constraints at ``x``.
    b : 1-D array
        1-D array of values representing the RHS of each equality constraint
        (row) in A (for standard form problem).
    c : 1-D array
        Coefficients of the linear objective function to be minimized (for
        standard form problem).
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables.
    x0 : 1-D array
        Starting values of the independent variables, which will be refined
        by the optimization algorithm.

    References
    ----------
    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
           programming." Athena Scientific 1 (1997): 997.

    """
    # `integrality` is unpacked but not used by this transformation.
    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp

    # Dispatch stacking/constructor helpers so the same code below works for
    # both sparse and dense constraint matrices.
    if sps.issparse(A_eq):
        sparse = True
        A_eq = sps.csr_matrix(A_eq)
        A_ub = sps.csr_matrix(A_ub)

        def hstack(blocks):
            return sps.hstack(blocks, format="csr")

        def vstack(blocks):
            return sps.vstack(blocks, format="csr")

        zeros = sps.csr_matrix
        eye = sps.eye
    else:
        sparse = False
        hstack = np.hstack
        vstack = np.vstack
        zeros = np.zeros
        eye = np.eye

    # Variables lbs and ubs (see below) may be changed, which feeds back into
    # bounds, so copy.
    bounds = np.array(bounds, copy=True)

    # modify problem such that all variables have only non-negativity bounds
    lbs = bounds[:, 0]
    ubs = bounds[:, 1]
    m_ub, n_ub = A_ub.shape

    # Boolean masks classifying each variable's bound situation.
    lb_none = np.equal(lbs, -np.inf)
    ub_none = np.equal(ubs, np.inf)
    lb_some = np.logical_not(lb_none)
    ub_some = np.logical_not(ub_none)

    # unbounded below: substitute xi = -xi' (unbounded above)
    # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds
    l_nolb_someub = np.logical_and(lb_none, ub_some)
    i_nolb = np.nonzero(l_nolb_someub)[0]
    lbs[l_nolb_someub], ubs[l_nolb_someub] = (
        -ubs[l_nolb_someub], -lbs[l_nolb_someub])
    # Recompute the masks after the swap above changed lbs/ubs in place.
    lb_none = np.equal(lbs, -np.inf)
    ub_none = np.equal(ubs, np.inf)
    lb_some = np.logical_not(lb_none)
    ub_some = np.logical_not(ub_none)
    # Negating a variable negates its objective coefficient, guess value,
    # and constraint columns.
    c[i_nolb] *= -1
    if x0 is not None:
        x0[i_nolb] *= -1
    if len(i_nolb) > 0:
        if A_ub.shape[0] > 0:  # sometimes needed for sparse arrays... weird
            A_ub[:, i_nolb] *= -1
        if A_eq.shape[0] > 0:
            A_eq[:, i_nolb] *= -1

    # upper bound: add inequality constraint
    i_newub, = ub_some.nonzero()
    ub_newub = ubs[ub_some]
    n_bounds = len(i_newub)
    if n_bounds > 0:
        # Append one row per upper-bounded variable: x_i <= ub_i.
        shape = (n_bounds, A_ub.shape[1])
        if sparse:
            idxs = (np.arange(n_bounds), i_newub)
            A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs),
                                                shape=shape)))
        else:
            A_ub = vstack((A_ub, np.zeros(shape)))
            A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1
        b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
        b_ub[m_ub:] = ub_newub

    # Stack inequality rows above equality rows; pad c/x0 with slots that
    # will become the xi- parts of free-variable substitutions below.
    A1 = vstack((A_ub, A_eq))
    b = np.concatenate((b_ub, b_eq))
    c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
    if x0 is not None:
        x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))
    # unbounded: substitute xi = xi+ + xi-
    l_free = np.logical_and(lb_none, ub_none)
    i_free = np.nonzero(l_free)[0]
    n_free = len(i_free)
    c = np.concatenate((c, np.zeros(n_free)))
    if x0 is not None:
        x0 = np.concatenate((x0, np.zeros(n_free)))
    # Append negated columns of the free variables: xi = xi+ - xi-.
    A1 = hstack((A1[:, :n_ub], -A1[:, i_free]))
    c[n_ub:n_ub+n_free] = -c[i_free]
    if x0 is not None:
        # Split a negative guess between the xi+ and xi- components.
        i_free_neg = x0[i_free] < 0
        x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]
        x0[i_free[i_free_neg]] = 0

    # add slack variables
    # Identity block for the inequality rows, zeros for the equality rows.
    A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])

    A = hstack([A1, A2])

    # lower bound: substitute xi = xi' + lb
    # now there is a constant term in objective
    i_shift = np.nonzero(lb_some)[0]
    lb_shift = lbs[lb_some].astype(float)
    c0 += np.sum(lb_shift * c[i_shift])
    if sparse:
        # Sparse path: shift b by A[:, i_shift] @ lb_shift via a diagonal
        # product, keeping everything sparse until the final ravel.
        b = b.reshape(-1, 1)
        A = A.tocsc()
        b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
        b = b.ravel()
    else:
        b -= (A[:, i_shift] * lb_shift).sum(axis=1)
    if x0 is not None:
        x0[i_shift] -= lb_shift

    return A, b, c, c0, x0
|
| 1226 |
+
|
| 1227 |
+
|
| 1228 |
+
def _round_to_power_of_two(x):
|
| 1229 |
+
"""
|
| 1230 |
+
Round elements of the array to the nearest power of two.
|
| 1231 |
+
"""
|
| 1232 |
+
return 2**np.around(np.log2(x))
|
| 1233 |
+
|
| 1234 |
+
|
| 1235 |
+
def _autoscale(A, b, c, x0):
    """
    Scales the problem according to equilibration from [12].
    Also normalizes the right hand side vector by its maximum element.

    Rows of ``A`` are divided by their largest-magnitude entry (rounded to
    a power of two, so the scaling is exact in floating point), then the
    columns of the row-scaled matrix likewise; ``b`` and ``c`` are scaled
    to match.  Returns the scaled problem plus the column scale factors
    ``C`` and the RHS scale ``b_scale`` needed by ``_unscale``.
    """
    m, n = A.shape

    col_scale = 1
    row_scale = 1

    if A.size > 0:
        # Row equilibration.
        row_scale = np.max(np.abs(A), axis=1)
        if sps.issparse(A):
            row_scale = row_scale.toarray().flatten()
        row_scale[row_scale == 0] = 1  # leave all-zero rows untouched
        row_scale = 1 / _round_to_power_of_two(row_scale)
        if sps.issparse(A):
            A = sps.diags(row_scale) * A
        else:
            A = A * row_scale.reshape(m, 1)
        b = b * row_scale

        # Column equilibration of the row-scaled matrix.
        col_scale = np.max(np.abs(A), axis=0)
        if sps.issparse(A):
            col_scale = col_scale.toarray().flatten()
        col_scale[col_scale == 0] = 1  # leave all-zero columns untouched
        col_scale = 1 / _round_to_power_of_two(col_scale)
        if sps.issparse(A):
            A = A * sps.diags(col_scale)
        else:
            A = A * col_scale
        c = c * col_scale

    # Normalize the RHS by its largest magnitude (guard against all-zero b).
    b_scale = np.max(np.abs(b)) if b.size > 0 else 1
    if b_scale == 0:
        b_scale = 1.
    b = b / b_scale

    if x0 is not None:
        x0 = x0 / b_scale * (1 / col_scale)
    return A, b, c, x0, col_scale, b_scale
|
| 1271 |
+
|
| 1272 |
+
|
| 1273 |
+
def _unscale(x, C, b_scale):
|
| 1274 |
+
"""
|
| 1275 |
+
Converts solution to _autoscale problem -> solution to original problem.
|
| 1276 |
+
"""
|
| 1277 |
+
|
| 1278 |
+
try:
|
| 1279 |
+
n = len(C)
|
| 1280 |
+
# fails if sparse or scalar; that's OK.
|
| 1281 |
+
# this is only needed for original simplex (never sparse)
|
| 1282 |
+
except TypeError:
|
| 1283 |
+
n = len(x)
|
| 1284 |
+
|
| 1285 |
+
return x[:n]*b_scale*C
|
| 1286 |
+
|
| 1287 |
+
|
| 1288 |
+
def _display_summary(message, status, fun, iteration):
    """
    Print the termination summary of the linear program.

    Parameters
    ----------
    message : str
        A string descriptor of the exit status of the optimization.
    status : int
        Exit status code: 0 success, 1 iteration limit reached,
        2 infeasible, 3 unbounded, 4 serious numerical difficulties.
    fun : float
        Value of the objective function (printed only for status 0 or 1).
    iteration : int
        The number of iterations performed.
    """
    print(message)
    # Objective value and iteration count are meaningful only when the
    # solver terminated normally or hit the iteration limit.
    finished_normally = status == 0 or status == 1
    if finished_normally:
        print(f" Current function value: {fun: <12.6f}")
        print(f" Iterations: {iteration:d}")
|
| 1314 |
+
|
| 1315 |
+
|
| 1316 |
+
def _postsolve(x, postsolve_args, complete=False):
    """
    Given solution x to presolved, standard form linear program x, add
    fixed variables back into the problem and undo the variable substitutions
    to get solution to original linear program. Also, calculate the objective
    function value, slack in original upper bound constraints, and residuals
    in original equality constraints.

    Parameters
    ----------
    x : 1-D array
        Solution vector to the standard-form problem.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the
        standard-form problem into the solution to the original problem:

        ``postsolve_args[0]`` is a
        `scipy.optimize._linprog_util._LPProblem` with fields ``c``,
        ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds`` (N x 2 array,
        lower bounds in the 1st column, upper bounds in the 2nd), ``x0``,
        and ``integrality``.

        ``postsolve_args[1:]`` is ``(revstack, C, b_scale)``:

        revstack : list of functions
            the functions in the list reverse the operations of _presolve();
            the function signature is x_org = f(x_mod), where x_mod is the
            result of a presolve step and x_org the value at the start of
            the step
        C, b_scale : scale factors produced by _autoscale(), consumed here
            via _unscale().
    complete : bool
        Whether the solution was determined in presolve (``True`` if so);
        if so, the variable-substitution undo step is skipped.

    Returns
    -------
    x : 1-D array
        Solution vector to original linear programming problem
    fun : float
        optimal objective value for original problem
    slack : 1-D array
        The (non-negative) slack in the upper bound constraints, that is,
        ``b_ub - A_ub @ x``
    con : 1-D array
        The (nominally zero) residuals of the equality constraints, that is,
        ``b_eq - A_eq @ x``
    """
    # note that all the inputs are the ORIGINAL, unmodified versions
    # no rows, columns have been removed

    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0]
    revstack, C, b_scale = postsolve_args[1:]

    # Undo the equilibration scaling applied by _autoscale().
    x = _unscale(x, C, b_scale)

    # Undo variable substitutions of _get_Abc()
    # if "complete", problem was solved in presolve; don't do anything here
    n_x = bounds.shape[0]  # number of original decision variables
    if not complete and bounds is not None:  # bounds are never none, probably
        n_unbounded = 0
        for i, bi in enumerate(bounds):
            lbi = bi[0]
            ubi = bi[1]
            if lbi == -np.inf and ubi == np.inf:
                # Free variable was split xi = xi+ - xi-; the xi- components
                # were appended after the first n_x entries, in order.
                n_unbounded += 1
                x[i] = x[i] - x[n_x + n_unbounded - 1]
            else:
                if lbi == -np.inf:
                    # Variable was negated (xi = -xi') and its bounds swapped.
                    x[i] = ubi - x[i]
                else:
                    # Variable was shifted by its lower bound (xi = xi' + lb).
                    x[i] += lbi
        # all the rest of the variables were artificial
        x = x[:n_x]

    # If there were variables removed from the problem, add them back into the
    # solution vector
    # Apply the functions in revstack (reverse direction)
    for rev in reversed(revstack):
        x = rev(x)

    fun = x.dot(c)
    slack = b_ub - A_ub.dot(x)  # report slack for ORIGINAL UB constraints
    # report residuals of ORIGINAL EQ constraints
    con = b_eq - A_eq.dot(x)

    return x, fun, slack, con
|
| 1417 |
+
|
| 1418 |
+
|
| 1419 |
+
def _check_result(x, fun, status, slack, con, bounds, tol, message,
                  integrality):
    """
    Check the validity of the provided solution.

    A valid (optimal) solution satisfies all bounds, all slack variables are
    non-negative, and all equality constraint residuals are close to zero
    (each within the derived tolerance). Further, the solution, objective,
    slack and residuals must contain no nan values.

    Parameters
    ----------
    x : 1-D array or None
        Solution vector to original linear programming problem, or None if
        the solver did not provide one (e.g. HiGHS on infeasible/unbounded
        problems).
    fun : float
        optimal objective value for original problem
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    slack : 1-D array
        The (non-negative) slack in the upper bound constraints, that is,
        ``b_ub - A_ub @ x``
    con : 1-D array
        The (nominally zero) residuals of the equality constraints, that is,
        ``b - A_eq @ x``
    bounds : 2D array
        The bounds on the original variables ``x``
    tol : float
        Termination tolerance; see [1]_ Section 4.5.
    message : str
        A string descriptor of the exit status of the optimization.
    integrality : array_like or None
        Per-variable integrality flags; values > 1 (semi-continuous /
        semi-integer) allow a variable to take value 0 outside its bounds.

    Returns
    -------
    status : int
        An integer representing the exit status of the optimization
        (codes as above; may be changed to 4 if the reported status is
        inconsistent with the solution).
    message : str
        A string descriptor of the exit status of the optimization.
    """
    # Somewhat arbitrary
    tol = np.sqrt(tol) * 10

    if x is None:
        # HiGHS does not provide x if infeasible/unbounded
        if status == 0:  # Observed with HiGHS Simplex Primal
            status = 4
            message = ("The solver did not provide a solution nor did it "
                       "report a failure. Please submit a bug report.")
        return status, message

    # Any nan anywhere means the solution cannot be declared feasible.
    contains_nans = (
        np.isnan(x).any()
        or np.isnan(fun)
        or np.isnan(slack).any()
        or np.isnan(con).any()
    )

    if contains_nans:
        is_feasible = False
    else:
        if integrality is None:
            integrality = 0
        valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol)
        # When integrality is 2 or 3, x must be within bounds OR take value 0
        valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol)
        invalid_bounds = not np.all(valid_bounds)

        # Slack/residual checks are skipped for unbounded problems (status 3).
        invalid_slack = status != 3 and (slack < -tol).any()
        invalid_con = status != 3 and (np.abs(con) > tol).any()
        is_feasible = not (invalid_bounds or invalid_slack or invalid_con)

    if status == 0 and not is_feasible:
        status = 4
        message = ("The solution does not satisfy the constraints within the "
                   "required tolerance of " + f"{tol:.2E}" + ", yet "
                   "no errors were raised and there is no certificate of "
                   "infeasibility or unboundedness. Check whether "
                   "the slack and constraint residuals are acceptable; "
                   "if not, consider enabling presolve, adjusting the "
                   "tolerance option(s), and/or using a different method. "
                   "Please consider submitting a bug report.")
    elif status == 2 and is_feasible:
        # Occurs if the simplex method exits after phase one with a very
        # nearly basic feasible solution. Postsolving can make the solution
        # basic, however, this solution is NOT optimal
        status = 4
        message = ("The solution is feasible, but the solver did not report "
                   "that the solution was optimal. Please try a different "
                   "method.")

    return status, message
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_minimize.py
ADDED
|
@@ -0,0 +1,1116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unified interfaces to minimization algorithms.
|
| 3 |
+
|
| 4 |
+
Functions
|
| 5 |
+
---------
|
| 6 |
+
- minimize : minimization of a function of several variables.
|
| 7 |
+
- minimize_scalar : minimization of a function of one variable.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
__all__ = ['minimize', 'minimize_scalar']
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
from warnings import warn
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
# unconstrained minimization
|
| 18 |
+
from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
|
| 19 |
+
_minimize_bfgs, _minimize_newtoncg,
|
| 20 |
+
_minimize_scalar_brent, _minimize_scalar_bounded,
|
| 21 |
+
_minimize_scalar_golden, MemoizeJac, OptimizeResult,
|
| 22 |
+
_wrap_callback, _recover_from_bracket_error)
|
| 23 |
+
from ._trustregion_dogleg import _minimize_dogleg
|
| 24 |
+
from ._trustregion_ncg import _minimize_trust_ncg
|
| 25 |
+
from ._trustregion_krylov import _minimize_trust_krylov
|
| 26 |
+
from ._trustregion_exact import _minimize_trustregion_exact
|
| 27 |
+
from ._trustregion_constr import _minimize_trustregion_constr
|
| 28 |
+
|
| 29 |
+
# constrained minimization
|
| 30 |
+
from ._lbfgsb_py import _minimize_lbfgsb
|
| 31 |
+
from ._tnc import _minimize_tnc
|
| 32 |
+
from ._cobyla_py import _minimize_cobyla
|
| 33 |
+
from ._cobyqa_py import _minimize_cobyqa
|
| 34 |
+
from ._slsqp_py import _minimize_slsqp
|
| 35 |
+
from ._constraints import (old_bound_to_new, new_bounds_to_old,
|
| 36 |
+
old_constraint_to_new, new_constraint_to_old,
|
| 37 |
+
NonlinearConstraint, LinearConstraint, Bounds,
|
| 38 |
+
PreparedConstraint)
|
| 39 |
+
from ._differentiable_functions import FD_METHODS
|
| 40 |
+
|
| 41 |
+
MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
|
| 42 |
+
'l-bfgs-b', 'tnc', 'cobyla', 'cobyqa', 'slsqp',
|
| 43 |
+
'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',
|
| 44 |
+
'trust-krylov']
|
| 45 |
+
|
| 46 |
+
# These methods support the new callback interface (passed an OptimizeResult)
|
| 47 |
+
MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
|
| 48 |
+
'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg',
|
| 49 |
+
'trust-exact', 'trust-krylov', 'cobyqa']
|
| 50 |
+
|
| 51 |
+
MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
|
| 52 |
+
|
| 53 |
+
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
|
| 54 |
+
hessp=None, bounds=None, constraints=(), tol=None,
|
| 55 |
+
callback=None, options=None):
|
| 56 |
+
"""Minimization of scalar function of one or more variables.
|
| 57 |
+
|
| 58 |
+
Parameters
|
| 59 |
+
----------
|
| 60 |
+
fun : callable
|
| 61 |
+
The objective function to be minimized.
|
| 62 |
+
|
| 63 |
+
``fun(x, *args) -> float``
|
| 64 |
+
|
| 65 |
+
where ``x`` is a 1-D array with shape (n,) and ``args``
|
| 66 |
+
is a tuple of the fixed parameters needed to completely
|
| 67 |
+
specify the function.
|
| 68 |
+
x0 : ndarray, shape (n,)
|
| 69 |
+
Initial guess. Array of real elements of size (n,),
|
| 70 |
+
where ``n`` is the number of independent variables.
|
| 71 |
+
args : tuple, optional
|
| 72 |
+
Extra arguments passed to the objective function and its
|
| 73 |
+
derivatives (`fun`, `jac` and `hess` functions).
|
| 74 |
+
method : str or callable, optional
|
| 75 |
+
Type of solver. Should be one of
|
| 76 |
+
|
| 77 |
+
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
|
| 78 |
+
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
|
| 79 |
+
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
|
| 80 |
+
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
|
| 81 |
+
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
|
| 82 |
+
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
|
| 83 |
+
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
|
| 84 |
+
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
|
| 85 |
+
- 'COBYQA' :ref:`(see here) <optimize.minimize-cobyqa>`
|
| 86 |
+
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
|
| 87 |
+
- 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
|
| 88 |
+
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
|
| 89 |
+
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
|
| 90 |
+
- 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
|
| 91 |
+
- 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
|
| 92 |
+
- custom - a callable object, see below for description.
|
| 93 |
+
|
| 94 |
+
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
|
| 95 |
+
depending on whether or not the problem has constraints or bounds.
|
| 96 |
+
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
|
| 97 |
+
Method for computing the gradient vector. Only for CG, BFGS,
|
| 98 |
+
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
|
| 99 |
+
trust-exact and trust-constr.
|
| 100 |
+
If it is a callable, it should be a function that returns the gradient
|
| 101 |
+
vector:
|
| 102 |
+
|
| 103 |
+
``jac(x, *args) -> array_like, shape (n,)``
|
| 104 |
+
|
| 105 |
+
where ``x`` is an array with shape (n,) and ``args`` is a tuple with
|
| 106 |
+
the fixed parameters. If `jac` is a Boolean and is True, `fun` is
|
| 107 |
+
assumed to return a tuple ``(f, g)`` containing the objective
|
| 108 |
+
function and the gradient.
|
| 109 |
+
Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
|
| 110 |
+
'trust-krylov' require that either a callable be supplied, or that
|
| 111 |
+
`fun` return the objective and gradient.
|
| 112 |
+
If None or False, the gradient will be estimated using 2-point finite
|
| 113 |
+
difference estimation with an absolute step size.
|
| 114 |
+
Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
|
| 115 |
+
to select a finite difference scheme for numerical estimation of the
|
| 116 |
+
gradient with a relative step size. These finite difference schemes
|
| 117 |
+
obey any specified `bounds`.
|
| 118 |
+
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
|
| 119 |
+
Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
|
| 120 |
+
trust-ncg, trust-krylov, trust-exact and trust-constr.
|
| 121 |
+
If it is callable, it should return the Hessian matrix:
|
| 122 |
+
|
| 123 |
+
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
|
| 124 |
+
|
| 125 |
+
where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
|
| 126 |
+
parameters.
|
| 127 |
+
The keywords {'2-point', '3-point', 'cs'} can also be used to select
|
| 128 |
+
a finite difference scheme for numerical estimation of the hessian.
|
| 129 |
+
Alternatively, objects implementing the `HessianUpdateStrategy`
|
| 130 |
+
interface can be used to approximate the Hessian. Available
|
| 131 |
+
quasi-Newton methods implementing this interface are:
|
| 132 |
+
|
| 133 |
+
- `BFGS`;
|
| 134 |
+
- `SR1`.
|
| 135 |
+
|
| 136 |
+
Not all of the options are available for each of the methods; for
|
| 137 |
+
availability refer to the notes.
|
| 138 |
+
hessp : callable, optional
|
| 139 |
+
Hessian of objective function times an arbitrary vector p. Only for
|
| 140 |
+
Newton-CG, trust-ncg, trust-krylov, trust-constr.
|
| 141 |
+
Only one of `hessp` or `hess` needs to be given. If `hess` is
|
| 142 |
+
provided, then `hessp` will be ignored. `hessp` must compute the
|
| 143 |
+
Hessian times an arbitrary vector:
|
| 144 |
+
|
| 145 |
+
``hessp(x, p, *args) -> ndarray shape (n,)``
|
| 146 |
+
|
| 147 |
+
where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
|
| 148 |
+
dimension (n,) and ``args`` is a tuple with the fixed
|
| 149 |
+
parameters.
|
| 150 |
+
bounds : sequence or `Bounds`, optional
|
| 151 |
+
Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell,
|
| 152 |
+
trust-constr, COBYLA, and COBYQA methods. There are two ways to specify
|
| 153 |
+
the bounds:
|
| 154 |
+
|
| 155 |
+
1. Instance of `Bounds` class.
|
| 156 |
+
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
|
| 157 |
+
is used to specify no bound.
|
| 158 |
+
|
| 159 |
+
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
|
| 160 |
+
Constraints definition. Only for COBYLA, COBYQA, SLSQP and trust-constr.
|
| 161 |
+
|
| 162 |
+
Constraints for 'trust-constr' and 'cobyqa' are defined as a single object
|
| 163 |
+
or a list of objects specifying constraints to the optimization problem.
|
| 164 |
+
Available constraints are:
|
| 165 |
+
|
| 166 |
+
- `LinearConstraint`
|
| 167 |
+
- `NonlinearConstraint`
|
| 168 |
+
|
| 169 |
+
Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
|
| 170 |
+
Each dictionary with fields:
|
| 171 |
+
|
| 172 |
+
type : str
|
| 173 |
+
Constraint type: 'eq' for equality, 'ineq' for inequality.
|
| 174 |
+
fun : callable
|
| 175 |
+
The function defining the constraint.
|
| 176 |
+
jac : callable, optional
|
| 177 |
+
The Jacobian of `fun` (only for SLSQP).
|
| 178 |
+
args : sequence, optional
|
| 179 |
+
Extra arguments to be passed to the function and Jacobian.
|
| 180 |
+
|
| 181 |
+
Equality constraint means that the constraint function result is to
|
| 182 |
+
be zero whereas inequality means that it is to be non-negative.
|
| 183 |
+
Note that COBYLA only supports inequality constraints.
|
| 184 |
+
|
| 185 |
+
tol : float, optional
|
| 186 |
+
Tolerance for termination. When `tol` is specified, the selected
|
| 187 |
+
minimization algorithm sets some relevant solver-specific tolerance(s)
|
| 188 |
+
equal to `tol`. For detailed control, use solver-specific
|
| 189 |
+
options.
|
| 190 |
+
options : dict, optional
|
| 191 |
+
A dictionary of solver options. All methods except `TNC` accept the
|
| 192 |
+
following generic options:
|
| 193 |
+
|
| 194 |
+
maxiter : int
|
| 195 |
+
Maximum number of iterations to perform. Depending on the
|
| 196 |
+
method each iteration may use several function evaluations.
|
| 197 |
+
|
| 198 |
+
For `TNC` use `maxfun` instead of `maxiter`.
|
| 199 |
+
disp : bool
|
| 200 |
+
Set to True to print convergence messages.
|
| 201 |
+
|
| 202 |
+
For method-specific options, see :func:`show_options()`.
|
| 203 |
+
callback : callable, optional
|
| 204 |
+
A callable called after each iteration.
|
| 205 |
+
|
| 206 |
+
All methods except TNC, SLSQP, and COBYLA support a callable with
|
| 207 |
+
the signature:
|
| 208 |
+
|
| 209 |
+
``callback(intermediate_result: OptimizeResult)``
|
| 210 |
+
|
| 211 |
+
where ``intermediate_result`` is a keyword parameter containing an
|
| 212 |
+
`OptimizeResult` with attributes ``x`` and ``fun``, the present values
|
| 213 |
+
of the parameter vector and objective function. Note that the name
|
| 214 |
+
of the parameter must be ``intermediate_result`` for the callback
|
| 215 |
+
to be passed an `OptimizeResult`. These methods will also terminate if
|
| 216 |
+
the callback raises ``StopIteration``.
|
| 217 |
+
|
| 218 |
+
All methods except trust-constr (also) support a signature like:
|
| 219 |
+
|
| 220 |
+
``callback(xk)``
|
| 221 |
+
|
| 222 |
+
where ``xk`` is the current parameter vector.
|
| 223 |
+
|
| 224 |
+
Introspection is used to determine which of the signatures above to
|
| 225 |
+
invoke.
|
| 226 |
+
|
| 227 |
+
Returns
|
| 228 |
+
-------
|
| 229 |
+
res : OptimizeResult
|
| 230 |
+
The optimization result represented as a ``OptimizeResult`` object.
|
| 231 |
+
Important attributes are: ``x`` the solution array, ``success`` a
|
| 232 |
+
Boolean flag indicating if the optimizer exited successfully and
|
| 233 |
+
``message`` which describes the cause of the termination. See
|
| 234 |
+
`OptimizeResult` for a description of other attributes.
|
| 235 |
+
|
| 236 |
+
See also
|
| 237 |
+
--------
|
| 238 |
+
minimize_scalar : Interface to minimization algorithms for scalar
|
| 239 |
+
univariate functions
|
| 240 |
+
show_options : Additional options accepted by the solvers
|
| 241 |
+
|
| 242 |
+
Notes
|
| 243 |
+
-----
|
| 244 |
+
This section describes the available solvers that can be selected by the
|
| 245 |
+
'method' parameter. The default method is *BFGS*.
|
| 246 |
+
|
| 247 |
+
**Unconstrained minimization**
|
| 248 |
+
|
| 249 |
+
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
|
| 250 |
+
gradient algorithm by Polak and Ribiere, a variant of the
|
| 251 |
+
Fletcher-Reeves method described in [5]_ pp.120-122. Only the
|
| 252 |
+
first derivatives are used.
|
| 253 |
+
|
| 254 |
+
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
|
| 255 |
+
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
|
| 256 |
+
pp. 136. It uses the first derivatives only. BFGS has proven good
|
| 257 |
+
performance even for non-smooth optimizations. This method also
|
| 258 |
+
returns an approximation of the Hessian inverse, stored as
|
| 259 |
+
`hess_inv` in the OptimizeResult object.
|
| 260 |
+
|
| 261 |
+
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
|
| 262 |
+
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
|
| 263 |
+
Newton method). It uses a CG method to the compute the search
|
| 264 |
+
direction. See also *TNC* method for a box-constrained
|
| 265 |
+
minimization with a similar algorithm. Suitable for large-scale
|
| 266 |
+
problems.
|
| 267 |
+
|
| 268 |
+
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
|
| 269 |
+
trust-region algorithm [5]_ for unconstrained minimization. This
|
| 270 |
+
algorithm requires the gradient and Hessian; furthermore the
|
| 271 |
+
Hessian is required to be positive definite.
|
| 272 |
+
|
| 273 |
+
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
|
| 274 |
+
Newton conjugate gradient trust-region algorithm [5]_ for
|
| 275 |
+
unconstrained minimization. This algorithm requires the gradient
|
| 276 |
+
and either the Hessian or a function that computes the product of
|
| 277 |
+
the Hessian with a given vector. Suitable for large-scale problems.
|
| 278 |
+
|
| 279 |
+
Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
|
| 280 |
+
the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
|
| 281 |
+
minimization. This algorithm requires the gradient
|
| 282 |
+
and either the Hessian or a function that computes the product of
|
| 283 |
+
the Hessian with a given vector. Suitable for large-scale problems.
|
| 284 |
+
On indefinite problems it requires usually less iterations than the
|
| 285 |
+
`trust-ncg` method and is recommended for medium and large-scale problems.
|
| 286 |
+
|
| 287 |
+
Method :ref:`trust-exact <optimize.minimize-trustexact>`
|
| 288 |
+
is a trust-region method for unconstrained minimization in which
|
| 289 |
+
quadratic subproblems are solved almost exactly [13]_. This
|
| 290 |
+
algorithm requires the gradient and the Hessian (which is
|
| 291 |
+
*not* required to be positive definite). It is, in many
|
| 292 |
+
situations, the Newton method to converge in fewer iterations
|
| 293 |
+
and the most recommended for small and medium-size problems.
|
| 294 |
+
|
| 295 |
+
**Bound-Constrained minimization**
|
| 296 |
+
|
| 297 |
+
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
|
| 298 |
+
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
|
| 299 |
+
applications. However, if numerical computation of derivative can be
|
| 300 |
+
trusted, other algorithms using the first and/or second derivatives
|
| 301 |
+
information might be preferred for their better performance in
|
| 302 |
+
general.
|
| 303 |
+
|
| 304 |
+
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
|
| 305 |
+
algorithm [6]_, [7]_ for bound constrained minimization.
|
| 306 |
+
|
| 307 |
+
Method :ref:`Powell <optimize.minimize-powell>` is a modification
|
| 308 |
+
of Powell's method [3]_, [4]_ which is a conjugate direction
|
| 309 |
+
method. It performs sequential one-dimensional minimizations along
|
| 310 |
+
each vector of the directions set (`direc` field in `options` and
|
| 311 |
+
`info`), which is updated at each iteration of the main
|
| 312 |
+
minimization loop. The function need not be differentiable, and no
|
| 313 |
+
derivatives are taken. If bounds are not provided, then an
|
| 314 |
+
unbounded line search will be used. If bounds are provided and
|
| 315 |
+
the initial guess is within the bounds, then every function
|
| 316 |
+
evaluation throughout the minimization procedure will be within
|
| 317 |
+
the bounds. If bounds are provided, the initial guess is outside
|
| 318 |
+
the bounds, and `direc` is full rank (default has full rank), then
|
| 319 |
+
some function evaluations during the first iteration may be
|
| 320 |
+
outside the bounds, but every function evaluation after the first
|
| 321 |
+
iteration will be within the bounds. If `direc` is not full rank,
|
| 322 |
+
then some parameters may not be optimized and the solution is not
|
| 323 |
+
guaranteed to be within the bounds.
|
| 324 |
+
|
| 325 |
+
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
|
| 326 |
+
algorithm [5]_, [8]_ to minimize a function with variables subject
|
| 327 |
+
to bounds. This algorithm uses gradient information; it is also
|
| 328 |
+
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
|
| 329 |
+
method described above as it wraps a C implementation and allows
|
| 330 |
+
each variable to be given upper and lower bounds.
|
| 331 |
+
|
| 332 |
+
**Constrained Minimization**
|
| 333 |
+
|
| 334 |
+
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
|
| 335 |
+
Constrained Optimization BY Linear Approximation (COBYLA) method
|
| 336 |
+
[9]_, [10]_, [11]_. The algorithm is based on linear
|
| 337 |
+
approximations to the objective function and each constraint. The
|
| 338 |
+
method wraps a FORTRAN implementation of the algorithm. The
|
| 339 |
+
constraints functions 'fun' may return either a single number
|
| 340 |
+
or an array or list of numbers.
|
| 341 |
+
|
| 342 |
+
Method :ref:`COBYQA <optimize.minimize-cobyqa>` uses the Constrained
|
| 343 |
+
Optimization BY Quadratic Approximations (COBYQA) method [18]_. The
|
| 344 |
+
algorithm is a derivative-free trust-region SQP method based on quadratic
|
| 345 |
+
approximations to the objective function and each nonlinear constraint. The
|
| 346 |
+
bounds are treated as unrelaxable constraints, in the sense that the
|
| 347 |
+
algorithm always respects them throughout the optimization process.
|
| 348 |
+
|
| 349 |
+
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
|
| 350 |
+
Least SQuares Programming to minimize a function of several
|
| 351 |
+
variables with any combination of bounds, equality and inequality
|
| 352 |
+
constraints. The method wraps the SLSQP Optimization subroutine
|
| 353 |
+
originally implemented by Dieter Kraft [12]_. Note that the
|
| 354 |
+
wrapper handles infinite values in bounds by converting them into
|
| 355 |
+
large floating values.
|
| 356 |
+
|
| 357 |
+
Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
|
| 358 |
+
trust-region algorithm for constrained optimization. It switches
|
| 359 |
+
between two implementations depending on the problem definition.
|
| 360 |
+
It is the most versatile constrained minimization algorithm
|
| 361 |
+
implemented in SciPy and the most appropriate for large-scale problems.
|
| 362 |
+
For equality constrained problems it is an implementation of Byrd-Omojokun
|
| 363 |
+
Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
|
| 364 |
+
inequality constraints are imposed as well, it switches to the trust-region
|
| 365 |
+
interior point method described in [16]_. This interior point algorithm,
|
| 366 |
+
in turn, solves inequality constraints by introducing slack variables
|
| 367 |
+
and solving a sequence of equality-constrained barrier problems
|
| 368 |
+
for progressively smaller values of the barrier parameter.
|
| 369 |
+
The previously described equality constrained SQP method is
|
| 370 |
+
used to solve the subproblems with increasing levels of accuracy
|
| 371 |
+
as the iterate gets closer to a solution.
|
| 372 |
+
|
| 373 |
+
**Finite-Difference Options**
|
| 374 |
+
|
| 375 |
+
For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
|
| 376 |
+
the gradient and the Hessian may be approximated using
|
| 377 |
+
three finite-difference schemes: {'2-point', '3-point', 'cs'}.
|
| 378 |
+
The scheme 'cs' is, potentially, the most accurate but it
|
| 379 |
+
requires the function to correctly handle complex inputs and to
|
| 380 |
+
be differentiable in the complex plane. The scheme '3-point' is more
|
| 381 |
+
accurate than '2-point' but requires twice as many operations. If the
|
| 382 |
+
gradient is estimated via finite-differences the Hessian must be
|
| 383 |
+
estimated using one of the quasi-Newton strategies.
|
| 384 |
+
|
| 385 |
+
**Method specific options for the** `hess` **keyword**
|
| 386 |
+
|
| 387 |
+
+--------------+------+----------+-------------------------+-----+
|
| 388 |
+
| method/Hess | None | callable | '2-point/'3-point'/'cs' | HUS |
|
| 389 |
+
+==============+======+==========+=========================+=====+
|
| 390 |
+
| Newton-CG | x | (n, n) | x | x |
|
| 391 |
+
| | | LO | | |
|
| 392 |
+
+--------------+------+----------+-------------------------+-----+
|
| 393 |
+
| dogleg | | (n, n) | | |
|
| 394 |
+
+--------------+------+----------+-------------------------+-----+
|
| 395 |
+
| trust-ncg | | (n, n) | x | x |
|
| 396 |
+
+--------------+------+----------+-------------------------+-----+
|
| 397 |
+
| trust-krylov | | (n, n) | x | x |
|
| 398 |
+
+--------------+------+----------+-------------------------+-----+
|
| 399 |
+
| trust-exact | | (n, n) | | |
|
| 400 |
+
+--------------+------+----------+-------------------------+-----+
|
| 401 |
+
| trust-constr | x | (n, n) | x | x |
|
| 402 |
+
| | | LO | | |
|
| 403 |
+
| | | sp | | |
|
| 404 |
+
+--------------+------+----------+-------------------------+-----+
|
| 405 |
+
|
| 406 |
+
where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy
|
| 407 |
+
|
| 408 |
+
**Custom minimizers**
|
| 409 |
+
|
| 410 |
+
It may be useful to pass a custom minimization method, for example
|
| 411 |
+
when using a frontend to this method such as `scipy.optimize.basinhopping`
|
| 412 |
+
or a different library. You can simply pass a callable as the ``method``
|
| 413 |
+
parameter.
|
| 414 |
+
|
| 415 |
+
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
|
| 416 |
+
where ``kwargs`` corresponds to any other parameters passed to `minimize`
|
| 417 |
+
(such as `callback`, `hess`, etc.), except the `options` dict, which has
|
| 418 |
+
its contents also passed as `method` parameters pair by pair. Also, if
|
| 419 |
+
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
|
| 420 |
+
`fun` returns just the function values and `jac` is converted to a function
|
| 421 |
+
returning the Jacobian. The method shall return an `OptimizeResult`
|
| 422 |
+
object.
|
| 423 |
+
|
| 424 |
+
The provided `method` callable must be able to accept (and possibly ignore)
|
| 425 |
+
arbitrary parameters; the set of parameters accepted by `minimize` may
|
| 426 |
+
expand in future versions and then these parameters will be passed to
|
| 427 |
+
the method. You can find an example in the scipy.optimize tutorial.
|
| 428 |
+
|
| 429 |
+
References
|
| 430 |
+
----------
|
| 431 |
+
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
|
| 432 |
+
Minimization. The Computer Journal 7: 308-13.
|
| 433 |
+
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
|
| 434 |
+
respectable, in Numerical Analysis 1995: Proceedings of the 1995
|
| 435 |
+
Dundee Biennial Conference in Numerical Analysis (Eds. D F
|
| 436 |
+
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
|
| 437 |
+
191-208.
|
| 438 |
+
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
|
| 439 |
+
a function of several variables without calculating derivatives. The
|
| 440 |
+
Computer Journal 7: 155-162.
|
| 441 |
+
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
|
| 442 |
+
Numerical Recipes (any edition), Cambridge University Press.
|
| 443 |
+
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
|
| 444 |
+
Springer New York.
|
| 445 |
+
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
|
| 446 |
+
Algorithm for Bound Constrained Optimization. SIAM Journal on
|
| 447 |
+
Scientific and Statistical Computing 16 (5): 1190-1208.
|
| 448 |
+
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
|
| 449 |
+
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
|
| 450 |
+
optimization. ACM Transactions on Mathematical Software 23 (4):
|
| 451 |
+
550-560.
|
| 452 |
+
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
|
| 453 |
+
1984. SIAM Journal of Numerical Analysis 21: 770-778.
|
| 454 |
+
.. [9] Powell, M J D. A direct search optimization method that models
|
| 455 |
+
the objective and constraint functions by linear interpolation.
|
| 456 |
+
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
|
| 457 |
+
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
|
| 458 |
+
.. [10] Powell M J D. Direct search algorithms for optimization
|
| 459 |
+
calculations. 1998. Acta Numerica 7: 287-336.
|
| 460 |
+
.. [11] Powell M J D. A view of algorithms for optimization without
|
| 461 |
+
derivatives. 2007.Cambridge University Technical Report DAMTP
|
| 462 |
+
2007/NA03
|
| 463 |
+
.. [12] Kraft, D. A software package for sequential quadratic
|
| 464 |
+
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
|
| 465 |
+
Center -- Institute for Flight Mechanics, Koln, Germany.
|
| 466 |
+
.. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
|
| 467 |
+
Trust region methods. 2000. Siam. pp. 169-200.
|
| 468 |
+
.. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
|
| 469 |
+
implementation of the GLTR method for iterative solution of
|
| 470 |
+
the trust region problem", :arxiv:`1611.04718`
|
| 471 |
+
.. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
|
| 472 |
+
Trust-Region Subproblem using the Lanczos Method",
|
| 473 |
+
SIAM J. Optim., 9(2), 504--525, (1999).
|
| 474 |
+
.. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
|
| 475 |
+
An interior point algorithm for large-scale nonlinear programming.
|
| 476 |
+
SIAM Journal on Optimization 9.4: 877-900.
|
| 477 |
+
.. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
|
| 478 |
+
implementation of an algorithm for large-scale equality constrained
|
| 479 |
+
optimization. SIAM Journal on Optimization 8.3: 682-706.
|
| 480 |
+
.. [18] Ragonneau, T. M. *Model-Based Derivative-Free Optimization Methods
|
| 481 |
+
and Software*. PhD thesis, Department of Applied Mathematics, The Hong
|
| 482 |
+
Kong Polytechnic University, Hong Kong, China, 2022. URL:
|
| 483 |
+
https://theses.lib.polyu.edu.hk/handle/200/12294.
|
| 484 |
+
|
| 485 |
+
Examples
|
| 486 |
+
--------
|
| 487 |
+
Let us consider the problem of minimizing the Rosenbrock function. This
|
| 488 |
+
function (and its respective derivatives) is implemented in `rosen`
|
| 489 |
+
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
|
| 490 |
+
|
| 491 |
+
>>> from scipy.optimize import minimize, rosen, rosen_der
|
| 492 |
+
|
| 493 |
+
A simple application of the *Nelder-Mead* method is:
|
| 494 |
+
|
| 495 |
+
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
|
| 496 |
+
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
|
| 497 |
+
>>> res.x
|
| 498 |
+
array([ 1., 1., 1., 1., 1.])
|
| 499 |
+
|
| 500 |
+
Now using the *BFGS* algorithm, using the first derivative and a few
|
| 501 |
+
options:
|
| 502 |
+
|
| 503 |
+
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
|
| 504 |
+
... options={'gtol': 1e-6, 'disp': True})
|
| 505 |
+
Optimization terminated successfully.
|
| 506 |
+
Current function value: 0.000000
|
| 507 |
+
Iterations: 26
|
| 508 |
+
Function evaluations: 31
|
| 509 |
+
Gradient evaluations: 31
|
| 510 |
+
>>> res.x
|
| 511 |
+
array([ 1., 1., 1., 1., 1.])
|
| 512 |
+
>>> print(res.message)
|
| 513 |
+
Optimization terminated successfully.
|
| 514 |
+
>>> res.hess_inv
|
| 515 |
+
array([
|
| 516 |
+
[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
|
| 517 |
+
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
|
| 518 |
+
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
|
| 519 |
+
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
|
| 520 |
+
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]
|
| 521 |
+
])
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
Next, consider a minimization problem with several constraints (namely
|
| 525 |
+
Example 16.4 from [5]_). The objective function is:
|
| 526 |
+
|
| 527 |
+
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
|
| 528 |
+
|
| 529 |
+
There are three constraints defined as:
|
| 530 |
+
|
| 531 |
+
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
|
| 532 |
+
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
|
| 533 |
+
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
|
| 534 |
+
|
| 535 |
+
And variables must be positive, hence the following bounds:
|
| 536 |
+
|
| 537 |
+
>>> bnds = ((0, None), (0, None))
|
| 538 |
+
|
| 539 |
+
The optimization problem is solved using the SLSQP method as:
|
| 540 |
+
|
| 541 |
+
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
|
| 542 |
+
... constraints=cons)
|
| 543 |
+
|
| 544 |
+
It should converge to the theoretical solution (1.4 ,1.7).
|
| 545 |
+
|
| 546 |
+
"""
|
| 547 |
+
x0 = np.atleast_1d(np.asarray(x0))
|
| 548 |
+
|
| 549 |
+
if x0.ndim != 1:
|
| 550 |
+
raise ValueError("'x0' must only have one dimension.")
|
| 551 |
+
|
| 552 |
+
if x0.dtype.kind in np.typecodes["AllInteger"]:
|
| 553 |
+
x0 = np.asarray(x0, dtype=float)
|
| 554 |
+
|
| 555 |
+
if not isinstance(args, tuple):
|
| 556 |
+
args = (args,)
|
| 557 |
+
|
| 558 |
+
if method is None:
|
| 559 |
+
# Select automatically
|
| 560 |
+
if constraints:
|
| 561 |
+
method = 'SLSQP'
|
| 562 |
+
elif bounds is not None:
|
| 563 |
+
method = 'L-BFGS-B'
|
| 564 |
+
else:
|
| 565 |
+
method = 'BFGS'
|
| 566 |
+
|
| 567 |
+
if callable(method):
|
| 568 |
+
meth = "_custom"
|
| 569 |
+
else:
|
| 570 |
+
meth = method.lower()
|
| 571 |
+
|
| 572 |
+
if options is None:
|
| 573 |
+
options = {}
|
| 574 |
+
# check if optional parameters are supported by the selected method
|
| 575 |
+
# - jac
|
| 576 |
+
if meth in ('nelder-mead', 'powell', 'cobyla', 'cobyqa') and bool(jac):
|
| 577 |
+
warn('Method %s does not use gradient information (jac).' % method,
|
| 578 |
+
RuntimeWarning, stacklevel=2)
|
| 579 |
+
# - hess
|
| 580 |
+
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
|
| 581 |
+
'trust-krylov', 'trust-exact', '_custom') and hess is not None:
|
| 582 |
+
warn('Method %s does not use Hessian information (hess).' % method,
|
| 583 |
+
RuntimeWarning, stacklevel=2)
|
| 584 |
+
# - hessp
|
| 585 |
+
if meth not in ('newton-cg', 'trust-ncg', 'trust-constr',
|
| 586 |
+
'trust-krylov', '_custom') \
|
| 587 |
+
and hessp is not None:
|
| 588 |
+
warn('Method %s does not use Hessian-vector product '
|
| 589 |
+
'information (hessp).' % method,
|
| 590 |
+
RuntimeWarning, stacklevel=2)
|
| 591 |
+
# - constraints or bounds
|
| 592 |
+
if (meth not in ('cobyla', 'cobyqa', 'slsqp', 'trust-constr', '_custom') and
|
| 593 |
+
np.any(constraints)):
|
| 594 |
+
warn('Method %s cannot handle constraints.' % method,
|
| 595 |
+
RuntimeWarning, stacklevel=2)
|
| 596 |
+
if meth not in (
|
| 597 |
+
'nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'cobyqa', 'slsqp',
|
| 598 |
+
'tnc', 'trust-constr', '_custom') and bounds is not None:
|
| 599 |
+
warn('Method %s cannot handle bounds.' % method,
|
| 600 |
+
RuntimeWarning, stacklevel=2)
|
| 601 |
+
# - return_all
|
| 602 |
+
if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'cobyqa', 'slsqp') and
|
| 603 |
+
options.get('return_all', False)):
|
| 604 |
+
warn('Method %s does not support the return_all option.' % method,
|
| 605 |
+
RuntimeWarning, stacklevel=2)
|
| 606 |
+
|
| 607 |
+
# check gradient vector
|
| 608 |
+
if callable(jac):
|
| 609 |
+
pass
|
| 610 |
+
elif jac is True:
|
| 611 |
+
# fun returns func and grad
|
| 612 |
+
fun = MemoizeJac(fun)
|
| 613 |
+
jac = fun.derivative
|
| 614 |
+
elif (jac in FD_METHODS and
|
| 615 |
+
meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
|
| 616 |
+
# finite differences with relative step
|
| 617 |
+
pass
|
| 618 |
+
elif meth in ['trust-constr']:
|
| 619 |
+
# default jac calculation for this method
|
| 620 |
+
jac = '2-point'
|
| 621 |
+
elif jac is None or bool(jac) is False:
|
| 622 |
+
# this will cause e.g. LBFGS to use forward difference, absolute step
|
| 623 |
+
jac = None
|
| 624 |
+
else:
|
| 625 |
+
# default if jac option is not understood
|
| 626 |
+
jac = None
|
| 627 |
+
|
| 628 |
+
# set default tolerances
|
| 629 |
+
if tol is not None:
|
| 630 |
+
options = dict(options)
|
| 631 |
+
if meth == 'nelder-mead':
|
| 632 |
+
options.setdefault('xatol', tol)
|
| 633 |
+
options.setdefault('fatol', tol)
|
| 634 |
+
if meth in ('newton-cg', 'powell', 'tnc'):
|
| 635 |
+
options.setdefault('xtol', tol)
|
| 636 |
+
if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
|
| 637 |
+
options.setdefault('ftol', tol)
|
| 638 |
+
if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
|
| 639 |
+
'trust-ncg', 'trust-exact', 'trust-krylov'):
|
| 640 |
+
options.setdefault('gtol', tol)
|
| 641 |
+
if meth in ('cobyla', '_custom'):
|
| 642 |
+
options.setdefault('tol', tol)
|
| 643 |
+
if meth == 'cobyqa':
|
| 644 |
+
options.setdefault('final_tr_radius', tol)
|
| 645 |
+
if meth == 'trust-constr':
|
| 646 |
+
options.setdefault('xtol', tol)
|
| 647 |
+
options.setdefault('gtol', tol)
|
| 648 |
+
options.setdefault('barrier_tol', tol)
|
| 649 |
+
|
| 650 |
+
if meth == '_custom':
|
| 651 |
+
# custom method called before bounds and constraints are 'standardised'
|
| 652 |
+
# custom method should be able to accept whatever bounds/constraints
|
| 653 |
+
# are provided to it.
|
| 654 |
+
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
|
| 655 |
+
bounds=bounds, constraints=constraints,
|
| 656 |
+
callback=callback, **options)
|
| 657 |
+
|
| 658 |
+
constraints = standardize_constraints(constraints, x0, meth)
|
| 659 |
+
|
| 660 |
+
remove_vars = False
|
| 661 |
+
if bounds is not None:
|
| 662 |
+
# convert to new-style bounds so we only have to consider one case
|
| 663 |
+
bounds = standardize_bounds(bounds, x0, 'new')
|
| 664 |
+
bounds = _validate_bounds(bounds, x0, meth)
|
| 665 |
+
|
| 666 |
+
if meth in {"tnc", "slsqp", "l-bfgs-b"}:
|
| 667 |
+
# These methods can't take the finite-difference derivatives they
|
| 668 |
+
# need when a variable is fixed by the bounds. To avoid this issue,
|
| 669 |
+
# remove fixed variables from the problem.
|
| 670 |
+
# NOTE: if this list is expanded, then be sure to update the
|
| 671 |
+
# accompanying tests and test_optimize.eb_data. Consider also if
|
| 672 |
+
# default OptimizeResult will need updating.
|
| 673 |
+
|
| 674 |
+
# determine whether any variables are fixed
|
| 675 |
+
i_fixed = (bounds.lb == bounds.ub)
|
| 676 |
+
|
| 677 |
+
if np.all(i_fixed):
|
| 678 |
+
# all the parameters are fixed, a minimizer is not able to do
|
| 679 |
+
# anything
|
| 680 |
+
return _optimize_result_for_equal_bounds(
|
| 681 |
+
fun, bounds, meth, args=args, constraints=constraints
|
| 682 |
+
)
|
| 683 |
+
|
| 684 |
+
# determine whether finite differences are needed for any grad/jac
|
| 685 |
+
fd_needed = (not callable(jac))
|
| 686 |
+
for con in constraints:
|
| 687 |
+
if not callable(con.get('jac', None)):
|
| 688 |
+
fd_needed = True
|
| 689 |
+
|
| 690 |
+
# If finite differences are ever used, remove all fixed variables
|
| 691 |
+
# Always remove fixed variables for TNC; see gh-14565
|
| 692 |
+
remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
|
| 693 |
+
if remove_vars:
|
| 694 |
+
x_fixed = (bounds.lb)[i_fixed]
|
| 695 |
+
x0 = x0[~i_fixed]
|
| 696 |
+
bounds = _remove_from_bounds(bounds, i_fixed)
|
| 697 |
+
fun = _remove_from_func(fun, i_fixed, x_fixed)
|
| 698 |
+
if callable(callback):
|
| 699 |
+
callback = _remove_from_func(callback, i_fixed, x_fixed)
|
| 700 |
+
if callable(jac):
|
| 701 |
+
jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
|
| 702 |
+
|
| 703 |
+
# make a copy of the constraints so the user's version doesn't
|
| 704 |
+
# get changed. (Shallow copy is ok)
|
| 705 |
+
constraints = [con.copy() for con in constraints]
|
| 706 |
+
for con in constraints: # yes, guaranteed to be a list
|
| 707 |
+
con['fun'] = _remove_from_func(con['fun'], i_fixed,
|
| 708 |
+
x_fixed, min_dim=1,
|
| 709 |
+
remove=0)
|
| 710 |
+
if callable(con.get('jac', None)):
|
| 711 |
+
con['jac'] = _remove_from_func(con['jac'], i_fixed,
|
| 712 |
+
x_fixed, min_dim=2,
|
| 713 |
+
remove=1)
|
| 714 |
+
bounds = standardize_bounds(bounds, x0, meth)
|
| 715 |
+
|
| 716 |
+
callback = _wrap_callback(callback, meth)
|
| 717 |
+
|
| 718 |
+
if meth == 'nelder-mead':
|
| 719 |
+
res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
|
| 720 |
+
**options)
|
| 721 |
+
elif meth == 'powell':
|
| 722 |
+
res = _minimize_powell(fun, x0, args, callback, bounds, **options)
|
| 723 |
+
elif meth == 'cg':
|
| 724 |
+
res = _minimize_cg(fun, x0, args, jac, callback, **options)
|
| 725 |
+
elif meth == 'bfgs':
|
| 726 |
+
res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
|
| 727 |
+
elif meth == 'newton-cg':
|
| 728 |
+
res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
|
| 729 |
+
**options)
|
| 730 |
+
elif meth == 'l-bfgs-b':
|
| 731 |
+
res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
|
| 732 |
+
callback=callback, **options)
|
| 733 |
+
elif meth == 'tnc':
|
| 734 |
+
res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
|
| 735 |
+
**options)
|
| 736 |
+
elif meth == 'cobyla':
|
| 737 |
+
res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
|
| 738 |
+
bounds=bounds, **options)
|
| 739 |
+
elif meth == 'cobyqa':
|
| 740 |
+
res = _minimize_cobyqa(fun, x0, args, bounds, constraints, callback,
|
| 741 |
+
**options)
|
| 742 |
+
elif meth == 'slsqp':
|
| 743 |
+
res = _minimize_slsqp(fun, x0, args, jac, bounds,
|
| 744 |
+
constraints, callback=callback, **options)
|
| 745 |
+
elif meth == 'trust-constr':
|
| 746 |
+
res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
|
| 747 |
+
bounds, constraints,
|
| 748 |
+
callback=callback, **options)
|
| 749 |
+
elif meth == 'dogleg':
|
| 750 |
+
res = _minimize_dogleg(fun, x0, args, jac, hess,
|
| 751 |
+
callback=callback, **options)
|
| 752 |
+
elif meth == 'trust-ncg':
|
| 753 |
+
res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
|
| 754 |
+
callback=callback, **options)
|
| 755 |
+
elif meth == 'trust-krylov':
|
| 756 |
+
res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
|
| 757 |
+
callback=callback, **options)
|
| 758 |
+
elif meth == 'trust-exact':
|
| 759 |
+
res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
|
| 760 |
+
callback=callback, **options)
|
| 761 |
+
else:
|
| 762 |
+
raise ValueError('Unknown solver %s' % method)
|
| 763 |
+
|
| 764 |
+
if remove_vars:
|
| 765 |
+
res.x = _add_to_array(res.x, i_fixed, x_fixed)
|
| 766 |
+
res.jac = _add_to_array(res.jac, i_fixed, np.nan)
|
| 767 |
+
if "hess_inv" in res:
|
| 768 |
+
res.hess_inv = None # unknown
|
| 769 |
+
|
| 770 |
+
if getattr(callback, 'stop_iteration', False):
|
| 771 |
+
res.success = False
|
| 772 |
+
res.status = 99
|
| 773 |
+
res.message = "`callback` raised `StopIteration`."
|
| 774 |
+
|
| 775 |
+
return res
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
def minimize_scalar(fun, bracket=None, bounds=None, args=(),
                    method=None, tol=None, options=None):
    """Local minimization of scalar function of one variable.

    Parameters
    ----------
    fun : callable
        Objective function.  Scalar function, must return a scalar.
    bracket : sequence, optional
        For methods 'brent' and 'golden', `bracket` defines the bracketing
        interval and is required.  Either a triple ``(xa, xb, xc)`` satisfying
        ``xa < xb < xc`` and ``func(xb) < func(xa) and func(xb) < func(xc)``,
        or a pair ``(xa, xb)`` to be used as initial points for a downhill
        bracket search (see `scipy.optimize.bracket`).  The minimizer
        ``res.x`` will not necessarily satisfy ``xa <= res.x <= xb``.
    bounds : sequence, optional
        For method 'bounded', `bounds` is mandatory and must have two finite
        items corresponding to the optimization bounds.
    args : tuple, optional
        Extra arguments passed to the objective function.
    method : str or callable, optional
        Type of solver: 'Brent', 'Bounded', 'Golden', or a custom callable.
        Default is "Bounded" if bounds are provided and "Brent" otherwise.
    tol : float, optional
        Tolerance for termination.  For detailed control, use solver-specific
        options.
    options : dict, optional
        A dictionary of solver options, e.g. ``maxiter`` (maximum number of
        iterations) and ``disp`` (set to True to print convergence messages).
        See :func:`show_options()` for solver-specific options.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as an ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination.

    See also
    --------
    minimize : Interface to minimization algorithms for scalar multivariate
        functions
    show_options : Additional options accepted by the solvers

    Notes
    -----
    A custom minimizer may be passed as ``method``; it is called as
    ``method(fun, args, **kwargs, **options)`` and must return an
    `OptimizeResult`.  Note that the Brent and Golden methods do not
    guarantee success unless a valid ``bracket`` triple is provided, and all
    methods here are intended only for local minimization.

    Examples
    --------
    >>> from scipy.optimize import minimize_scalar
    >>> res = minimize_scalar(lambda x: (x - 2) * x * (x + 2)**2)
    >>> res.x
    1.28077640403
    """
    # Normalize `args` so it can always be splatted into the objective.
    if not isinstance(args, tuple):
        args = (args,)

    # Resolve the solver key.  A callable is treated as a user-supplied
    # custom method; otherwise the default depends on whether bounds exist.
    if callable(method):
        meth = "_custom"
    elif method is None:
        meth = 'bounded' if bounds is not None else 'brent'
    else:
        meth = method.lower()
    if options is None:
        options = {}

    # 'brent' and 'golden' are unbounded methods; reject bounds early
    # rather than silently ignoring them.
    if bounds is not None and meth in {'brent', 'golden'}:
        raise ValueError(
            f"Use of `bounds` is incompatible with 'method={method}'.")

    if tol is not None:
        # Copy so that injecting tolerance defaults never mutates the
        # caller's options dict.
        options = dict(options)
        if meth == 'bounded' and 'xatol' not in options:
            warn("Method 'bounded' does not support relative tolerance in x; "
                 "defaulting to absolute tolerance.",
                 RuntimeWarning, stacklevel=2)
            options['xatol'] = tol
        elif meth == '_custom':
            options.setdefault('tol', tol)
        else:
            options.setdefault('xtol', tol)

    # Replace a boolean "disp" option, if specified, by an integer value.
    disp_flag = options.get('disp')
    if isinstance(disp_flag, bool):
        options['disp'] = 2 * int(disp_flag)

    # Dispatch to the selected solver.
    if meth == '_custom':
        res = method(fun, args=args, bracket=bracket, bounds=bounds, **options)
    elif meth == 'brent':
        res = _recover_from_bracket_error(_minimize_scalar_brent,
                                          fun, bracket, args, **options)
    elif meth == 'golden':
        res = _recover_from_bracket_error(_minimize_scalar_golden,
                                          fun, bracket, args, **options)
    elif meth == 'bounded':
        if bounds is None:
            raise ValueError('The `bounds` parameter is mandatory for '
                             'method `bounded`.')
        res = _minimize_scalar_bounded(fun, bounds, args, **options)
    else:
        raise ValueError('Unknown solver %s' % method)

    # gh-16196 reported inconsistencies in the output shape of `res.x`.
    # While fixing this, future-proof it for when the function is
    # vectorized: the shape of `res.x` should match that of `res.fun`.
    res.fun = np.asarray(res.fun)[()]
    res.x = np.reshape(res.x, res.fun.shape)[()]
    return res
|
| 979 |
+
|
| 980 |
+
|
| 981 |
+
def _remove_from_bounds(bounds, i_fixed):
|
| 982 |
+
"""Removes fixed variables from a `Bounds` instance"""
|
| 983 |
+
lb = bounds.lb[~i_fixed]
|
| 984 |
+
ub = bounds.ub[~i_fixed]
|
| 985 |
+
return Bounds(lb, ub) # don't mutate original Bounds object
|
| 986 |
+
|
| 987 |
+
|
| 988 |
+
def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
|
| 989 |
+
"""Wraps a function such that fixed variables need not be passed in"""
|
| 990 |
+
def fun_out(x_in, *args, **kwargs):
|
| 991 |
+
x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
|
| 992 |
+
x_out[i_fixed] = x_fixed
|
| 993 |
+
x_out[~i_fixed] = x_in
|
| 994 |
+
y_out = fun_in(x_out, *args, **kwargs)
|
| 995 |
+
y_out = np.array(y_out)
|
| 996 |
+
|
| 997 |
+
if min_dim == 1:
|
| 998 |
+
y_out = np.atleast_1d(y_out)
|
| 999 |
+
elif min_dim == 2:
|
| 1000 |
+
y_out = np.atleast_2d(y_out)
|
| 1001 |
+
|
| 1002 |
+
if remove == 1:
|
| 1003 |
+
y_out = y_out[..., ~i_fixed]
|
| 1004 |
+
elif remove == 2:
|
| 1005 |
+
y_out = y_out[~i_fixed, ~i_fixed]
|
| 1006 |
+
|
| 1007 |
+
return y_out
|
| 1008 |
+
return fun_out
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
def _add_to_array(x_in, i_fixed, x_fixed):
|
| 1012 |
+
"""Adds fixed variables back to an array"""
|
| 1013 |
+
i_free = ~i_fixed
|
| 1014 |
+
if x_in.ndim == 2:
|
| 1015 |
+
i_free = i_free[:, None] @ i_free[None, :]
|
| 1016 |
+
x_out = np.zeros_like(i_free, dtype=x_in.dtype)
|
| 1017 |
+
x_out[~i_free] = x_fixed
|
| 1018 |
+
x_out[i_free] = x_in.ravel()
|
| 1019 |
+
return x_out
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
def _validate_bounds(bounds, x0, meth):
|
| 1023 |
+
"""Check that bounds are valid."""
|
| 1024 |
+
|
| 1025 |
+
msg = "An upper bound is less than the corresponding lower bound."
|
| 1026 |
+
if np.any(bounds.ub < bounds.lb):
|
| 1027 |
+
raise ValueError(msg)
|
| 1028 |
+
|
| 1029 |
+
msg = "The number of bounds is not compatible with the length of `x0`."
|
| 1030 |
+
try:
|
| 1031 |
+
bounds.lb = np.broadcast_to(bounds.lb, x0.shape)
|
| 1032 |
+
bounds.ub = np.broadcast_to(bounds.ub, x0.shape)
|
| 1033 |
+
except Exception as e:
|
| 1034 |
+
raise ValueError(msg) from e
|
| 1035 |
+
|
| 1036 |
+
return bounds
|
| 1037 |
+
|
| 1038 |
+
def standardize_bounds(bounds, x0, meth):
    """Converts bounds to the form required by the solver."""
    wants_new_style = meth in {'trust-constr', 'powell', 'nelder-mead',
                               'cobyla', 'cobyqa', 'new'}
    if wants_new_style:
        # These solvers take a `Bounds` instance.
        if not isinstance(bounds, Bounds):
            lb, ub = old_bound_to_new(bounds)
            bounds = Bounds(lb, ub)
    elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):
        # These solvers take the old-style sequence of (low, high) pairs.
        if isinstance(bounds, Bounds):
            bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
    return bounds
|
| 1049 |
+
|
| 1050 |
+
|
| 1051 |
+
def standardize_constraints(constraints, x0, meth):
    """Converts constraints to the form required by the solver."""
    all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
    new_constraint_types = all_constraint_types[:-1]

    # Normalize to a mutable list of constraint objects.
    if constraints is None:
        constraints = []
    elif isinstance(constraints, all_constraint_types):
        constraints = [constraints]
    else:
        constraints = list(constraints)

    if meth in ['trust-constr', 'cobyqa', 'new']:
        # New-style solvers: upgrade any old dict-style constraints.
        for idx, con in enumerate(constraints):
            if not isinstance(con, new_constraint_types):
                constraints[idx] = old_constraint_to_new(idx, con)
    else:
        # Old-style solvers: each new-style constraint may expand into
        # several old-style dicts; iterate over a copy because the
        # original list grows as we extend it.
        for idx, con in enumerate(list(constraints)):
            if isinstance(con, new_constraint_types):
                converted = new_constraint_to_old(con, x0)
                constraints[idx] = converted[0]
                constraints.extend(converted[1:])  # appends if present

    return constraints
|
| 1075 |
+
|
| 1076 |
+
|
| 1077 |
+
def _optimize_result_for_equal_bounds(
|
| 1078 |
+
fun, bounds, method, args=(), constraints=()
|
| 1079 |
+
):
|
| 1080 |
+
"""
|
| 1081 |
+
Provides a default OptimizeResult for when a bounded minimization method
|
| 1082 |
+
has (lb == ub).all().
|
| 1083 |
+
|
| 1084 |
+
Parameters
|
| 1085 |
+
----------
|
| 1086 |
+
fun: callable
|
| 1087 |
+
bounds: Bounds
|
| 1088 |
+
method: str
|
| 1089 |
+
constraints: Constraint
|
| 1090 |
+
"""
|
| 1091 |
+
success = True
|
| 1092 |
+
message = 'All independent variables were fixed by bounds.'
|
| 1093 |
+
|
| 1094 |
+
# bounds is new-style
|
| 1095 |
+
x0 = bounds.lb
|
| 1096 |
+
|
| 1097 |
+
if constraints:
|
| 1098 |
+
message = ("All independent variables were fixed by bounds at values"
|
| 1099 |
+
" that satisfy the constraints.")
|
| 1100 |
+
constraints = standardize_constraints(constraints, x0, 'new')
|
| 1101 |
+
|
| 1102 |
+
maxcv = 0
|
| 1103 |
+
for c in constraints:
|
| 1104 |
+
pc = PreparedConstraint(c, x0)
|
| 1105 |
+
violation = pc.violation(x0)
|
| 1106 |
+
if np.sum(violation):
|
| 1107 |
+
maxcv = max(maxcv, np.max(violation))
|
| 1108 |
+
success = False
|
| 1109 |
+
message = (f"All independent variables were fixed by bounds, but "
|
| 1110 |
+
f"the independent variables do not satisfy the "
|
| 1111 |
+
f"constraints exactly. (Maximum violation: {maxcv}).")
|
| 1112 |
+
|
| 1113 |
+
return OptimizeResult(
|
| 1114 |
+
x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1,
|
| 1115 |
+
njev=0, nhev=0,
|
| 1116 |
+
)
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (61.1 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d7584b3d74b2c7f2804c049af2291355762236b8a294520a6c7a83085ac11544
|
| 3 |
+
size 152168
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_nnls.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.linalg import solve, LinAlgWarning
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
__all__ = ['nnls']
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def nnls(A, b, maxiter=None, *, atol=None):
    """
    Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``.

    This problem, often called NonNegative Least Squares, is a convex
    optimization problem with convex constraints.  It typically arises when
    ``x`` models quantities for which only nonnegative values are attainable
    (weights of ingredients, component costs, and so on).

    Parameters
    ----------
    A : (m, n) ndarray
        Coefficient array
    b : (m,) ndarray, float
        Right-hand side vector.
    maxiter: int, optional
        Maximum number of iterations, optional. Default value is ``3 * n``.
    atol: float
        Tolerance value used in the algorithm to assess closeness to zero in
        the projected residual ``(A.T @ (A x - b)`` entries.  Increasing this
        value relaxes the solution constraints.

    Returns
    -------
    x : ndarray
        Solution vector.
    rnorm : float
        The 2-norm of the residual, ``|| Ax-b ||_2``.

    See Also
    --------
    lsq_linear : Linear least squares with bounds on the variables

    Notes
    -----
    The implementation is the fast active-set algorithm of Bro & de Jong
    (1997), an improved version of the classical Lawson-Hanson method.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import nnls
    >>> A = np.array([[1, 0], [1, 0], [0, 1]])
    >>> nnls(A, np.array([2, 1, 1]))
    (array([1.5, 1. ]), 0.7071067811865475)
    """
    # Reject NaN/inf up front; the active-set solver assumes finite data.
    A = np.asarray_chkfinite(A)
    b = np.asarray_chkfinite(b)

    if A.ndim != 2:
        raise ValueError(
            f"Expected a two-dimensional array (matrix), but the shape of A "
            f"is {A.shape}")
    if b.ndim != 1:
        raise ValueError(
            f"Expected a one-dimensional array (vector), but the shape of b "
            f"is {b.shape}")

    m, n = A.shape
    if m != b.shape[0]:
        raise ValueError(
            f"Incompatible dimensions. The first dimension of A is {m}, "
            f"while the shape of b is {(b.shape[0],)}")

    x, rnorm, mode = _nnls(A, b, maxiter, tol=atol)
    if mode != 1:
        # The only failure mode of the inner solver is hitting maxiter.
        raise RuntimeError("Maximum number of iterations reached.")

    return x, rnorm
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _nnls(A, b, maxiter=None, tol=None):
|
| 99 |
+
"""
|
| 100 |
+
This is a single RHS algorithm from ref [2] above. For multiple RHS
|
| 101 |
+
support, the algorithm is given in :doi:`10.1002/cem.889`
|
| 102 |
+
"""
|
| 103 |
+
m, n = A.shape
|
| 104 |
+
|
| 105 |
+
AtA = A.T @ A
|
| 106 |
+
Atb = b @ A # Result is 1D - let NumPy figure it out
|
| 107 |
+
|
| 108 |
+
if not maxiter:
|
| 109 |
+
maxiter = 3*n
|
| 110 |
+
if tol is None:
|
| 111 |
+
tol = 10 * max(m, n) * np.spacing(1.)
|
| 112 |
+
|
| 113 |
+
# Initialize vars
|
| 114 |
+
x = np.zeros(n, dtype=np.float64)
|
| 115 |
+
s = np.zeros(n, dtype=np.float64)
|
| 116 |
+
# Inactive constraint switches
|
| 117 |
+
P = np.zeros(n, dtype=bool)
|
| 118 |
+
|
| 119 |
+
# Projected residual
|
| 120 |
+
w = Atb.copy().astype(np.float64) # x=0. Skip (-AtA @ x) term
|
| 121 |
+
|
| 122 |
+
# Overall iteration counter
|
| 123 |
+
# Outer loop is not counted, inner iter is counted across outer spins
|
| 124 |
+
iter = 0
|
| 125 |
+
|
| 126 |
+
while (not P.all()) and (w[~P] > tol).any(): # B
|
| 127 |
+
# Get the "most" active coeff index and move to inactive set
|
| 128 |
+
k = np.argmax(w * (~P)) # B.2
|
| 129 |
+
P[k] = True # B.3
|
| 130 |
+
|
| 131 |
+
# Iteration solution
|
| 132 |
+
s[:] = 0.
|
| 133 |
+
# B.4
|
| 134 |
+
with warnings.catch_warnings():
|
| 135 |
+
warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
|
| 136 |
+
category=LinAlgWarning)
|
| 137 |
+
s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', check_finite=False)
|
| 138 |
+
|
| 139 |
+
# Inner loop
|
| 140 |
+
while (iter < maxiter) and (s[P].min() < 0): # C.1
|
| 141 |
+
iter += 1
|
| 142 |
+
inds = P * (s < 0)
|
| 143 |
+
alpha = (x[inds] / (x[inds] - s[inds])).min() # C.2
|
| 144 |
+
x *= (1 - alpha)
|
| 145 |
+
x += alpha*s
|
| 146 |
+
P[x <= tol] = False
|
| 147 |
+
with warnings.catch_warnings():
|
| 148 |
+
warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
|
| 149 |
+
category=LinAlgWarning)
|
| 150 |
+
s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym',
|
| 151 |
+
check_finite=False)
|
| 152 |
+
s[~P] = 0 # C.6
|
| 153 |
+
|
| 154 |
+
x[:] = s[:]
|
| 155 |
+
w[:] = Atb - AtA @ x
|
| 156 |
+
|
| 157 |
+
if iter == maxiter:
|
| 158 |
+
# Typically following line should return
|
| 159 |
+
# return x, np.linalg.norm(A@x - b), -1
|
| 160 |
+
# however at the top level, -1 raises an exception wasting norm
|
| 161 |
+
# Instead return dummy number 0.
|
| 162 |
+
return x, 0., -1
|
| 163 |
+
|
| 164 |
+
return x, np.linalg.norm(A@x - b), 1
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_nonlin.py
ADDED
|
@@ -0,0 +1,1585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
|
| 2 |
+
# Distributed under the same license as SciPy.
|
| 3 |
+
|
| 4 |
+
import inspect
|
| 5 |
+
import sys
|
| 6 |
+
import warnings
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from numpy import asarray, dot, vdot
|
| 10 |
+
|
| 11 |
+
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
|
| 12 |
+
import scipy.sparse.linalg
|
| 13 |
+
import scipy.sparse
|
| 14 |
+
from scipy.linalg import get_blas_funcs
|
| 15 |
+
from scipy._lib._util import copy_if_needed
|
| 16 |
+
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
|
| 17 |
+
from ._linesearch import scalar_search_wolfe1, scalar_search_armijo
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
'broyden1', 'broyden2', 'anderson', 'linearmixing',
|
| 22 |
+
'diagbroyden', 'excitingmixing', 'newton_krylov',
|
| 23 |
+
'BroydenFirst', 'KrylovJacobian', 'InverseJacobian', 'NoConvergence']
|
| 24 |
+
|
| 25 |
+
#------------------------------------------------------------------------------
|
| 26 |
+
# Utility functions
|
| 27 |
+
#------------------------------------------------------------------------------
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class NoConvergence(Exception):
    """Raised when a nonlinear solver fails to converge within the specified
    `maxiter` iterations."""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def maxnorm(x):
    """Infinity norm: the largest absolute entry of `x`."""
    return np.max(np.absolute(x))
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _as_inexact(x):
|
| 41 |
+
"""Return `x` as an array, of either floats or complex floats"""
|
| 42 |
+
x = asarray(x)
|
| 43 |
+
if not np.issubdtype(x.dtype, np.inexact):
|
| 44 |
+
return asarray(x, dtype=np.float64)
|
| 45 |
+
return x
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _array_like(x, x0):
|
| 49 |
+
"""Return ndarray `x` as same array subclass and shape as `x0`"""
|
| 50 |
+
x = np.reshape(x, np.shape(x0))
|
| 51 |
+
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
|
| 52 |
+
return wrap(x)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _safe_norm(v):
|
| 56 |
+
if not np.isfinite(v).all():
|
| 57 |
+
return np.array(np.inf)
|
| 58 |
+
return norm(v)
|
| 59 |
+
|
| 60 |
+
#------------------------------------------------------------------------------
|
| 61 |
+
# Generic nonlinear solver machinery
|
| 62 |
+
#------------------------------------------------------------------------------
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
_doc_parts = dict(
|
| 66 |
+
params_basic="""
|
| 67 |
+
F : function(x) -> f
|
| 68 |
+
Function whose root to find; should take and return an array-like
|
| 69 |
+
object.
|
| 70 |
+
xin : array_like
|
| 71 |
+
Initial guess for the solution
|
| 72 |
+
""".strip(),
|
| 73 |
+
params_extra="""
|
| 74 |
+
iter : int, optional
|
| 75 |
+
Number of iterations to make. If omitted (default), make as many
|
| 76 |
+
as required to meet tolerances.
|
| 77 |
+
verbose : bool, optional
|
| 78 |
+
Print status to stdout on every iteration.
|
| 79 |
+
maxiter : int, optional
|
| 80 |
+
Maximum number of iterations to make. If more are needed to
|
| 81 |
+
meet convergence, `NoConvergence` is raised.
|
| 82 |
+
f_tol : float, optional
|
| 83 |
+
Absolute tolerance (in max-norm) for the residual.
|
| 84 |
+
If omitted, default is 6e-6.
|
| 85 |
+
f_rtol : float, optional
|
| 86 |
+
Relative tolerance for the residual. If omitted, not used.
|
| 87 |
+
x_tol : float, optional
|
| 88 |
+
Absolute minimum step size, as determined from the Jacobian
|
| 89 |
+
approximation. If the step size is smaller than this, optimization
|
| 90 |
+
is terminated as successful. If omitted, not used.
|
| 91 |
+
x_rtol : float, optional
|
| 92 |
+
Relative minimum step size. If omitted, not used.
|
| 93 |
+
tol_norm : function(vector) -> scalar, optional
|
| 94 |
+
Norm to use in convergence check. Default is the maximum norm.
|
| 95 |
+
line_search : {None, 'armijo' (default), 'wolfe'}, optional
|
| 96 |
+
Which type of a line search to use to determine the step size in the
|
| 97 |
+
direction given by the Jacobian approximation. Defaults to 'armijo'.
|
| 98 |
+
callback : function, optional
|
| 99 |
+
Optional callback function. It is called on every iteration as
|
| 100 |
+
``callback(x, f)`` where `x` is the current solution and `f`
|
| 101 |
+
the corresponding residual.
|
| 102 |
+
|
| 103 |
+
Returns
|
| 104 |
+
-------
|
| 105 |
+
sol : ndarray
|
| 106 |
+
An array (of similar array type as `x0`) containing the final solution.
|
| 107 |
+
|
| 108 |
+
Raises
|
| 109 |
+
------
|
| 110 |
+
NoConvergence
|
| 111 |
+
When a solution was not found.
|
| 112 |
+
|
| 113 |
+
""".strip()
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _set_doc(obj):
    """Interpolate the shared `_doc_parts` fragments into `obj.__doc__`,
    if the object has a docstring at all."""
    doc = obj.__doc__
    if doc:
        obj.__doc__ = doc % _doc_parts
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
                 maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 tol_norm=None, line_search='armijo', callback=None,
                 full_output=False, raise_exception=True):
    """
    Find a root of a function, in a way suitable for large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    jacobian : Jacobian
        A Jacobian approximation: `Jacobian` object or something that
        `asjacobian` can transform to one. Alternatively, a string specifying
        which of the builtin Jacobian approximations to use:

            krylov, broyden1, broyden2, anderson
            diagbroyden, linearmixing, excitingmixing

    %(params_extra)s
    full_output : bool
        If true, returns a dictionary `info` containing convergence
        information.
    raise_exception : bool
        If True, a `NoConvergence` exception is raised if no solution is
        found.

    See Also
    --------
    asjacobian, Jacobian

    Notes
    -----
    This algorithm implements the inexact Newton method, with backtracking
    or full line searches. Several Jacobian approximations are available,
    including Krylov and Quasi-Newton methods.

    References
    ----------
    .. [KIM] C. T. Kelley, "Iterative Methods for Linear and Nonlinear
       Equations". Society for Industrial and Applied Mathematics. (1995)
       https://archive.siam.org/books/kelley/fr16/

    """
    # `tol_norm` may be passed explicitly as None by the wrapper functions,
    # so the default cannot be expressed as a parameter default.
    tol_norm = maxnorm if tol_norm is None else tol_norm
    condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
                                     x_tol=x_tol, x_rtol=x_rtol,
                                     iter=iter, norm=tol_norm)

    x0 = _as_inexact(x0)

    def func(z):
        # Evaluate F in the caller's array type, returning a flat 1-D array.
        return _as_inexact(F(_array_like(z, x0))).flatten()

    x = x0.flatten()

    dx = np.full_like(x, np.inf)
    Fx = func(x)
    Fx_norm = norm(Fx)

    jacobian = asjacobian(jacobian)
    jacobian.setup(x.copy(), Fx, func)

    if maxiter is None:
        maxiter = (iter + 1) if iter is not None else 100 * (x.size + 1)

    # Normalize boolean line-search flags to the string/None form.
    if line_search is True:
        line_search = 'armijo'
    elif line_search is False:
        line_search = None

    if line_search not in (None, 'armijo', 'wolfe'):
        raise ValueError("Invalid line search")

    # Forcing-term parameters for the inexact Newton iteration.
    gamma = 0.9
    eta_max = 0.9999
    eta_treshold = 0.1
    eta = 1e-3

    for n in range(maxiter):
        status = condition.check(Fx, x, dx)
        if status:
            break

        # Inner tolerance for the Jacobian solve, computed as for the
        # scipy.sparse.linalg.* routines.
        tol = min(eta, eta * Fx_norm)
        dx = -jacobian.solve(Fx, tol=tol)

        if norm(dx) == 0:
            raise ValueError("Jacobian inversion yielded zero vector. "
                             "This indicates a bug in the Jacobian "
                             "approximation.")

        # Either a line search along dx, or a plain full Newton step.
        if line_search:
            s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
                                                        line_search)
        else:
            s = 1.0
            x = x + dx
            Fx = func(x)
            Fx_norm_new = norm(Fx)

        jacobian.update(x.copy(), Fx)

        if callback:
            callback(x, Fx)

        # Adjust forcing parameters from the achieved residual reduction.
        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_treshold:
            eta = min(eta_max, eta_A)
        else:
            eta = min(eta_max, max(eta_A, gamma * eta**2))

        Fx_norm = Fx_norm_new

        if verbose:
            sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
                n, tol_norm(Fx), s))
            sys.stdout.flush()
    else:
        # Loop exhausted without the termination condition firing.
        if raise_exception:
            raise NoConvergence(_array_like(x, x0))
        else:
            status = 2

    if not full_output:
        return _array_like(x, x0)

    info = {'nit': condition.iteration,
            'fun': Fx,
            'status': status,
            'success': status == 1,
            'message': {1: 'A solution was found at the specified '
                           'tolerance.',
                        2: 'The maximum number of iterations allowed '
                           'has been reached.'
                        }[status]
            }
    return _array_like(x, x0), info
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
_set_doc(nonlin_solve)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
                        smin=1e-2):
    """Choose a step length ``s`` along ``dx`` reducing ``|F(x + s dx)|**2``.

    Returns ``(s, x_new, F_new, |F_new|)``. One-element lists cache the
    most recent evaluation so the accepted point is not recomputed.
    """
    cache_s = [0]
    cache_F = [Fx]
    cache_phi = [norm(Fx)**2]
    rel_scale = norm(x) / norm(dx)

    def phi(s, store=True):
        # Squared residual norm at x + s*dx, memoizing the last evaluation.
        if s == cache_s[0]:
            return cache_phi[0]
        residual = func(x + s*dx)
        value = _safe_norm(residual)**2
        if store:
            cache_s[0] = s
            cache_phi[0] = value
            cache_F[0] = residual
        return value

    def derphi(s):
        # One-sided finite-difference derivative of phi.
        ds = (abs(s) + rel_scale + 1) * rdiff
        return (phi(s + ds, store=False) - phi(s)) / ds

    if search_type == 'wolfe':
        s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, cache_phi[0],
                                             xtol=1e-2, amin=smin)
    elif search_type == 'armijo':
        s, phi1 = scalar_search_armijo(phi, cache_phi[0], -cache_phi[0],
                                       amin=smin)

    if s is None:
        # XXX: No suitable step length found. Take the full Newton step,
        # and hope for the best.
        s = 1.0

    x = x + s*dx
    Fx = cache_F[0] if s == cache_s[0] else func(x)

    return s, x, Fx, norm(Fx)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
class TerminationCondition:
    """
    Termination condition for an iteration. It is terminated if

    - |F| < f_rtol*|F_0|, AND
    - |F| < f_tol

    AND

    - |dx| < x_rtol*|x|, AND
    - |dx| < x_tol

    """

    def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 iter=None, norm=maxnorm):
        # Unspecified tolerances default to eps**(1/3) for the absolute
        # residual and to "no constraint" (inf) for everything else.
        self.f_tol = np.finfo(np.float64).eps ** (1./3) if f_tol is None else f_tol
        self.f_rtol = np.inf if f_rtol is None else f_rtol
        self.x_tol = np.inf if x_tol is None else x_tol
        self.x_rtol = np.inf if x_rtol is None else x_rtol

        self.norm = norm
        self.iter = iter

        self.f0_norm = None    # residual norm at the first checked point
        self.iteration = 0     # number of check() calls so far

    def check(self, f, x, dx):
        """Return a nonzero status code when the iteration should stop."""
        self.iteration += 1
        f_norm = self.norm(f)
        x_norm = self.norm(x)
        dx_norm = self.norm(dx)

        if self.f0_norm is None:
            self.f0_norm = f_norm

        if f_norm == 0:
            return 1

        if self.iter is not None:
            # backwards compatibility with SciPy 0.6.0: fixed iteration count
            return 2 * (self.iteration > self.iter)

        # NB: condition must succeed for rtol=inf even if norm == 0, hence
        # the division-by-rtol formulation.
        return int((f_norm <= self.f_tol
                    and f_norm / self.f_rtol <= self.f0_norm)
                   and (dx_norm <= self.x_tol
                        and dx_norm / self.x_rtol <= x_norm))
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
#------------------------------------------------------------------------------
|
| 378 |
+
# Generic Jacobian approximation
|
| 379 |
+
#------------------------------------------------------------------------------
|
| 380 |
+
|
| 381 |
+
class Jacobian:
    """
    Common interface for Jacobians or Jacobian approximations.

    The optional methods come useful when implementing trust region
    etc., algorithms that often require evaluating transposes of the
    Jacobian.

    Methods
    -------
    solve
        Returns J^-1 * v
    update
        Updates Jacobian to point `x` (where the function has residual `Fx`)
    matvec : optional
        Returns J * v
    rmatvec : optional
        Returns A^H * v
    rsolve : optional
        Returns A^-H * v
    matmat : optional
        Returns A * V, where V is a dense matrix with dimensions (N,K).
    todense : optional
        Form the dense Jacobian matrix. Necessary for dense trust region
        algorithms, and useful for testing.

    Attributes
    ----------
    shape
        Matrix dimensions (M, N)
    dtype
        Data type of the matrix.
    func : callable, optional
        Function the Jacobian corresponds to

    """

    def __init__(self, **kw):
        # Only these attribute names may be injected through keywords.
        known = ["solve", "update", "matvec", "rmatvec", "rsolve",
                 "matmat", "todense", "shape", "dtype"]
        for name, value in kw.items():
            if name not in known:
                raise ValueError("Unknown keyword argument %s" % name)
            if value is not None:
                setattr(self, name, kw[name])

        if hasattr(self, "todense"):
            # NOTE(review): in this view the nested function is defined but
            # no binding of it to the instance is visible — confirm against
            # the full source before relying on array conversion.
            def __array__(self, dtype=None, copy=None):
                if dtype is not None:
                    raise ValueError(f"`dtype` must be None, was {dtype}")
                return self.todense()

    def aspreconditioner(self):
        """Return an `InverseJacobian` view of this object, usable as a
        preconditioner."""
        return InverseJacobian(self)

    def solve(self, v, tol=0):
        raise NotImplementedError

    def update(self, x, F):
        """Update the approximation to point `x` with residual `F`
        (no-op by default)."""
        pass

    def setup(self, x, F, func):
        """Record problem metadata before the iteration starts."""
        self.func = func
        self.shape = (F.size, x.size)
        self.dtype = F.dtype
        if self.__class__.setup is Jacobian.setup:
            # Call update on the first point unless a subclass overrides
            # setup itself.
            self.update(x, F)
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
class InverseJacobian:
    """Adapter exposing ``jacobian.solve`` as a matvec, i.e. the action of
    the inverse Jacobian; suitable for use as a preconditioner."""

    def __init__(self, jacobian):
        self.jacobian = jacobian
        # The inverse's forward action is the wrapped object's solve; the
        # adjoint action comes from rsolve when the object provides one.
        self.matvec = jacobian.solve
        self.update = jacobian.update
        if hasattr(jacobian, 'setup'):
            self.setup = jacobian.setup
        if hasattr(jacobian, 'rsolve'):
            self.rmatvec = jacobian.rsolve

    @property
    def shape(self):
        return self.jacobian.shape

    @property
    def dtype(self):
        return self.jacobian.dtype
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def asjacobian(J):
    """
    Convert given object to one suitable for use as a Jacobian.

    Accepts `Jacobian` instances or subclasses, dense arrays, sparse
    matrices, duck-typed objects with a ``solve`` method, callables
    returning a matrix, and names of the builtin approximations.
    """
    spsolve = scipy.sparse.linalg.spsolve

    if isinstance(J, Jacobian):
        return J

    if inspect.isclass(J) and issubclass(J, Jacobian):
        return J()

    if isinstance(J, np.ndarray):
        if J.ndim > 2:
            raise ValueError('array must have rank <= 2')
        J = np.atleast_2d(np.asarray(J))
        if J.shape[0] != J.shape[1]:
            raise ValueError('array must be square')

        return Jacobian(matvec=lambda v: dot(J, v),
                        rmatvec=lambda v: dot(J.conj().T, v),
                        solve=lambda v, tol=0: solve(J, v),
                        rsolve=lambda v, tol=0: solve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)

    if scipy.sparse.issparse(J):
        if J.shape[0] != J.shape[1]:
            raise ValueError('matrix must be square')
        return Jacobian(matvec=lambda v: J @ v,
                        rmatvec=lambda v: J.conj().T @ v,
                        solve=lambda v, tol=0: spsolve(J, v),
                        rsolve=lambda v, tol=0: spsolve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)

    if hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
        return Jacobian(matvec=getattr(J, 'matvec'),
                        rmatvec=getattr(J, 'rmatvec'),
                        solve=J.solve,
                        rsolve=getattr(J, 'rsolve'),
                        update=getattr(J, 'update'),
                        setup=getattr(J, 'setup'),
                        dtype=J.dtype,
                        shape=J.shape)

    if callable(J):
        # Assume it's a function J(x) that returns the Jacobian matrix.
        class Jac(Jacobian):
            def update(self, x, F):
                self.x = x

            def _mat(self):
                # Re-evaluate the Jacobian at the last recorded point.
                return J(self.x)

            def solve(self, v, tol=0):
                m = self._mat()
                if isinstance(m, np.ndarray):
                    return solve(m, v)
                elif scipy.sparse.issparse(m):
                    return spsolve(m, v)
                else:
                    raise ValueError("Unknown matrix type")

            def matvec(self, v):
                m = self._mat()
                if isinstance(m, np.ndarray):
                    return dot(m, v)
                elif scipy.sparse.issparse(m):
                    return m @ v
                else:
                    raise ValueError("Unknown matrix type")

            def rsolve(self, v, tol=0):
                m = self._mat()
                if isinstance(m, np.ndarray):
                    return solve(m.conj().T, v)
                elif scipy.sparse.issparse(m):
                    return spsolve(m.conj().T, v)
                else:
                    raise ValueError("Unknown matrix type")

            def rmatvec(self, v):
                m = self._mat()
                if isinstance(m, np.ndarray):
                    return dot(m.conj().T, v)
                elif scipy.sparse.issparse(m):
                    return m.conj().T @ v
                else:
                    raise ValueError("Unknown matrix type")

        return Jac()

    if isinstance(J, str):
        builtin = {'broyden1': BroydenFirst,
                   'broyden2': BroydenSecond,
                   'anderson': Anderson,
                   'diagbroyden': DiagBroyden,
                   'linearmixing': LinearMixing,
                   'excitingmixing': ExcitingMixing,
                   'krylov': KrylovJacobian}
        return builtin[J]()

    raise TypeError('Cannot convert object to a Jacobian')
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
#------------------------------------------------------------------------------
|
| 565 |
+
# Broyden
|
| 566 |
+
#------------------------------------------------------------------------------
|
| 567 |
+
|
| 568 |
+
class GenericBroyden(Jacobian):
    # Base class for Broyden-like approximations: remembers the previous
    # iterate/residual pair and funnels each new secant pair into the
    # subclass-specific `_update` hook.

    def setup(self, x0, f0, func):
        """Record the starting point and auto-scale `alpha` when unset."""
        Jacobian.setup(self, x0, f0, func)
        self.last_f = f0
        self.last_x = x0

        # Autoscale the initial Jacobian parameter
        # unless we have already guessed the solution.
        if hasattr(self, 'alpha') and self.alpha is None:
            f0_norm = norm(f0)
            if f0_norm:
                self.alpha = 0.5 * max(norm(x0), 1) / f0_norm
            else:
                self.alpha = 1.0

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        """Incorporate one secant pair (dx, df); implemented by subclasses."""
        raise NotImplementedError

    def update(self, x, f):
        """Advance the approximation to the new iterate `x` with residual `f`."""
        step = x - self.last_x
        resid_change = f - self.last_f
        self._update(x, f, step, resid_change,
                     norm(step), norm(resid_change))
        self.last_f = f
        self.last_x = x
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
class LowRankMatrix:
    r"""
    A matrix represented as

    .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger

    i.e., a scaled identity plus a list of rank-1 terms ``(c_n, d_n)``.

    However, if the rank of the matrix reaches the dimension of the vectors,
    full matrix representation will be used thereon.

    """

    def __init__(self, alpha, n, dtype):
        self.alpha = alpha      # scale of the identity part
        self.cs = []            # left vectors of the rank-1 terms
        self.ds = []            # right vectors of the rank-1 terms
        self.n = n              # matrix dimension
        self.dtype = dtype
        self.collapsed = None   # dense representation, once used

    @staticmethod
    def _matvec(v, alpha, cs, ds):
        """Evaluate w = (alpha*I + sum_n c_n d_n^H) v using BLAS kernels."""
        axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
                                          cs[:1] + [v])
        out = alpha * v
        for c, d in zip(cs, ds):
            coeff = dotc(d, v)                    # d^H v
            out = axpy(c, out, out.size, coeff)   # out += coeff * c
        return out

    @staticmethod
    def _solve(v, alpha, cs, ds):
        """Evaluate w = M^-1 v"""
        if not cs:
            return v / alpha

        # Sherman-Morrison-Woodbury:
        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])

        k = len(cs)
        # Small (k x k) system: alpha*I + D^H C.
        A = alpha * np.identity(k, dtype=cs[0].dtype)
        for i, d in enumerate(ds):
            for j, c in enumerate(cs):
                A[i, j] += dotc(d, c)

        # Right-hand side of the small system: (D^H v) / alpha.
        q = np.zeros(k, dtype=cs[0].dtype)
        for j, d in enumerate(ds):
            q[j] = dotc(d, v)
        q /= alpha
        q = solve(A, q)

        # w = v/alpha - C q.
        w = v / alpha
        for c, qc in zip(cs, q):
            w = axpy(c, w, w.size, -qc)

        return w

    def matvec(self, v):
        """Evaluate w = M v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed, v)
        return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)

    def rmatvec(self, v):
        """Evaluate w = M^H v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed.T.conj(), v)
        # The adjoint swaps the roles of the c and d vectors.
        return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)

    def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)

    def rsolve(self, v, tol=0):
        """Evaluate w = M^-H v"""
        if self.collapsed is not None:
            return solve(self.collapsed.T.conj(), v)
        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)

    def append(self, c, d):
        """Add one rank-1 term c d^H to the matrix."""
        if self.collapsed is not None:
            self.collapsed += c[:, None] * d[None, :].conj()
            return

        self.cs.append(c)
        self.ds.append(d)

        # Once there are more terms than dimensions the low-rank form no
        # longer saves anything -- switch to a dense matrix.
        if len(self.cs) > c.size:
            self.collapse()

    def __array__(self, dtype=None, copy=None):
        # `dtype`/`copy` exist only to satisfy the NumPy 2 __array__
        # protocol; neither is actually handled.
        if dtype is not None:
            warnings.warn("LowRankMatrix is scipy-internal code, `dtype` "
                          f"should only be None but was {dtype} (not handled)",
                          stacklevel=3)
        if copy is not None:
            warnings.warn("LowRankMatrix is scipy-internal code, `copy` "
                          f"should only be None but was {copy} (not handled)",
                          stacklevel=3)
        if self.collapsed is not None:
            return self.collapsed

        # Materialize alpha*I + sum c d^H densely.
        Gm = self.alpha * np.identity(self.n, dtype=self.dtype)
        for c, d in zip(self.cs, self.ds):
            Gm += c[:, None] * d[None, :].conj()
        return Gm

    def collapse(self):
        """Collapse the low-rank matrix to a full-rank one."""
        self.collapsed = np.array(self, copy=copy_if_needed)
        self.cs = None
        self.ds = None
        self.alpha = None

    def restart_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping all vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        if len(self.cs) > rank:
            del self.cs[:]
            del self.ds[:]

    def simple_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping oldest vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        while len(self.cs) > rank:
            del self.cs[0]
            del self.ds[0]

    def svd_reduce(self, max_rank, to_retain=None):
        """
        Reduce the rank of the matrix by retaining some SVD components.

        This corresponds to the "Broyden Rank Reduction Inverse"
        algorithm described in [1]_.

        Note that the SVD decomposition can be done by solving only a
        problem whose size is the effective rank of this matrix, which
        is viable even for large problems.

        Parameters
        ----------
        max_rank : int
            Maximum rank of this matrix after reduction.
        to_retain : int, optional
            Number of SVD components to retain when reduction is done
            (ie. rank > max_rank). Default is ``max_rank - 2``.

        References
        ----------
        .. [1] B.A. van der Rotten, PhD thesis,
           "A limited memory Broyden method to solve high-dimensional
           systems of nonlinear equations". Mathematisch Instituut,
           Universiteit Leiden, The Netherlands (2003).

           https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

        """
        if self.collapsed is not None:
            return

        p = max_rank
        q = to_retain if to_retain is not None else p - 2

        if self.cs:
            p = min(p, len(self.cs[0]))
        q = max(0, min(q, p - 1))

        m = len(self.cs)
        if m < p:
            # nothing to do
            return

        C = np.array(self.cs).T
        D = np.array(self.ds).T

        # Orthonormalize D so the SVD acts on a small, well-conditioned C.
        D, R = qr(D, mode='economic')
        C = dot(C, R.T.conj())

        U, S, WH = svd(C, full_matrices=False)

        C = dot(C, inv(WH))
        D = dot(D, WH.T.conj())

        # Keep only the q most significant components.
        for k in range(q):
            self.cs[k] = C[:, k].copy()
            self.ds[k] = D[:, k].copy()

        del self.cs[q:]
        del self.ds[q:]
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
_doc_parts['broyden_params'] = """
|
| 799 |
+
alpha : float, optional
|
| 800 |
+
Initial guess for the Jacobian is ``(-1/alpha)``.
|
| 801 |
+
reduction_method : str or tuple, optional
|
| 802 |
+
Method used in ensuring that the rank of the Broyden matrix
|
| 803 |
+
stays low. Can either be a string giving the name of the method,
|
| 804 |
+
or a tuple of the form ``(method, param1, param2, ...)``
|
| 805 |
+
that gives the name of the method and values for additional parameters.
|
| 806 |
+
|
| 807 |
+
Methods available:
|
| 808 |
+
|
| 809 |
+
- ``restart``: drop all matrix columns. Has no extra parameters.
|
| 810 |
+
- ``simple``: drop oldest matrix column. Has no extra parameters.
|
| 811 |
+
- ``svd``: keep only the most significant SVD components.
|
| 812 |
+
Takes an extra parameter, ``to_retain``, which determines the
|
| 813 |
+
number of SVD components to retain when rank reduction is done.
|
| 814 |
+
Default is ``max_rank - 2``.
|
| 815 |
+
|
| 816 |
+
max_rank : int, optional
|
| 817 |
+
Maximum rank for the Broyden matrix.
|
| 818 |
+
Default is infinity (i.e., no rank reduction).
|
| 819 |
+
""".strip()
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
class BroydenFirst(GenericBroyden):
    r"""
    Find a root of a function, using Broyden's first Jacobian approximation.

    This method is also known as \"Broyden's good method\".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='broyden1'`` in particular.

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)

    which corresponds to Broyden's first Jacobian update

    .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx


    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       \"A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations\". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).

       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.broyden1(fun, [0, 0])
    >>> sol
    array([0.84116396, 0.15883641])

    """

    def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        # `Gm` holds the approximation to the *inverse* Jacobian
        # (see `solve`/`matvec` below); created in `setup` once the
        # problem size and dtype are known.
        self.Gm = None

        if max_rank is None:
            max_rank = np.inf
        self.max_rank = max_rank

        # `reduction_method` is either a name or (name, param1, param2, ...).
        if isinstance(reduction_method, str):
            reduce_params = ()
        else:
            reduce_params = reduction_method[1:]
            reduction_method = reduction_method[0]
        # Every reduction method receives the rank bound as first argument.
        reduce_params = (max_rank - 1,) + reduce_params

        # Bind the chosen rank-reduction strategy as a zero-argument closure;
        # called at the start of each `_update`.
        if reduction_method == 'svd':
            self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
        elif reduction_method == 'simple':
            self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
        elif reduction_method == 'restart':
            self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
        else:
            raise ValueError("Unknown rank reduction method '%s'" %
                             reduction_method)

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # Initial inverse-Jacobian approximation H = -alpha * I.
        self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)

    def todense(self):
        # The Jacobian is the inverse of the stored H approximation.
        return inv(self.Gm)

    def solve(self, f, tol=0):
        # J^-1 f ~ H f, i.e. a matvec with the stored approximation.
        r = self.Gm.matvec(f)
        if not np.isfinite(r).all():
            # singular; reset the Jacobian approximation
            self.setup(self.last_x, self.last_f, self.func)
            return self.Gm.matvec(f)
        return r

    def matvec(self, f):
        # J f = H^-1 f, since `Gm` approximates the inverse Jacobian.
        return self.Gm.solve(f)

    def rsolve(self, f, tol=0):
        return self.Gm.rmatvec(f)

    def rmatvec(self, f):
        return self.Gm.rsolve(f)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        self._reduce()  # reduce first to preserve secant condition

        # Good Broyden update H+ = H + (dx - H df) dx^H H / (dx^H H df),
        # appended as a single rank-1 term c d^H.
        v = self.Gm.rmatvec(dx)
        c = dx - self.Gm.matvec(df)
        d = v / vdot(df, v)

        self.Gm.append(c, d)
|
| 934 |
+
|
| 935 |
+
|
| 936 |
+
class BroydenSecond(BroydenFirst):
    """
    Find a root of a function, using Broyden\'s second Jacobian approximation.

    This method is also known as \"Broyden's bad method\".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='broyden2'`` in particular.

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)

    corresponding to Broyden's second method.

    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       \"A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations\". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).

       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.broyden2(fun, [0, 0])
    >>> sol
    array([0.84116365, 0.15883529])

    """

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        self._reduce()  # reduce first to preserve secant condition

        # Bad Broyden update H+ = H + (dx - H df) df^H / ||df||^2: the
        # same rank-1 form as BroydenFirst, but with v = df.
        v = df
        c = dx - self.Gm.matvec(df)
        d = v / df_norm**2
        self.Gm.append(c, d)
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
#------------------------------------------------------------------------------
|
| 997 |
+
# Broyden-like (restricted memory)
|
| 998 |
+
#------------------------------------------------------------------------------
|
| 999 |
+
|
| 1000 |
+
class Anderson(GenericBroyden):
    """
    Find a root of a function, using (extended) Anderson mixing.

    The Jacobian is formed from a 'best' solution in the space
    spanned by the last `M` vectors. As a result, only MxM matrix
    inversions and MxN multiplications are required. [Ey]_

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    M : float, optional
        Number of previous vectors to retain. Defaults to 5.
    w0 : float, optional
        Regularization parameter for numerical stability.
        Compared to unity, good values of the order of 0.01.
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='anderson'`` in particular.

    References
    ----------
    .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.anderson(fun, [0, 0])
    >>> sol
    array([0.84116588, 0.15883789])

    """

    # Note:
    #
    # Anderson method maintains a rank M approximation of the inverse Jacobian,
    #
    #     J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
    #     A      = W + dF^H dF
    #     W      = w0^2 diag(dF^H dF)
    #
    # so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
    #
    #     J^-1 df_j = dx_j
    #
    # for all j = 0 ... M-1.
    #
    # Moreover, (from Sherman-Morrison-Woodbury formula)
    #
    #     J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
    #     C   = (dX + alpha dF) A^-1
    #     b   = -1/alpha
    #
    # and after simplification
    #
    #     J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
    #

    def __init__(self, alpha=None, w0=0.01, M=5):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.M = M          # number of previous (dx, df) pairs to retain
        self.dx = []        # history of iterate differences
        self.df = []        # history of residual differences
        self.gamma = None
        self.w0 = w0        # regularization weight for the Gram matrix

    def solve(self, f, tol=0):
        # First-order term of J^-1 f (see the note above).
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        # Projections (dF^H f)_k of f on the stored residual differences.
        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            # `self.a` is the regularized Gram matrix A built in `_update`.
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        # Accumulate (dX + alpha dF) A^-1 dF^H f.
        for m in range(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx

    def matvec(self, f):
        # J f ~ -f/alpha + correction (see the simplified formula above).
        dx = -f/self.alpha

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        # Small system matrix (dF^H dX - alpha W).
        b = np.empty((n, n), dtype=f.dtype)
        for i in range(n):
            for j in range(n):
                b[i,j] = vdot(self.df[i], self.dx[j])
                if i == j and self.w0 != 0:
                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
        gamma = solve(b, df_f)

        for m in range(n):
            dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
        return dx

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        if self.M == 0:
            return

        self.dx.append(dx)
        self.df.append(df)

        # Keep only the last M secant pairs.
        while len(self.dx) > self.M:
            self.dx.pop(0)
            self.df.pop(0)

        # Rebuild the regularized Gram matrix A = W + dF^H dF (see the
        # note above): only the upper triangle is computed directly,
        # then mirrored to make A Hermitian.
        n = len(self.dx)
        a = np.zeros((n, n), dtype=f.dtype)

        for i in range(n):
            for j in range(i, n):
                if i == j:
                    wd = self.w0**2
                else:
                    wd = 0
                a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])

        a += np.triu(a, 1).T.conj()
        self.a = a
|
| 1150 |
+
|
| 1151 |
+
#------------------------------------------------------------------------------
|
| 1152 |
+
# Simple iterations
|
| 1153 |
+
#------------------------------------------------------------------------------
|
| 1154 |
+
|
| 1155 |
+
|
| 1156 |
+
class DiagBroyden(GenericBroyden):
    """
    Find a root of a function, using diagonal Broyden Jacobian approximation.

    The Jacobian approximation is derived from previous iterations, by
    retaining only the diagonal of Broyden matrices.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='diagbroyden'`` in particular.

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.diagbroyden(fun, [0, 0])
    >>> sol
    array([0.84116403, 0.15883384])

    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # `d` is the (negated) diagonal of the Jacobian approximation:
        # J ~ -diag(d), as `todense` below shows.
        self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)

    def solve(self, f, tol=0):
        # Inverting a diagonal Jacobian is an elementwise division.
        return -(f / self.d)

    def matvec(self, f):
        return -(self.d * f)

    def rsolve(self, f, tol=0):
        # Adjoint counterparts use the conjugated diagonal.
        return -(f / self.d.conj())

    def rmatvec(self, f):
        return -(self.d.conj() * f)

    def todense(self):
        return np.diag(-self.d)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Broyden secant update restricted to the diagonal.
        self.d -= (df + self.d*dx)*dx/dx_norm**2
|
| 1222 |
+
|
| 1223 |
+
|
| 1224 |
+
class LinearMixing(GenericBroyden):
    """
    Find a root of a function, using a scalar Jacobian approximation.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        The Jacobian approximation is (-1/alpha).
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='linearmixing'`` in particular.

    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def solve(self, f, tol=0):
        # J = -(1/alpha) I, so J^-1 f = -alpha*f.
        return -self.alpha * f

    def matvec(self, f):
        return -(f / self.alpha)

    def rsolve(self, f, tol=0):
        return -np.conj(self.alpha) * f

    def rmatvec(self, f):
        return -(f / np.conj(self.alpha))

    def todense(self):
        return np.diag(np.full(self.shape[0], -1/self.alpha))

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # The Jacobian approximation is a fixed scalar; nothing to update.
        pass
|
| 1268 |
+
|
| 1269 |
+
|
| 1270 |
+
class ExcitingMixing(GenericBroyden):
    """
    Find a root of a function, using a tuned diagonal Jacobian approximation.

    The Jacobian matrix is diagonal and is tuned on each iteration.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='excitingmixing'`` in particular.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial Jacobian approximation is (-1/alpha).
    alphamax : float, optional
        The entries of the diagonal Jacobian are kept in the range
        ``[alpha, alphamax]``.
    %(params_extra)s
    """

    def __init__(self, alpha=None, alphamax=1.0):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.alphamax = alphamax
        self.beta = None

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # Per-component mixing parameters, all starting at `alpha`.
        self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)

    def solve(self, f, tol=0):
        return -(self.beta * f)

    def matvec(self, f):
        return -(f / self.beta)

    def rsolve(self, f, tol=0):
        return -(self.beta.conj() * f)

    def rmatvec(self, f):
        return -(f / self.beta.conj())

    def todense(self):
        return np.diag(-1/self.beta)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Grow beta where the residual kept its sign, reset it elsewhere,
        # then clamp everything to [0, alphamax].
        same_sign = f * self.last_f > 0
        self.beta[same_sign] += self.alpha
        self.beta[~same_sign] = self.alpha
        np.clip(self.beta, 0, self.alphamax, out=self.beta)
|
| 1327 |
+
|
| 1328 |
+
|
| 1329 |
+
#------------------------------------------------------------------------------
|
| 1330 |
+
# Iterative/Krylov approximated Jacobians
|
| 1331 |
+
#------------------------------------------------------------------------------
|
| 1332 |
+
|
| 1333 |
+
class KrylovJacobian(Jacobian):
|
| 1334 |
+
r"""
|
| 1335 |
+
Find a root of a function, using Krylov approximation for inverse Jacobian.
|
| 1336 |
+
|
| 1337 |
+
This method is suitable for solving large-scale problems.
|
| 1338 |
+
|
| 1339 |
+
Parameters
|
| 1340 |
+
----------
|
| 1341 |
+
%(params_basic)s
|
| 1342 |
+
rdiff : float, optional
|
| 1343 |
+
Relative step size to use in numerical differentiation.
|
| 1344 |
+
method : str or callable, optional
|
| 1345 |
+
Krylov method to use to approximate the Jacobian. Can be a string,
|
| 1346 |
+
or a function implementing the same interface as the iterative
|
| 1347 |
+
solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
|
| 1348 |
+
``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
|
| 1349 |
+
``'tfqmr'``.
|
| 1350 |
+
|
| 1351 |
+
The default is `scipy.sparse.linalg.lgmres`.
|
| 1352 |
+
inner_maxiter : int, optional
|
| 1353 |
+
Parameter to pass to the "inner" Krylov solver: maximum number of
|
| 1354 |
+
iterations. Iteration will stop after maxiter steps even if the
|
| 1355 |
+
specified tolerance has not been achieved.
|
| 1356 |
+
inner_M : LinearOperator or InverseJacobian
|
| 1357 |
+
Preconditioner for the inner Krylov iteration.
|
| 1358 |
+
Note that you can use also inverse Jacobians as (adaptive)
|
| 1359 |
+
preconditioners. For example,
|
| 1360 |
+
|
| 1361 |
+
>>> from scipy.optimize import BroydenFirst, KrylovJacobian
|
| 1362 |
+
>>> from scipy.optimize import InverseJacobian
|
| 1363 |
+
>>> jac = BroydenFirst()
|
| 1364 |
+
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
|
| 1365 |
+
|
| 1366 |
+
If the preconditioner has a method named 'update', it will be called
|
| 1367 |
+
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
|
| 1368 |
+
the current point, and ``f`` the current function value.
|
| 1369 |
+
outer_k : int, optional
|
| 1370 |
+
Size of the subspace kept across LGMRES nonlinear iterations.
|
| 1371 |
+
See `scipy.sparse.linalg.lgmres` for details.
|
| 1372 |
+
inner_kwargs : kwargs
|
| 1373 |
+
Keyword parameters for the "inner" Krylov solver
|
| 1374 |
+
(defined with `method`). Parameter names must start with
|
| 1375 |
+
the `inner_` prefix which will be stripped before passing on
|
| 1376 |
+
the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details.
|
| 1377 |
+
%(params_extra)s
|
| 1378 |
+
|
| 1379 |
+
See Also
|
| 1380 |
+
--------
|
| 1381 |
+
root : Interface to root finding algorithms for multivariate
|
| 1382 |
+
functions. See ``method='krylov'`` in particular.
|
| 1383 |
+
scipy.sparse.linalg.gmres
|
| 1384 |
+
scipy.sparse.linalg.lgmres
|
| 1385 |
+
|
| 1386 |
+
Notes
|
| 1387 |
+
-----
|
| 1388 |
+
This function implements a Newton-Krylov solver. The basic idea is
|
| 1389 |
+
to compute the inverse of the Jacobian with an iterative Krylov
|
| 1390 |
+
method. These methods require only evaluating the Jacobian-vector
|
| 1391 |
+
products, which are conveniently approximated by a finite difference:
|
| 1392 |
+
|
| 1393 |
+
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
|
| 1394 |
+
|
| 1395 |
+
Due to the use of iterative matrix inverses, these methods can
|
| 1396 |
+
deal with large nonlinear problems.
|
| 1397 |
+
|
| 1398 |
+
SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
|
| 1399 |
+
solvers to choose from. The default here is `lgmres`, which is a
|
| 1400 |
+
variant of restarted GMRES iteration that reuses some of the
|
| 1401 |
+
information obtained in the previous Newton steps to invert
|
| 1402 |
+
Jacobians in subsequent steps.
|
| 1403 |
+
|
| 1404 |
+
For a review on Newton-Krylov methods, see for example [1]_,
|
| 1405 |
+
and for the LGMRES sparse inverse method, see [2]_.
|
| 1406 |
+
|
| 1407 |
+
References
|
| 1408 |
+
----------
|
| 1409 |
+
.. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method,
|
| 1410 |
+
SIAM, pp.57-83, 2003.
|
| 1411 |
+
:doi:`10.1137/1.9780898718898.ch3`
|
| 1412 |
+
.. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
|
| 1413 |
+
:doi:`10.1016/j.jcp.2003.08.010`
|
| 1414 |
+
.. [3] A.H. Baker and E.R. Jessup and T. Manteuffel,
|
| 1415 |
+
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
|
| 1416 |
+
:doi:`10.1137/S0895479803422014`
|
| 1417 |
+
|
| 1418 |
+
Examples
|
| 1419 |
+
--------
|
| 1420 |
+
The following functions define a system of nonlinear equations
|
| 1421 |
+
|
| 1422 |
+
>>> def fun(x):
|
| 1423 |
+
... return [x[0] + 0.5 * x[1] - 1.0,
|
| 1424 |
+
... 0.5 * (x[1] - x[0]) ** 2]
|
| 1425 |
+
|
| 1426 |
+
A solution can be obtained as follows.
|
| 1427 |
+
|
| 1428 |
+
>>> from scipy import optimize
|
| 1429 |
+
>>> sol = optimize.newton_krylov(fun, [0, 0])
|
| 1430 |
+
>>> sol
|
| 1431 |
+
array([0.66731771, 0.66536458])
|
| 1432 |
+
|
| 1433 |
+
"""
|
| 1434 |
+
|
| 1435 |
+
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
             inner_M=None, outer_k=10, **kw):
    # rdiff: relative step for the J*v finite-difference approximation.
    # Left as None here, it is filled in later in setup() from the dtype eps.
    self.preconditioner = inner_M
    self.rdiff = rdiff
    # Note that this retrieves one of the named functions, or otherwise
    # uses `method` as is (i.e., for a user-provided callable).
    # NOTE(review): 'gcrotmk' is not in this name map even though it is
    # special-cased below, so that branch only triggers when the caller
    # passes the callable itself — confirm this is intentional.
    self.method = dict(
        bicgstab=scipy.sparse.linalg.bicgstab,
        gmres=scipy.sparse.linalg.gmres,
        lgmres=scipy.sparse.linalg.lgmres,
        cgs=scipy.sparse.linalg.cgs,
        minres=scipy.sparse.linalg.minres,
        tfqmr=scipy.sparse.linalg.tfqmr,
        ).get(method, method)

    # Keyword arguments forwarded to the inner Krylov solver on each call.
    self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)

    if self.method is scipy.sparse.linalg.gmres:
        # Replace GMRES's outer iteration with Newton steps
        self.method_kw['restart'] = inner_maxiter
        self.method_kw['maxiter'] = 1
        self.method_kw.setdefault('atol', 0)
    elif self.method in (scipy.sparse.linalg.gcrotmk,
                         scipy.sparse.linalg.bicgstab,
                         scipy.sparse.linalg.cgs):
        self.method_kw.setdefault('atol', 0)
    elif self.method is scipy.sparse.linalg.lgmres:
        self.method_kw['outer_k'] = outer_k
        # Replace LGMRES's outer iteration with Newton steps
        self.method_kw['maxiter'] = 1
        # Carry LGMRES's `outer_v` vectors across nonlinear iterations
        self.method_kw.setdefault('outer_v', [])
        self.method_kw.setdefault('prepend_outer_v', True)
        # But don't carry the corresponding Jacobian*v products, in case
        # the Jacobian changes a lot in the nonlinear step
        #
        # XXX: some trust-region inspired ideas might be more efficient...
        # See e.g., Brown & Saad. But needs to be implemented separately
        # since it's not an inexact Newton method.
        self.method_kw.setdefault('store_outer_Av', False)
        self.method_kw.setdefault('atol', 0)

    # Any extra keyword must be prefixed 'inner_'; the prefix is stripped
    # and the rest is forwarded verbatim to the inner solver.
    for key, value in kw.items():
        if not key.startswith('inner_'):
            raise ValueError("Unknown parameter %s" % key)
        self.method_kw[key[6:]] = value
|
| 1481 |
+
|
| 1482 |
+
def _update_diff_step(self):
|
| 1483 |
+
mx = abs(self.x0).max()
|
| 1484 |
+
mf = abs(self.f0).max()
|
| 1485 |
+
self.omega = self.rdiff * max(1, mx) / max(1, mf)
|
| 1486 |
+
|
| 1487 |
+
def matvec(self, v):
    """Approximate the Jacobian-vector product J @ v by a forward
    finite difference of ``func`` around ``x0``."""
    v_norm = norm(v)
    if v_norm == 0:
        return 0 * v
    scale = self.omega / v_norm
    jv = (self.func(self.x0 + scale * v) - self.f0) / scale
    # Only complain about non-finite output when the input itself was
    # finite; a non-finite v is passed through for the solver to handle.
    if np.all(np.isfinite(v)) and not np.all(np.isfinite(jv)):
        raise ValueError('Function returned non-finite results')
    return jv
|
| 1496 |
+
|
| 1497 |
+
def solve(self, rhs, tol=0):
    """Approximately solve ``J sol = rhs`` with the configured Krylov
    method; `tol` is used as ``rtol`` unless the caller pre-set one."""
    extra = {} if 'rtol' in self.method_kw else {'rtol': tol}
    sol, _info = self.method(self.op, rhs, **extra, **self.method_kw)
    return sol
|
| 1503 |
+
|
| 1504 |
+
def update(self, x, f):
    """Record the new point and residual, refresh ``omega``, and keep a
    stateful preconditioner (one exposing ``update``) in sync."""
    self.x0 = x
    self.f0 = f
    self._update_diff_step()

    precond = self.preconditioner
    if precond is not None and hasattr(precond, 'update'):
        precond.update(x, f)
|
| 1513 |
+
|
| 1514 |
+
def setup(self, x, f, func):
    """Initialize base Jacobian state, wrap ``self`` as a linear operator,
    choose a default ``rdiff`` if needed, and set up the preconditioner."""
    Jacobian.setup(self, x, f, func)
    self.x0 = x
    self.f0 = f
    self.op = scipy.sparse.linalg.aslinearoperator(self)

    # Default relative step: sqrt(machine eps) for the dtype of x.
    if self.rdiff is None:
        self.rdiff = np.finfo(x.dtype).eps ** 0.5

    self._update_diff_step()

    # Forward setup to a stateful preconditioner, if it supports it.
    precond = self.preconditioner
    if precond is not None and hasattr(precond, 'setup'):
        precond.setup(x, f, func)
|
| 1529 |
+
|
| 1530 |
+
|
| 1531 |
+
#------------------------------------------------------------------------------
|
| 1532 |
+
# Wrapper functions
|
| 1533 |
+
#------------------------------------------------------------------------------
|
| 1534 |
+
|
| 1535 |
+
def _nonlin_wrapper(name, jac):
    """
    Construct a solver wrapper with given name and Jacobian approx.

    It inspects the keyword arguments of ``jac.__init__``, and allows to
    use the same arguments in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`

    """
    signature = _getfullargspec(jac.__init__)
    args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
    # Pair each defaulted parameter of jac.__init__ with its default value
    # (defaults align with the tail of the positional argument list).
    kwargs = list(zip(args[-len(defaults):], defaults))
    # kw_str: "a=1, b='x'" — re-declares jac's keywords in the wrapper
    # signature so they show up in pydoc/help.
    kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs])
    if kw_str:
        kw_str = ", " + kw_str
    # kwkw_str: "a=a, b=b" — forwards those keywords back to jac().
    kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs])
    if kwkw_str:
        kwkw_str = kwkw_str + ", "
    # Keyword-only arguments on jac.__init__ are not supported by this
    # template-based generation scheme.
    if kwonlyargs:
        raise ValueError('Unexpected signature %s' % signature)

    # Construct the wrapper function so that its keyword arguments
    # are visible in pydoc.help etc.
    wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
             f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
             tol_norm=None, line_search='armijo', callback=None, **kw):
    jac = %(jac)s(%(kwkw)s **kw)
    return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
                        f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
                        callback)
"""

    wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
                             kwkw=kwkw_str)
    # Execute the generated source in a copy of this module's globals so
    # the wrapper can resolve nonlin_solve and the Jacobian class by name.
    ns = {}
    ns.update(globals())
    exec(wrapper, ns)
    func = ns[name]
    # Reuse the Jacobian class docstring as the solver docstring, then
    # apply the module's doc post-processing.
    func.__doc__ = jac.__doc__
    _set_doc(func)
    return func
|
| 1577 |
+
|
| 1578 |
+
|
| 1579 |
+
# Public solver entry points, generated from the corresponding Jacobian
# approximation classes; each wrapper's docstring is inherited from the
# class it wraps (see _nonlin_wrapper above).
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_numdiff.py
ADDED
|
@@ -0,0 +1,779 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Routines for numerical differentiation."""
|
| 2 |
+
import functools
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.linalg import norm
|
| 5 |
+
|
| 6 |
+
from scipy.sparse.linalg import LinearOperator
|
| 7 |
+
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
|
| 8 |
+
from ._group_columns import group_dense, group_sparse
|
| 9 |
+
from scipy._lib._array_api import atleast_nd, array_namespace
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
|
| 13 |
+
"""Adjust final difference scheme to the presence of bounds.
|
| 14 |
+
|
| 15 |
+
Parameters
|
| 16 |
+
----------
|
| 17 |
+
x0 : ndarray, shape (n,)
|
| 18 |
+
Point at which we wish to estimate derivative.
|
| 19 |
+
h : ndarray, shape (n,)
|
| 20 |
+
Desired absolute finite difference steps.
|
| 21 |
+
num_steps : int
|
| 22 |
+
Number of `h` steps in one direction required to implement finite
|
| 23 |
+
difference scheme. For example, 2 means that we need to evaluate
|
| 24 |
+
f(x0 + 2 * h) or f(x0 - 2 * h)
|
| 25 |
+
scheme : {'1-sided', '2-sided'}
|
| 26 |
+
Whether steps in one or both directions are required. In other
|
| 27 |
+
words '1-sided' applies to forward and backward schemes, '2-sided'
|
| 28 |
+
applies to center schemes.
|
| 29 |
+
lb : ndarray, shape (n,)
|
| 30 |
+
Lower bounds on independent variables.
|
| 31 |
+
ub : ndarray, shape (n,)
|
| 32 |
+
Upper bounds on independent variables.
|
| 33 |
+
|
| 34 |
+
Returns
|
| 35 |
+
-------
|
| 36 |
+
h_adjusted : ndarray, shape (n,)
|
| 37 |
+
Adjusted absolute step sizes. Step size decreases only if a sign flip
|
| 38 |
+
or switching to one-sided scheme doesn't allow to take a full step.
|
| 39 |
+
use_one_sided : ndarray of bool, shape (n,)
|
| 40 |
+
Whether to switch to one-sided scheme. Informative only for
|
| 41 |
+
``scheme='2-sided'``.
|
| 42 |
+
"""
|
| 43 |
+
if scheme == '1-sided':
|
| 44 |
+
use_one_sided = np.ones_like(h, dtype=bool)
|
| 45 |
+
elif scheme == '2-sided':
|
| 46 |
+
h = np.abs(h)
|
| 47 |
+
use_one_sided = np.zeros_like(h, dtype=bool)
|
| 48 |
+
else:
|
| 49 |
+
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
|
| 50 |
+
|
| 51 |
+
if np.all((lb == -np.inf) & (ub == np.inf)):
|
| 52 |
+
return h, use_one_sided
|
| 53 |
+
|
| 54 |
+
h_total = h * num_steps
|
| 55 |
+
h_adjusted = h.copy()
|
| 56 |
+
|
| 57 |
+
lower_dist = x0 - lb
|
| 58 |
+
upper_dist = ub - x0
|
| 59 |
+
|
| 60 |
+
if scheme == '1-sided':
|
| 61 |
+
x = x0 + h_total
|
| 62 |
+
violated = (x < lb) | (x > ub)
|
| 63 |
+
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
|
| 64 |
+
h_adjusted[violated & fitting] *= -1
|
| 65 |
+
|
| 66 |
+
forward = (upper_dist >= lower_dist) & ~fitting
|
| 67 |
+
h_adjusted[forward] = upper_dist[forward] / num_steps
|
| 68 |
+
backward = (upper_dist < lower_dist) & ~fitting
|
| 69 |
+
h_adjusted[backward] = -lower_dist[backward] / num_steps
|
| 70 |
+
elif scheme == '2-sided':
|
| 71 |
+
central = (lower_dist >= h_total) & (upper_dist >= h_total)
|
| 72 |
+
|
| 73 |
+
forward = (upper_dist >= lower_dist) & ~central
|
| 74 |
+
h_adjusted[forward] = np.minimum(
|
| 75 |
+
h[forward], 0.5 * upper_dist[forward] / num_steps)
|
| 76 |
+
use_one_sided[forward] = True
|
| 77 |
+
|
| 78 |
+
backward = (upper_dist < lower_dist) & ~central
|
| 79 |
+
h_adjusted[backward] = -np.minimum(
|
| 80 |
+
h[backward], 0.5 * lower_dist[backward] / num_steps)
|
| 81 |
+
use_one_sided[backward] = True
|
| 82 |
+
|
| 83 |
+
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
|
| 84 |
+
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
|
| 85 |
+
h_adjusted[adjusted_central] = min_dist[adjusted_central]
|
| 86 |
+
use_one_sided[adjusted_central] = False
|
| 87 |
+
|
| 88 |
+
return h_adjusted, use_one_sided
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@functools.lru_cache
|
| 92 |
+
def _eps_for_method(x0_dtype, f0_dtype, method):
|
| 93 |
+
"""
|
| 94 |
+
Calculates relative EPS step to use for a given data type
|
| 95 |
+
and numdiff step method.
|
| 96 |
+
|
| 97 |
+
Progressively smaller steps are used for larger floating point types.
|
| 98 |
+
|
| 99 |
+
Parameters
|
| 100 |
+
----------
|
| 101 |
+
f0_dtype: np.dtype
|
| 102 |
+
dtype of function evaluation
|
| 103 |
+
|
| 104 |
+
x0_dtype: np.dtype
|
| 105 |
+
dtype of parameter vector
|
| 106 |
+
|
| 107 |
+
method: {'2-point', '3-point', 'cs'}
|
| 108 |
+
|
| 109 |
+
Returns
|
| 110 |
+
-------
|
| 111 |
+
EPS: float
|
| 112 |
+
relative step size. May be np.float16, np.float32, np.float64
|
| 113 |
+
|
| 114 |
+
Notes
|
| 115 |
+
-----
|
| 116 |
+
The default relative step will be np.float64. However, if x0 or f0 are
|
| 117 |
+
smaller floating point types (np.float16, np.float32), then the smallest
|
| 118 |
+
floating point type is chosen.
|
| 119 |
+
"""
|
| 120 |
+
# the default EPS value
|
| 121 |
+
EPS = np.finfo(np.float64).eps
|
| 122 |
+
|
| 123 |
+
x0_is_fp = False
|
| 124 |
+
if np.issubdtype(x0_dtype, np.inexact):
|
| 125 |
+
# if you're a floating point type then over-ride the default EPS
|
| 126 |
+
EPS = np.finfo(x0_dtype).eps
|
| 127 |
+
x0_itemsize = np.dtype(x0_dtype).itemsize
|
| 128 |
+
x0_is_fp = True
|
| 129 |
+
|
| 130 |
+
if np.issubdtype(f0_dtype, np.inexact):
|
| 131 |
+
f0_itemsize = np.dtype(f0_dtype).itemsize
|
| 132 |
+
# choose the smallest itemsize between x0 and f0
|
| 133 |
+
if x0_is_fp and f0_itemsize < x0_itemsize:
|
| 134 |
+
EPS = np.finfo(f0_dtype).eps
|
| 135 |
+
|
| 136 |
+
if method in ["2-point", "cs"]:
|
| 137 |
+
return EPS**0.5
|
| 138 |
+
elif method in ["3-point"]:
|
| 139 |
+
return EPS**(1/3)
|
| 140 |
+
else:
|
| 141 |
+
raise RuntimeError("Unknown step method, should be one of "
|
| 142 |
+
"{'2-point', '3-point', 'cs'}")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _compute_absolute_step(rel_step, x0, f0, method):
    """
    Computes an absolute step from a relative step for finite difference
    calculation.

    Parameters
    ----------
    rel_step: None or array-like
        Relative step for the finite difference calculation
    x0 : np.ndarray
        Parameter vector
    f0 : np.ndarray or scalar
    method : {'2-point', '3-point', 'cs'}

    Returns
    -------
    h : float
        The absolute step size

    Notes
    -----
    `h` will always be np.float64. However, if `x0` or `f0` are
    smaller floating point dtypes (e.g. np.float32), then the absolute
    step size will be calculated from the smallest floating point size.
    """
    # sign(x0) with the convention that sign(0) == 1, so steps taken at
    # x0 == 0 point forward.
    sign_x0 = (x0 >= 0).astype(float) * 2 - 1

    rstep = _eps_for_method(x0.dtype, f0.dtype, method)

    if rel_step is None:
        return rstep * sign_x0 * np.maximum(1.0, np.abs(x0))

    # User has requested specific relative steps. Don't apply the
    # max(1, |x0|) floor, otherwise their requested step would be ignored
    # for |x0| < 1.
    abs_step = rel_step * sign_x0 * np.abs(x0)

    # An abs_step of 0 can still occur (rel_step == 0, or x0 == 0, or the
    # step underflows at x0); substitute the automatic step there.
    dx = ((x0 + abs_step) - x0)
    return np.where(dx == 0,
                    rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
                    abs_step)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def _prepare_bounds(bounds, x0):
|
| 195 |
+
"""
|
| 196 |
+
Prepares new-style bounds from a two-tuple specifying the lower and upper
|
| 197 |
+
limits for values in x0. If a value is not bound then the lower/upper bound
|
| 198 |
+
will be expected to be -np.inf/np.inf.
|
| 199 |
+
|
| 200 |
+
Examples
|
| 201 |
+
--------
|
| 202 |
+
>>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
|
| 203 |
+
(array([0., 1., 2.]), array([ 1., 2., inf]))
|
| 204 |
+
"""
|
| 205 |
+
lb, ub = (np.asarray(b, dtype=float) for b in bounds)
|
| 206 |
+
if lb.ndim == 0:
|
| 207 |
+
lb = np.resize(lb, x0.shape)
|
| 208 |
+
|
| 209 |
+
if ub.ndim == 0:
|
| 210 |
+
ub = np.resize(ub, x0.shape)
|
| 211 |
+
|
| 212 |
+
return lb, ub
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def group_columns(A, order=0):
    """Group columns of a 2-D matrix for sparse finite differencing [1]_.

    Two columns are in the same group if in each row at least one of them
    has zero. A greedy sequential algorithm is used to construct groups.

    Parameters
    ----------
    A : array_like or sparse matrix, shape (m, n)
        Matrix of which to group columns.
    order : int, iterable of int with shape (n,) or None
        Permutation array which defines the order of columns enumeration.
        If int or None, a random permutation is used with `order` used as
        a random seed. Default is 0, that is use a random permutation but
        guarantee repeatability.

    Returns
    -------
    groups : ndarray of int, shape (n,)
        Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is an index of a group to
        which ith column assigned. The procedure was helpful only if
        n_groups is significantly less than n.

    References
    ----------
    .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.
    """
    # Normalize the input to a 0/1 pattern: CSC for sparse input, an
    # int32 dense array otherwise.
    if issparse(A):
        A = csc_matrix(A)
    else:
        A = np.atleast_2d(A)
        A = (A != 0).astype(np.int32)

    if A.ndim != 2:
        raise ValueError("`A` must be 2-dimensional.")

    m, n = A.shape

    # `order` is either a seed for a reproducible random permutation or an
    # explicit permutation of the columns.
    if order is None or np.isscalar(order):
        rng = np.random.RandomState(order)
        order = rng.permutation(n)
    else:
        order = np.asarray(order)
        if order.shape != (n,):
            raise ValueError("`order` has incorrect shape.")

    A = A[:, order]

    if issparse(A):
        groups = group_sparse(m, n, A.indices, A.indptr)
    else:
        groups = group_dense(m, n, A)

    # Undo the permutation so the group labels align with the original
    # column order.
    groups[order] = groups.copy()

    return groups
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
|
| 277 |
+
f0=None, bounds=(-np.inf, np.inf), sparsity=None,
|
| 278 |
+
as_linear_operator=False, args=(), kwargs={}):
|
| 279 |
+
"""Compute finite difference approximation of the derivatives of a
|
| 280 |
+
vector-valued function.
|
| 281 |
+
|
| 282 |
+
If a function maps from R^n to R^m, its derivatives form m-by-n matrix
|
| 283 |
+
called the Jacobian, where an element (i, j) is a partial derivative of
|
| 284 |
+
f[i] with respect to x[j].
|
| 285 |
+
|
| 286 |
+
Parameters
|
| 287 |
+
----------
|
| 288 |
+
fun : callable
|
| 289 |
+
Function of which to estimate the derivatives. The argument x
|
| 290 |
+
passed to this function is ndarray of shape (n,) (never a scalar
|
| 291 |
+
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
|
| 292 |
+
x0 : array_like of shape (n,) or float
|
| 293 |
+
Point at which to estimate the derivatives. Float will be converted
|
| 294 |
+
to a 1-D array.
|
| 295 |
+
method : {'3-point', '2-point', 'cs'}, optional
|
| 296 |
+
Finite difference method to use:
|
| 297 |
+
- '2-point' - use the first order accuracy forward or backward
|
| 298 |
+
difference.
|
| 299 |
+
- '3-point' - use central difference in interior points and the
|
| 300 |
+
second order accuracy forward or backward difference
|
| 301 |
+
near the boundary.
|
| 302 |
+
- 'cs' - use a complex-step finite difference scheme. This assumes
|
| 303 |
+
that the user function is real-valued and can be
|
| 304 |
+
analytically continued to the complex plane. Otherwise,
|
| 305 |
+
produces bogus results.
|
| 306 |
+
rel_step : None or array_like, optional
|
| 307 |
+
Relative step size to use. If None (default) the absolute step size is
|
| 308 |
+
computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
|
| 309 |
+
`rel_step` being selected automatically, see Notes. Otherwise
|
| 310 |
+
``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
|
| 311 |
+
sign of `h` is ignored. The calculated step size is possibly adjusted
|
| 312 |
+
to fit into the bounds.
|
| 313 |
+
abs_step : array_like, optional
|
| 314 |
+
Absolute step size to use, possibly adjusted to fit into the bounds.
|
| 315 |
+
For ``method='3-point'`` the sign of `abs_step` is ignored. By default
|
| 316 |
+
relative steps are used, only if ``abs_step is not None`` are absolute
|
| 317 |
+
steps used.
|
| 318 |
+
f0 : None or array_like, optional
|
| 319 |
+
If not None it is assumed to be equal to ``fun(x0)``, in this case
|
| 320 |
+
the ``fun(x0)`` is not called. Default is None.
|
| 321 |
+
bounds : tuple of array_like, optional
|
| 322 |
+
Lower and upper bounds on independent variables. Defaults to no bounds.
|
| 323 |
+
Each bound must match the size of `x0` or be a scalar, in the latter
|
| 324 |
+
case the bound will be the same for all variables. Use it to limit the
|
| 325 |
+
range of function evaluation. Bounds checking is not implemented
|
| 326 |
+
when `as_linear_operator` is True.
|
| 327 |
+
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
|
| 328 |
+
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
|
| 329 |
+
matrix is known to have only few non-zero elements in each row, then
|
| 330 |
+
it's possible to estimate its several columns by a single function
|
| 331 |
+
evaluation [3]_. To perform such economic computations two ingredients
|
| 332 |
+
are required:
|
| 333 |
+
|
| 334 |
+
* structure : array_like or sparse matrix of shape (m, n). A zero
|
| 335 |
+
element means that a corresponding element of the Jacobian
|
| 336 |
+
identically equals to zero.
|
| 337 |
+
* groups : array_like of shape (n,). A column grouping for a given
|
| 338 |
+
sparsity structure, use `group_columns` to obtain it.
|
| 339 |
+
|
| 340 |
+
A single array or a sparse matrix is interpreted as a sparsity
|
| 341 |
+
structure, and groups are computed inside the function. A tuple is
|
| 342 |
+
interpreted as (structure, groups). If None (default), a standard
|
| 343 |
+
dense differencing will be used.
|
| 344 |
+
|
| 345 |
+
Note, that sparse differencing makes sense only for large Jacobian
|
| 346 |
+
matrices where each row contains few non-zero elements.
|
| 347 |
+
as_linear_operator : bool, optional
|
| 348 |
+
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
|
| 349 |
+
Otherwise it returns a dense array or a sparse matrix depending on
|
| 350 |
+
`sparsity`. The linear operator provides an efficient way of computing
|
| 351 |
+
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
|
| 352 |
+
direct access to individual elements of the matrix. By default
|
| 353 |
+
`as_linear_operator` is False.
|
| 354 |
+
args, kwargs : tuple and dict, optional
|
| 355 |
+
Additional arguments passed to `fun`. Both empty by default.
|
| 356 |
+
The calling signature is ``fun(x, *args, **kwargs)``.
|
| 357 |
+
|
| 358 |
+
Returns
|
| 359 |
+
-------
|
| 360 |
+
J : {ndarray, sparse matrix, LinearOperator}
|
| 361 |
+
Finite difference approximation of the Jacobian matrix.
|
| 362 |
+
If `as_linear_operator` is True returns a LinearOperator
|
| 363 |
+
with shape (m, n). Otherwise it returns a dense array or sparse
|
| 364 |
+
matrix depending on how `sparsity` is defined. If `sparsity`
|
| 365 |
+
is None then a ndarray with shape (m, n) is returned. If
|
| 366 |
+
`sparsity` is not None returns a csr_matrix with shape (m, n).
|
| 367 |
+
For sparse matrices and linear operators it is always returned as
|
| 368 |
+
a 2-D structure, for ndarrays, if m=1 it is returned
|
| 369 |
+
as a 1-D gradient array with shape (n,).
|
| 370 |
+
|
| 371 |
+
See Also
|
| 372 |
+
--------
|
| 373 |
+
check_derivative : Check correctness of a function computing derivatives.
|
| 374 |
+
|
| 375 |
+
Notes
|
| 376 |
+
-----
|
| 377 |
+
If `rel_step` is not provided, it assigned as ``EPS**(1/s)``, where EPS is
|
| 378 |
+
determined from the smallest floating point dtype of `x0` or `fun(x0)`,
|
| 379 |
+
``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and
|
| 380 |
+
s=3 for '3-point' method. Such relative step approximately minimizes a sum
|
| 381 |
+
of truncation and round-off errors, see [1]_. Relative steps are used by
|
| 382 |
+
default. However, absolute steps are used when ``abs_step is not None``.
|
| 383 |
+
If any of the absolute or relative steps produces an indistinguishable
|
| 384 |
+
difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then a
|
| 385 |
+
automatic step size is substituted for that particular entry.
|
| 386 |
+
|
| 387 |
+
A finite difference scheme for '3-point' method is selected automatically.
|
| 388 |
+
The well-known central difference scheme is used for points sufficiently
|
| 389 |
+
far from the boundary, and 3-point forward or backward scheme is used for
|
| 390 |
+
points near the boundary. Both schemes have the second-order accuracy in
|
| 391 |
+
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
|
| 392 |
+
forward and backward difference schemes.
|
| 393 |
+
|
| 394 |
+
For dense differencing when m=1 Jacobian is returned with a shape (n,),
|
| 395 |
+
on the other hand when n=1 Jacobian is returned with a shape (m, 1).
|
| 396 |
+
Our motivation is the following: a) It handles a case of gradient
|
| 397 |
+
computation (m=1) in a conventional way. b) It clearly separates these two
|
| 398 |
+
different cases. b) In all cases np.atleast_2d can be called to get 2-D
|
| 399 |
+
Jacobian with correct dimensions.
|
| 400 |
+
|
| 401 |
+
References
|
| 402 |
+
----------
|
| 403 |
+
.. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
|
| 404 |
+
Computing. 3rd edition", sec. 5.7.
|
| 405 |
+
|
| 406 |
+
.. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
|
| 407 |
+
sparse Jacobian matrices", Journal of the Institute of Mathematics
|
| 408 |
+
and its Applications, 13 (1974), pp. 117-120.
|
| 409 |
+
|
| 410 |
+
.. [3] B. Fornberg, "Generation of Finite Difference Formulas on
|
| 411 |
+
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
|
| 412 |
+
|
| 413 |
+
Examples
|
| 414 |
+
--------
|
| 415 |
+
>>> import numpy as np
|
| 416 |
+
>>> from scipy.optimize._numdiff import approx_derivative
|
| 417 |
+
>>>
|
| 418 |
+
>>> def f(x, c1, c2):
|
| 419 |
+
... return np.array([x[0] * np.sin(c1 * x[1]),
|
| 420 |
+
... x[0] * np.cos(c2 * x[1])])
|
| 421 |
+
...
|
| 422 |
+
>>> x0 = np.array([1.0, 0.5 * np.pi])
|
| 423 |
+
>>> approx_derivative(f, x0, args=(1, 2))
|
| 424 |
+
array([[ 1., 0.],
|
| 425 |
+
[-1., 0.]])
|
| 426 |
+
|
| 427 |
+
Bounds can be used to limit the region of function evaluation.
|
| 428 |
+
In the example below we compute left and right derivative at point 1.0.
|
| 429 |
+
|
| 430 |
+
>>> def g(x):
|
| 431 |
+
... return x**2 if x >= 1 else x
|
| 432 |
+
...
|
| 433 |
+
>>> x0 = 1.0
|
| 434 |
+
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
|
| 435 |
+
array([ 1.])
|
| 436 |
+
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
|
| 437 |
+
array([ 2.])
|
| 438 |
+
"""
|
| 439 |
+
if method not in ['2-point', '3-point', 'cs']:
|
| 440 |
+
raise ValueError("Unknown method '%s'. " % method)
|
| 441 |
+
|
| 442 |
+
xp = array_namespace(x0)
|
| 443 |
+
_x = atleast_nd(x0, ndim=1, xp=xp)
|
| 444 |
+
_dtype = xp.float64
|
| 445 |
+
if xp.isdtype(_x.dtype, "real floating"):
|
| 446 |
+
_dtype = _x.dtype
|
| 447 |
+
|
| 448 |
+
# promotes to floating
|
| 449 |
+
x0 = xp.astype(_x, _dtype)
|
| 450 |
+
|
| 451 |
+
if x0.ndim > 1:
|
| 452 |
+
raise ValueError("`x0` must have at most 1 dimension.")
|
| 453 |
+
|
| 454 |
+
lb, ub = _prepare_bounds(bounds, x0)
|
| 455 |
+
|
| 456 |
+
if lb.shape != x0.shape or ub.shape != x0.shape:
|
| 457 |
+
raise ValueError("Inconsistent shapes between bounds and `x0`.")
|
| 458 |
+
|
| 459 |
+
if as_linear_operator and not (np.all(np.isinf(lb))
|
| 460 |
+
and np.all(np.isinf(ub))):
|
| 461 |
+
raise ValueError("Bounds not supported when "
|
| 462 |
+
"`as_linear_operator` is True.")
|
| 463 |
+
|
| 464 |
+
def fun_wrapped(x):
|
| 465 |
+
# send user function same fp type as x0. (but only if cs is not being
|
| 466 |
+
# used
|
| 467 |
+
if xp.isdtype(x.dtype, "real floating"):
|
| 468 |
+
x = xp.astype(x, x0.dtype)
|
| 469 |
+
|
| 470 |
+
f = np.atleast_1d(fun(x, *args, **kwargs))
|
| 471 |
+
if f.ndim > 1:
|
| 472 |
+
raise RuntimeError("`fun` return value has "
|
| 473 |
+
"more than 1 dimension.")
|
| 474 |
+
return f
|
| 475 |
+
|
| 476 |
+
if f0 is None:
|
| 477 |
+
f0 = fun_wrapped(x0)
|
| 478 |
+
else:
|
| 479 |
+
f0 = np.atleast_1d(f0)
|
| 480 |
+
if f0.ndim > 1:
|
| 481 |
+
raise ValueError("`f0` passed has more than 1 dimension.")
|
| 482 |
+
|
| 483 |
+
if np.any((x0 < lb) | (x0 > ub)):
|
| 484 |
+
raise ValueError("`x0` violates bound constraints.")
|
| 485 |
+
|
| 486 |
+
if as_linear_operator:
|
| 487 |
+
if rel_step is None:
|
| 488 |
+
rel_step = _eps_for_method(x0.dtype, f0.dtype, method)
|
| 489 |
+
|
| 490 |
+
return _linear_operator_difference(fun_wrapped, x0,
|
| 491 |
+
f0, rel_step, method)
|
| 492 |
+
else:
|
| 493 |
+
# by default we use rel_step
|
| 494 |
+
if abs_step is None:
|
| 495 |
+
h = _compute_absolute_step(rel_step, x0, f0, method)
|
| 496 |
+
else:
|
| 497 |
+
# user specifies an absolute step
|
| 498 |
+
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
|
| 499 |
+
h = abs_step
|
| 500 |
+
|
| 501 |
+
# cannot have a zero step. This might happen if x0 is very large
|
| 502 |
+
# or small. In which case fall back to relative step.
|
| 503 |
+
dx = ((x0 + h) - x0)
|
| 504 |
+
h = np.where(dx == 0,
|
| 505 |
+
_eps_for_method(x0.dtype, f0.dtype, method) *
|
| 506 |
+
sign_x0 * np.maximum(1.0, np.abs(x0)),
|
| 507 |
+
h)
|
| 508 |
+
|
| 509 |
+
if method == '2-point':
|
| 510 |
+
h, use_one_sided = _adjust_scheme_to_bounds(
|
| 511 |
+
x0, h, 1, '1-sided', lb, ub)
|
| 512 |
+
elif method == '3-point':
|
| 513 |
+
h, use_one_sided = _adjust_scheme_to_bounds(
|
| 514 |
+
x0, h, 1, '2-sided', lb, ub)
|
| 515 |
+
elif method == 'cs':
|
| 516 |
+
use_one_sided = False
|
| 517 |
+
|
| 518 |
+
if sparsity is None:
|
| 519 |
+
return _dense_difference(fun_wrapped, x0, f0, h,
|
| 520 |
+
use_one_sided, method)
|
| 521 |
+
else:
|
| 522 |
+
if not issparse(sparsity) and len(sparsity) == 2:
|
| 523 |
+
structure, groups = sparsity
|
| 524 |
+
else:
|
| 525 |
+
structure = sparsity
|
| 526 |
+
groups = group_columns(sparsity)
|
| 527 |
+
|
| 528 |
+
if issparse(structure):
|
| 529 |
+
structure = csc_matrix(structure)
|
| 530 |
+
else:
|
| 531 |
+
structure = np.atleast_2d(structure)
|
| 532 |
+
|
| 533 |
+
groups = np.atleast_1d(groups)
|
| 534 |
+
return _sparse_difference(fun_wrapped, x0, f0, h,
|
| 535 |
+
use_one_sided, structure,
|
| 536 |
+
groups, method)
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
def _linear_operator_difference(fun, x0, f0, h, method):
|
| 540 |
+
m = f0.size
|
| 541 |
+
n = x0.size
|
| 542 |
+
|
| 543 |
+
if method == '2-point':
|
| 544 |
+
def matvec(p):
|
| 545 |
+
if np.array_equal(p, np.zeros_like(p)):
|
| 546 |
+
return np.zeros(m)
|
| 547 |
+
dx = h / norm(p)
|
| 548 |
+
x = x0 + dx*p
|
| 549 |
+
df = fun(x) - f0
|
| 550 |
+
return df / dx
|
| 551 |
+
|
| 552 |
+
elif method == '3-point':
|
| 553 |
+
def matvec(p):
|
| 554 |
+
if np.array_equal(p, np.zeros_like(p)):
|
| 555 |
+
return np.zeros(m)
|
| 556 |
+
dx = 2*h / norm(p)
|
| 557 |
+
x1 = x0 - (dx/2)*p
|
| 558 |
+
x2 = x0 + (dx/2)*p
|
| 559 |
+
f1 = fun(x1)
|
| 560 |
+
f2 = fun(x2)
|
| 561 |
+
df = f2 - f1
|
| 562 |
+
return df / dx
|
| 563 |
+
|
| 564 |
+
elif method == 'cs':
|
| 565 |
+
def matvec(p):
|
| 566 |
+
if np.array_equal(p, np.zeros_like(p)):
|
| 567 |
+
return np.zeros(m)
|
| 568 |
+
dx = h / norm(p)
|
| 569 |
+
x = x0 + dx*p*1.j
|
| 570 |
+
f1 = fun(x)
|
| 571 |
+
df = f1.imag
|
| 572 |
+
return df / dx
|
| 573 |
+
|
| 574 |
+
else:
|
| 575 |
+
raise RuntimeError("Never be here.")
|
| 576 |
+
|
| 577 |
+
return LinearOperator((m, n), matvec)
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
|
| 581 |
+
m = f0.size
|
| 582 |
+
n = x0.size
|
| 583 |
+
J_transposed = np.empty((n, m))
|
| 584 |
+
x1 = x0.copy()
|
| 585 |
+
x2 = x0.copy()
|
| 586 |
+
xc = x0.astype(complex, copy=True)
|
| 587 |
+
|
| 588 |
+
for i in range(h.size):
|
| 589 |
+
if method == '2-point':
|
| 590 |
+
x1[i] += h[i]
|
| 591 |
+
dx = x1[i] - x0[i] # Recompute dx as exactly representable number.
|
| 592 |
+
df = fun(x1) - f0
|
| 593 |
+
elif method == '3-point' and use_one_sided[i]:
|
| 594 |
+
x1[i] += h[i]
|
| 595 |
+
x2[i] += 2 * h[i]
|
| 596 |
+
dx = x2[i] - x0[i]
|
| 597 |
+
f1 = fun(x1)
|
| 598 |
+
f2 = fun(x2)
|
| 599 |
+
df = -3.0 * f0 + 4 * f1 - f2
|
| 600 |
+
elif method == '3-point' and not use_one_sided[i]:
|
| 601 |
+
x1[i] -= h[i]
|
| 602 |
+
x2[i] += h[i]
|
| 603 |
+
dx = x2[i] - x1[i]
|
| 604 |
+
f1 = fun(x1)
|
| 605 |
+
f2 = fun(x2)
|
| 606 |
+
df = f2 - f1
|
| 607 |
+
elif method == 'cs':
|
| 608 |
+
xc[i] += h[i] * 1.j
|
| 609 |
+
f1 = fun(xc)
|
| 610 |
+
df = f1.imag
|
| 611 |
+
dx = h[i]
|
| 612 |
+
else:
|
| 613 |
+
raise RuntimeError("Never be here.")
|
| 614 |
+
|
| 615 |
+
J_transposed[i] = df / dx
|
| 616 |
+
x1[i] = x2[i] = xc[i] = x0[i]
|
| 617 |
+
|
| 618 |
+
if m == 1:
|
| 619 |
+
J_transposed = np.ravel(J_transposed)
|
| 620 |
+
|
| 621 |
+
return J_transposed.T
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
def _sparse_difference(fun, x0, f0, h, use_one_sided,
|
| 625 |
+
structure, groups, method):
|
| 626 |
+
m = f0.size
|
| 627 |
+
n = x0.size
|
| 628 |
+
row_indices = []
|
| 629 |
+
col_indices = []
|
| 630 |
+
fractions = []
|
| 631 |
+
|
| 632 |
+
n_groups = np.max(groups) + 1
|
| 633 |
+
for group in range(n_groups):
|
| 634 |
+
# Perturb variables which are in the same group simultaneously.
|
| 635 |
+
e = np.equal(group, groups)
|
| 636 |
+
h_vec = h * e
|
| 637 |
+
if method == '2-point':
|
| 638 |
+
x = x0 + h_vec
|
| 639 |
+
dx = x - x0
|
| 640 |
+
df = fun(x) - f0
|
| 641 |
+
# The result is written to columns which correspond to perturbed
|
| 642 |
+
# variables.
|
| 643 |
+
cols, = np.nonzero(e)
|
| 644 |
+
# Find all non-zero elements in selected columns of Jacobian.
|
| 645 |
+
i, j, _ = find(structure[:, cols])
|
| 646 |
+
# Restore column indices in the full array.
|
| 647 |
+
j = cols[j]
|
| 648 |
+
elif method == '3-point':
|
| 649 |
+
# Here we do conceptually the same but separate one-sided
|
| 650 |
+
# and two-sided schemes.
|
| 651 |
+
x1 = x0.copy()
|
| 652 |
+
x2 = x0.copy()
|
| 653 |
+
|
| 654 |
+
mask_1 = use_one_sided & e
|
| 655 |
+
x1[mask_1] += h_vec[mask_1]
|
| 656 |
+
x2[mask_1] += 2 * h_vec[mask_1]
|
| 657 |
+
|
| 658 |
+
mask_2 = ~use_one_sided & e
|
| 659 |
+
x1[mask_2] -= h_vec[mask_2]
|
| 660 |
+
x2[mask_2] += h_vec[mask_2]
|
| 661 |
+
|
| 662 |
+
dx = np.zeros(n)
|
| 663 |
+
dx[mask_1] = x2[mask_1] - x0[mask_1]
|
| 664 |
+
dx[mask_2] = x2[mask_2] - x1[mask_2]
|
| 665 |
+
|
| 666 |
+
f1 = fun(x1)
|
| 667 |
+
f2 = fun(x2)
|
| 668 |
+
|
| 669 |
+
cols, = np.nonzero(e)
|
| 670 |
+
i, j, _ = find(structure[:, cols])
|
| 671 |
+
j = cols[j]
|
| 672 |
+
|
| 673 |
+
mask = use_one_sided[j]
|
| 674 |
+
df = np.empty(m)
|
| 675 |
+
|
| 676 |
+
rows = i[mask]
|
| 677 |
+
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
|
| 678 |
+
|
| 679 |
+
rows = i[~mask]
|
| 680 |
+
df[rows] = f2[rows] - f1[rows]
|
| 681 |
+
elif method == 'cs':
|
| 682 |
+
f1 = fun(x0 + h_vec*1.j)
|
| 683 |
+
df = f1.imag
|
| 684 |
+
dx = h_vec
|
| 685 |
+
cols, = np.nonzero(e)
|
| 686 |
+
i, j, _ = find(structure[:, cols])
|
| 687 |
+
j = cols[j]
|
| 688 |
+
else:
|
| 689 |
+
raise ValueError("Never be here.")
|
| 690 |
+
|
| 691 |
+
# All that's left is to compute the fraction. We store i, j and
|
| 692 |
+
# fractions as separate arrays and later construct coo_matrix.
|
| 693 |
+
row_indices.append(i)
|
| 694 |
+
col_indices.append(j)
|
| 695 |
+
fractions.append(df[i] / dx[j])
|
| 696 |
+
|
| 697 |
+
row_indices = np.hstack(row_indices)
|
| 698 |
+
col_indices = np.hstack(col_indices)
|
| 699 |
+
fractions = np.hstack(fractions)
|
| 700 |
+
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
|
| 701 |
+
return csr_matrix(J)
|
| 702 |
+
|
| 703 |
+
|
| 704 |
+
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
                     kwargs={}):
    """Check a user-supplied derivative (Jacobian or gradient) against a
    finite difference approximation.

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives; receives an ndarray
        of shape (n,) and returns a 1-D array_like of shape (m,) or a
        scalar.
    jac : callable
        Function computing the Jacobian of `fun`; called with the same
        arguments as `fun` and returning an array_like or sparse matrix of
        appropriate shape.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on the independent variables, limiting the
        region of function evaluation. Defaults to no bounds.
    args, kwargs : tuple and dict, optional
        Extra arguments forwarded to both `fun` and `jac`.

    Returns
    -------
    accuracy : float
        The maximum of relative errors (for elements with magnitude above 1)
        and absolute errors (for elements with magnitude at most 1). Values
        around 1e-6 or lower suggest `jac` is correct.

    See Also
    --------
    approx_derivative : Compute finite difference approximation of derivative.
    """
    # NOTE: the mutable default for `kwargs` is kept for API compatibility.
    J_analytic = jac(x0, *args, **kwargs)

    if issparse(J_analytic):
        # Reuse the analytic sparsity pattern so the finite difference
        # estimate only touches structurally nonzero entries.
        J_numeric = approx_derivative(fun, x0, bounds=bounds,
                                      sparsity=J_analytic,
                                      args=args, kwargs=kwargs)
        J_analytic = csr_matrix(J_analytic)
        diff = J_analytic - J_numeric
        rows, cols, diff_data = find(diff)
        ref = np.asarray(J_numeric[rows, cols]).ravel()
        return np.max(np.abs(diff_data) / np.maximum(1, np.abs(ref)))

    J_numeric = approx_derivative(fun, x0, bounds=bounds,
                                  args=args, kwargs=kwargs)
    return np.max(np.abs(J_analytic - J_numeric) /
                  np.maximum(1, np.abs(J_numeric)))
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_optimize.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (86.6 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/scipy/optimize/_spectral.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Spectral Algorithm for Nonlinear Equations
|
| 3 |
+
"""
|
| 4 |
+
import collections
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from scipy.optimize import OptimizeResult
|
| 8 |
+
from scipy.optimize._optimize import _check_unknown_options
|
| 9 |
+
from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng
|
| 10 |
+
|
| 11 |
+
class _NoConvergence(Exception):
    """Raised internally when the evaluation budget (``maxfev``) is
    exhausted; caught by the DF-SANE driver to terminate the iteration."""
    pass
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000,
                  fnorm=None, callback=None, disp=False, M=10, eta_strategy=None,
                  sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options):
    r"""
    Solve nonlinear equation with the DF-SANE method

    Options
    -------
    ftol : float, optional
        Relative norm tolerance.
    fatol : float, optional
        Absolute norm tolerance.
        Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``.
    fnorm : callable, optional
        Norm to use in the convergence check. If None, 2-norm is used.
    maxfev : int, optional
        Maximum number of function evaluations.
    disp : bool, optional
        Whether to print convergence process to stdout.
    eta_strategy : callable, optional
        Choice of the ``eta_k`` parameter, which gives slack for growth
        of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with
        `k` the iteration number, `x` the current iterate and `F` the current
        residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``.
        Default: ``||F||**2 / (1 + k)**2``.
    sigma_eps : float, optional
        The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``.
        Default: 1e-10
    sigma_0 : float, optional
        Initial spectral coefficient.
        Default: 1.0
    M : int, optional
        Number of iterates to include in the nonmonotonic line search.
        Default: 10
    line_search : {'cruz', 'cheng'}
        Type of line search to employ. 'cruz' is the original one defined in
        [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is
        a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)].
        Default: 'cruz'

    References
    ----------
    .. [1] "Spectral residual method without gradient information for solving
           large-scale nonlinear systems of equations." W. La Cruz,
           J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
    .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014).
    .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009).

    """
    _check_unknown_options(unknown_options)

    if line_search not in ('cheng', 'cruz'):
        raise ValueError(f"Invalid value {line_search!r} for 'line_search'")

    # Exponent of the merit function: fmerit(F) = ||F||**nexp.
    nexp = 2

    if eta_strategy is None:
        # Different choice from [1], as their eta is not invariant
        # vs. scaling of F.
        def eta_strategy(k, x, F):
            # Obtain squared 2-norm of the initial residual from the outer scope.
            # (f_0 is assigned below, before this closure is ever called.)
            return f_0 / (1 + k)**2

    if fnorm is None:
        def fnorm(F):
            # Obtain squared 2-norm of the current residual from the outer scope.
            # (f_k tracks the merit value of the current iterate.)
            return f_k**(1.0/nexp)

    def fmerit(F):
        # Merit value minimized by the nonmonotone line searches.
        return np.linalg.norm(F)**nexp

    # One-element list so _wrap_func can mutate the evaluation count in place.
    nfev = [0]
    f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit,
                                                       nfev, maxfev, args)

    k = 0
    f_0 = f_k
    sigma_k = sigma_0

    F_0_norm = fnorm(F_k)

    # For the 'cruz' line search: bounded deque keeps only the last M merits.
    prev_fs = collections.deque([f_k], M)

    # For the 'cheng' line search
    Q = 1.0
    C = f_0

    converged = False
    message = "too many function evaluations required"

    while True:
        F_k_norm = fnorm(F_k)

        if disp:
            print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k))

        if callback is not None:
            callback(x_k, F_k)

        if F_k_norm < ftol * F_0_norm + fatol:
            # Converged!
            message = "successful convergence"
            converged = True
            break

        # Control spectral parameter, from [2]: clamp |sigma_k| into
        # [sigma_eps, 1/sigma_eps], keeping the sign at the upper bound.
        if abs(sigma_k) > 1/sigma_eps:
            sigma_k = 1/sigma_eps * np.sign(sigma_k)
        elif abs(sigma_k) < sigma_eps:
            sigma_k = sigma_eps

        # Line search direction
        d = -sigma_k * F_k

        # Nonmonotone line search: eta is the allowed slack for merit growth.
        eta = eta_strategy(k, x_k, F_k)
        try:
            if line_search == 'cruz':
                alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs,
                                                                  eta=eta)
            elif line_search == 'cheng':
                alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k,
                                                                         C, Q, eta=eta)
        except _NoConvergence:
            # Evaluation budget exhausted inside the line search; exit with
            # the default "too many function evaluations" message.
            break

        # Update spectral parameter: sigma = <s, s> / <s, y>.
        s_k = xp - x_k
        y_k = Fp - F_k
        sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k)

        # Take step
        x_k = xp
        F_k = Fp
        f_k = fp

        # Store function value
        if line_search == 'cruz':
            prev_fs.append(fp)

        k += 1

    # Undo the real<->complex packing and restore the caller's array shape.
    x = _wrap_result(x_k, is_complex, shape=x_shape)
    F = _wrap_result(F_k, is_complex)

    result = OptimizeResult(x=x, success=converged,
                            message=message,
                            fun=F, nfev=nfev[0], nit=k, method="df-sane")

    return result
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()):
|
| 169 |
+
"""
|
| 170 |
+
Wrap a function and an initial value so that (i) complex values
|
| 171 |
+
are wrapped to reals, and (ii) value for a merit function
|
| 172 |
+
fmerit(x, f) is computed at the same time, (iii) iteration count
|
| 173 |
+
is maintained and an exception is raised if it is exceeded.
|
| 174 |
+
|
| 175 |
+
Parameters
|
| 176 |
+
----------
|
| 177 |
+
func : callable
|
| 178 |
+
Function to wrap
|
| 179 |
+
x0 : ndarray
|
| 180 |
+
Initial value
|
| 181 |
+
fmerit : callable
|
| 182 |
+
Merit function fmerit(f) for computing merit value from residual.
|
| 183 |
+
nfev_list : list
|
| 184 |
+
List to store number of evaluations in. Should be [0] in the beginning.
|
| 185 |
+
maxfev : int
|
| 186 |
+
Maximum number of evaluations before _NoConvergence is raised.
|
| 187 |
+
args : tuple
|
| 188 |
+
Extra arguments to func
|
| 189 |
+
|
| 190 |
+
Returns
|
| 191 |
+
-------
|
| 192 |
+
wrap_func : callable
|
| 193 |
+
Wrapped function, to be called as
|
| 194 |
+
``F, fp = wrap_func(x0)``
|
| 195 |
+
x0_wrap : ndarray of float
|
| 196 |
+
Wrapped initial value; raveled to 1-D and complex
|
| 197 |
+
values mapped to reals.
|
| 198 |
+
x0_shape : tuple
|
| 199 |
+
Shape of the initial value array
|
| 200 |
+
f : float
|
| 201 |
+
Merit function at F
|
| 202 |
+
F : ndarray of float
|
| 203 |
+
Residual at x0_wrap
|
| 204 |
+
is_complex : bool
|
| 205 |
+
Whether complex values were mapped to reals
|
| 206 |
+
|
| 207 |
+
"""
|
| 208 |
+
x0 = np.asarray(x0)
|
| 209 |
+
x0_shape = x0.shape
|
| 210 |
+
F = np.asarray(func(x0, *args)).ravel()
|
| 211 |
+
is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F)
|
| 212 |
+
x0 = x0.ravel()
|
| 213 |
+
|
| 214 |
+
nfev_list[0] = 1
|
| 215 |
+
|
| 216 |
+
if is_complex:
|
| 217 |
+
def wrap_func(x):
|
| 218 |
+
if nfev_list[0] >= maxfev:
|
| 219 |
+
raise _NoConvergence()
|
| 220 |
+
nfev_list[0] += 1
|
| 221 |
+
z = _real2complex(x).reshape(x0_shape)
|
| 222 |
+
v = np.asarray(func(z, *args)).ravel()
|
| 223 |
+
F = _complex2real(v)
|
| 224 |
+
f = fmerit(F)
|
| 225 |
+
return f, F
|
| 226 |
+
|
| 227 |
+
x0 = _complex2real(x0)
|
| 228 |
+
F = _complex2real(F)
|
| 229 |
+
else:
|
| 230 |
+
def wrap_func(x):
|
| 231 |
+
if nfev_list[0] >= maxfev:
|
| 232 |
+
raise _NoConvergence()
|
| 233 |
+
nfev_list[0] += 1
|
| 234 |
+
x = x.reshape(x0_shape)
|
| 235 |
+
F = np.asarray(func(x, *args)).ravel()
|
| 236 |
+
f = fmerit(F)
|
| 237 |
+
return f, F
|
| 238 |
+
|
| 239 |
+
return wrap_func, x0, x0_shape, fmerit(F), F, is_complex
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def _wrap_result(result, is_complex, shape=None):
|
| 243 |
+
"""
|
| 244 |
+
Convert from real to complex and reshape result arrays.
|
| 245 |
+
"""
|
| 246 |
+
if is_complex:
|
| 247 |
+
z = _real2complex(result)
|
| 248 |
+
else:
|
| 249 |
+
z = result
|
| 250 |
+
if shape is not None:
|
| 251 |
+
z = z.reshape(shape)
|
| 252 |
+
return z
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def _real2complex(x):
|
| 256 |
+
return np.ascontiguousarray(x, dtype=float).view(np.complex128)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def _complex2real(z):
|
| 260 |
+
return np.ascontiguousarray(z, dtype=complex).view(np.float64)
|