Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py +753 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so +3 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyqa_py.py +62 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_direct_py.py +278 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd +20 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd +22 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd +46 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd +10 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd +110 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd +12 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd +95 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd +7 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so +3 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog.py +716 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py +440 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py +1522 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py +5 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py +183 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py +733 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py +331 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py +967 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py +362 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py +560 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py +249 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so +3 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py +460 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_tnc.py +430 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py +6 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +231 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +564 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py +407 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py +51 -0
.gitattributes
CHANGED
|
@@ -353,3 +353,7 @@ llava_next/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpyt
|
|
| 353 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 354 |
parrot/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 355 |
parrot/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 353 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 354 |
parrot/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 355 |
parrot/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text
|
| 356 |
+
llava_next/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 357 |
+
llava_next/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 358 |
+
llava_next/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 359 |
+
llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py
ADDED
|
@@ -0,0 +1,753 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
basinhopping: The basinhopping global optimization algorithm
|
| 3 |
+
"""
|
| 4 |
+
import numpy as np
|
| 5 |
+
import math
|
| 6 |
+
import inspect
|
| 7 |
+
import scipy.optimize
|
| 8 |
+
from scipy._lib._util import check_random_state
|
| 9 |
+
|
| 10 |
+
__all__ = ['basinhopping']
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
_params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY),
|
| 14 |
+
inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY))
|
| 15 |
+
_new_accept_test_signature = inspect.Signature(parameters=_params)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Storage:
|
| 19 |
+
"""
|
| 20 |
+
Class used to store the lowest energy structure
|
| 21 |
+
"""
|
| 22 |
+
def __init__(self, minres):
|
| 23 |
+
self._add(minres)
|
| 24 |
+
|
| 25 |
+
def _add(self, minres):
|
| 26 |
+
self.minres = minres
|
| 27 |
+
self.minres.x = np.copy(minres.x)
|
| 28 |
+
|
| 29 |
+
def update(self, minres):
|
| 30 |
+
if minres.success and (minres.fun < self.minres.fun
|
| 31 |
+
or not self.minres.success):
|
| 32 |
+
self._add(minres)
|
| 33 |
+
return True
|
| 34 |
+
else:
|
| 35 |
+
return False
|
| 36 |
+
|
| 37 |
+
def get_lowest(self):
|
| 38 |
+
return self.minres
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class BasinHoppingRunner:
|
| 42 |
+
"""This class implements the core of the basinhopping algorithm.
|
| 43 |
+
|
| 44 |
+
x0 : ndarray
|
| 45 |
+
The starting coordinates.
|
| 46 |
+
minimizer : callable
|
| 47 |
+
The local minimizer, with signature ``result = minimizer(x)``.
|
| 48 |
+
The return value is an `optimize.OptimizeResult` object.
|
| 49 |
+
step_taking : callable
|
| 50 |
+
This function displaces the coordinates randomly. Signature should
|
| 51 |
+
be ``x_new = step_taking(x)``. Note that `x` may be modified in-place.
|
| 52 |
+
accept_tests : list of callables
|
| 53 |
+
Each test is passed the kwargs `f_new`, `x_new`, `f_old` and
|
| 54 |
+
`x_old`. These tests will be used to judge whether or not to accept
|
| 55 |
+
the step. The acceptable return values are True, False, or ``"force
|
| 56 |
+
accept"``. If any of the tests return False then the step is rejected.
|
| 57 |
+
If ``"force accept"``, then this will override any other tests in
|
| 58 |
+
order to accept the step. This can be used, for example, to forcefully
|
| 59 |
+
escape from a local minimum that ``basinhopping`` is trapped in.
|
| 60 |
+
disp : bool, optional
|
| 61 |
+
Display status messages.
|
| 62 |
+
|
| 63 |
+
"""
|
| 64 |
+
def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):
|
| 65 |
+
self.x = np.copy(x0)
|
| 66 |
+
self.minimizer = minimizer
|
| 67 |
+
self.step_taking = step_taking
|
| 68 |
+
self.accept_tests = accept_tests
|
| 69 |
+
self.disp = disp
|
| 70 |
+
|
| 71 |
+
self.nstep = 0
|
| 72 |
+
|
| 73 |
+
# initialize return object
|
| 74 |
+
self.res = scipy.optimize.OptimizeResult()
|
| 75 |
+
self.res.minimization_failures = 0
|
| 76 |
+
|
| 77 |
+
# do initial minimization
|
| 78 |
+
minres = minimizer(self.x)
|
| 79 |
+
if not minres.success:
|
| 80 |
+
self.res.minimization_failures += 1
|
| 81 |
+
if self.disp:
|
| 82 |
+
print("warning: basinhopping: local minimization failure")
|
| 83 |
+
self.x = np.copy(minres.x)
|
| 84 |
+
self.energy = minres.fun
|
| 85 |
+
self.incumbent_minres = minres # best minimize result found so far
|
| 86 |
+
if self.disp:
|
| 87 |
+
print("basinhopping step %d: f %g" % (self.nstep, self.energy))
|
| 88 |
+
|
| 89 |
+
# initialize storage class
|
| 90 |
+
self.storage = Storage(minres)
|
| 91 |
+
|
| 92 |
+
if hasattr(minres, "nfev"):
|
| 93 |
+
self.res.nfev = minres.nfev
|
| 94 |
+
if hasattr(minres, "njev"):
|
| 95 |
+
self.res.njev = minres.njev
|
| 96 |
+
if hasattr(minres, "nhev"):
|
| 97 |
+
self.res.nhev = minres.nhev
|
| 98 |
+
|
| 99 |
+
def _monte_carlo_step(self):
|
| 100 |
+
"""Do one Monte Carlo iteration
|
| 101 |
+
|
| 102 |
+
Randomly displace the coordinates, minimize, and decide whether
|
| 103 |
+
or not to accept the new coordinates.
|
| 104 |
+
"""
|
| 105 |
+
# Take a random step. Make a copy of x because the step_taking
|
| 106 |
+
# algorithm might change x in place
|
| 107 |
+
x_after_step = np.copy(self.x)
|
| 108 |
+
x_after_step = self.step_taking(x_after_step)
|
| 109 |
+
|
| 110 |
+
# do a local minimization
|
| 111 |
+
minres = self.minimizer(x_after_step)
|
| 112 |
+
x_after_quench = minres.x
|
| 113 |
+
energy_after_quench = minres.fun
|
| 114 |
+
if not minres.success:
|
| 115 |
+
self.res.minimization_failures += 1
|
| 116 |
+
if self.disp:
|
| 117 |
+
print("warning: basinhopping: local minimization failure")
|
| 118 |
+
if hasattr(minres, "nfev"):
|
| 119 |
+
self.res.nfev += minres.nfev
|
| 120 |
+
if hasattr(minres, "njev"):
|
| 121 |
+
self.res.njev += minres.njev
|
| 122 |
+
if hasattr(minres, "nhev"):
|
| 123 |
+
self.res.nhev += minres.nhev
|
| 124 |
+
|
| 125 |
+
# accept the move based on self.accept_tests. If any test is False,
|
| 126 |
+
# then reject the step. If any test returns the special string
|
| 127 |
+
# 'force accept', then accept the step regardless. This can be used
|
| 128 |
+
# to forcefully escape from a local minimum if normal basin hopping
|
| 129 |
+
# steps are not sufficient.
|
| 130 |
+
accept = True
|
| 131 |
+
for test in self.accept_tests:
|
| 132 |
+
if inspect.signature(test) == _new_accept_test_signature:
|
| 133 |
+
testres = test(res_new=minres, res_old=self.incumbent_minres)
|
| 134 |
+
else:
|
| 135 |
+
testres = test(f_new=energy_after_quench, x_new=x_after_quench,
|
| 136 |
+
f_old=self.energy, x_old=self.x)
|
| 137 |
+
|
| 138 |
+
if testres == 'force accept':
|
| 139 |
+
accept = True
|
| 140 |
+
break
|
| 141 |
+
elif testres is None:
|
| 142 |
+
raise ValueError("accept_tests must return True, False, or "
|
| 143 |
+
"'force accept'")
|
| 144 |
+
elif not testres:
|
| 145 |
+
accept = False
|
| 146 |
+
|
| 147 |
+
# Report the result of the acceptance test to the take step class.
|
| 148 |
+
# This is for adaptive step taking
|
| 149 |
+
if hasattr(self.step_taking, "report"):
|
| 150 |
+
self.step_taking.report(accept, f_new=energy_after_quench,
|
| 151 |
+
x_new=x_after_quench, f_old=self.energy,
|
| 152 |
+
x_old=self.x)
|
| 153 |
+
|
| 154 |
+
return accept, minres
|
| 155 |
+
|
| 156 |
+
def one_cycle(self):
|
| 157 |
+
"""Do one cycle of the basinhopping algorithm
|
| 158 |
+
"""
|
| 159 |
+
self.nstep += 1
|
| 160 |
+
new_global_min = False
|
| 161 |
+
|
| 162 |
+
accept, minres = self._monte_carlo_step()
|
| 163 |
+
|
| 164 |
+
if accept:
|
| 165 |
+
self.energy = minres.fun
|
| 166 |
+
self.x = np.copy(minres.x)
|
| 167 |
+
self.incumbent_minres = minres # best minimize result found so far
|
| 168 |
+
new_global_min = self.storage.update(minres)
|
| 169 |
+
|
| 170 |
+
# print some information
|
| 171 |
+
if self.disp:
|
| 172 |
+
self.print_report(minres.fun, accept)
|
| 173 |
+
if new_global_min:
|
| 174 |
+
print("found new global minimum on step %d with function"
|
| 175 |
+
" value %g" % (self.nstep, self.energy))
|
| 176 |
+
|
| 177 |
+
# save some variables as BasinHoppingRunner attributes
|
| 178 |
+
self.xtrial = minres.x
|
| 179 |
+
self.energy_trial = minres.fun
|
| 180 |
+
self.accept = accept
|
| 181 |
+
|
| 182 |
+
return new_global_min
|
| 183 |
+
|
| 184 |
+
def print_report(self, energy_trial, accept):
|
| 185 |
+
"""print a status update"""
|
| 186 |
+
minres = self.storage.get_lowest()
|
| 187 |
+
print("basinhopping step %d: f %g trial_f %g accepted %d "
|
| 188 |
+
" lowest_f %g" % (self.nstep, self.energy, energy_trial,
|
| 189 |
+
accept, minres.fun))
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class AdaptiveStepsize:
|
| 193 |
+
"""
|
| 194 |
+
Class to implement adaptive stepsize.
|
| 195 |
+
|
| 196 |
+
This class wraps the step taking class and modifies the stepsize to
|
| 197 |
+
ensure the true acceptance rate is as close as possible to the target.
|
| 198 |
+
|
| 199 |
+
Parameters
|
| 200 |
+
----------
|
| 201 |
+
takestep : callable
|
| 202 |
+
The step taking routine. Must contain modifiable attribute
|
| 203 |
+
takestep.stepsize
|
| 204 |
+
accept_rate : float, optional
|
| 205 |
+
The target step acceptance rate
|
| 206 |
+
interval : int, optional
|
| 207 |
+
Interval for how often to update the stepsize
|
| 208 |
+
factor : float, optional
|
| 209 |
+
The step size is multiplied or divided by this factor upon each
|
| 210 |
+
update.
|
| 211 |
+
verbose : bool, optional
|
| 212 |
+
Print information about each update
|
| 213 |
+
|
| 214 |
+
"""
|
| 215 |
+
def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9,
|
| 216 |
+
verbose=True):
|
| 217 |
+
self.takestep = takestep
|
| 218 |
+
self.target_accept_rate = accept_rate
|
| 219 |
+
self.interval = interval
|
| 220 |
+
self.factor = factor
|
| 221 |
+
self.verbose = verbose
|
| 222 |
+
|
| 223 |
+
self.nstep = 0
|
| 224 |
+
self.nstep_tot = 0
|
| 225 |
+
self.naccept = 0
|
| 226 |
+
|
| 227 |
+
def __call__(self, x):
|
| 228 |
+
return self.take_step(x)
|
| 229 |
+
|
| 230 |
+
def _adjust_step_size(self):
|
| 231 |
+
old_stepsize = self.takestep.stepsize
|
| 232 |
+
accept_rate = float(self.naccept) / self.nstep
|
| 233 |
+
if accept_rate > self.target_accept_rate:
|
| 234 |
+
# We're accepting too many steps. This generally means we're
|
| 235 |
+
# trapped in a basin. Take bigger steps.
|
| 236 |
+
self.takestep.stepsize /= self.factor
|
| 237 |
+
else:
|
| 238 |
+
# We're not accepting enough steps. Take smaller steps.
|
| 239 |
+
self.takestep.stepsize *= self.factor
|
| 240 |
+
if self.verbose:
|
| 241 |
+
print(f"adaptive stepsize: acceptance rate {accept_rate:f} target "
|
| 242 |
+
f"{self.target_accept_rate:f} new stepsize "
|
| 243 |
+
f"{self.takestep.stepsize:g} old stepsize {old_stepsize:g}")
|
| 244 |
+
|
| 245 |
+
def take_step(self, x):
|
| 246 |
+
self.nstep += 1
|
| 247 |
+
self.nstep_tot += 1
|
| 248 |
+
if self.nstep % self.interval == 0:
|
| 249 |
+
self._adjust_step_size()
|
| 250 |
+
return self.takestep(x)
|
| 251 |
+
|
| 252 |
+
def report(self, accept, **kwargs):
|
| 253 |
+
"called by basinhopping to report the result of the step"
|
| 254 |
+
if accept:
|
| 255 |
+
self.naccept += 1
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
class RandomDisplacement:
|
| 259 |
+
"""Add a random displacement of maximum size `stepsize` to each coordinate.
|
| 260 |
+
|
| 261 |
+
Calling this updates `x` in-place.
|
| 262 |
+
|
| 263 |
+
Parameters
|
| 264 |
+
----------
|
| 265 |
+
stepsize : float, optional
|
| 266 |
+
Maximum stepsize in any dimension
|
| 267 |
+
random_gen : {None, int, `numpy.random.Generator`,
|
| 268 |
+
`numpy.random.RandomState`}, optional
|
| 269 |
+
|
| 270 |
+
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
|
| 271 |
+
singleton is used.
|
| 272 |
+
If `seed` is an int, a new ``RandomState`` instance is used,
|
| 273 |
+
seeded with `seed`.
|
| 274 |
+
If `seed` is already a ``Generator`` or ``RandomState`` instance then
|
| 275 |
+
that instance is used.
|
| 276 |
+
|
| 277 |
+
"""
|
| 278 |
+
|
| 279 |
+
def __init__(self, stepsize=0.5, random_gen=None):
|
| 280 |
+
self.stepsize = stepsize
|
| 281 |
+
self.random_gen = check_random_state(random_gen)
|
| 282 |
+
|
| 283 |
+
def __call__(self, x):
|
| 284 |
+
x += self.random_gen.uniform(-self.stepsize, self.stepsize,
|
| 285 |
+
np.shape(x))
|
| 286 |
+
return x
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class MinimizerWrapper:
|
| 290 |
+
"""
|
| 291 |
+
wrap a minimizer function as a minimizer class
|
| 292 |
+
"""
|
| 293 |
+
def __init__(self, minimizer, func=None, **kwargs):
|
| 294 |
+
self.minimizer = minimizer
|
| 295 |
+
self.func = func
|
| 296 |
+
self.kwargs = kwargs
|
| 297 |
+
|
| 298 |
+
def __call__(self, x0):
|
| 299 |
+
if self.func is None:
|
| 300 |
+
return self.minimizer(x0, **self.kwargs)
|
| 301 |
+
else:
|
| 302 |
+
return self.minimizer(self.func, x0, **self.kwargs)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class Metropolis:
|
| 306 |
+
"""Metropolis acceptance criterion.
|
| 307 |
+
|
| 308 |
+
Parameters
|
| 309 |
+
----------
|
| 310 |
+
T : float
|
| 311 |
+
The "temperature" parameter for the accept or reject criterion.
|
| 312 |
+
random_gen : {None, int, `numpy.random.Generator`,
|
| 313 |
+
`numpy.random.RandomState`}, optional
|
| 314 |
+
|
| 315 |
+
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
|
| 316 |
+
singleton is used.
|
| 317 |
+
If `seed` is an int, a new ``RandomState`` instance is used,
|
| 318 |
+
seeded with `seed`.
|
| 319 |
+
If `seed` is already a ``Generator`` or ``RandomState`` instance then
|
| 320 |
+
that instance is used.
|
| 321 |
+
Random number generator used for acceptance test.
|
| 322 |
+
|
| 323 |
+
"""
|
| 324 |
+
|
| 325 |
+
def __init__(self, T, random_gen=None):
|
| 326 |
+
# Avoid ZeroDivisionError since "MBH can be regarded as a special case
|
| 327 |
+
# of the BH framework with the Metropolis criterion, where temperature
|
| 328 |
+
# T = 0." (Reject all steps that increase energy.)
|
| 329 |
+
self.beta = 1.0 / T if T != 0 else float('inf')
|
| 330 |
+
self.random_gen = check_random_state(random_gen)
|
| 331 |
+
|
| 332 |
+
def accept_reject(self, res_new, res_old):
|
| 333 |
+
"""
|
| 334 |
+
Assuming the local search underlying res_new was successful:
|
| 335 |
+
If new energy is lower than old, it will always be accepted.
|
| 336 |
+
If new is higher than old, there is a chance it will be accepted,
|
| 337 |
+
less likely for larger differences.
|
| 338 |
+
"""
|
| 339 |
+
with np.errstate(invalid='ignore'):
|
| 340 |
+
# The energy values being fed to Metropolis are 1-length arrays, and if
|
| 341 |
+
# they are equal, their difference is 0, which gets multiplied by beta,
|
| 342 |
+
# which is inf, and array([0]) * float('inf') causes
|
| 343 |
+
#
|
| 344 |
+
# RuntimeWarning: invalid value encountered in multiply
|
| 345 |
+
#
|
| 346 |
+
# Ignore this warning so when the algorithm is on a flat plane, it always
|
| 347 |
+
# accepts the step, to try to move off the plane.
|
| 348 |
+
prod = -(res_new.fun - res_old.fun) * self.beta
|
| 349 |
+
w = math.exp(min(0, prod))
|
| 350 |
+
|
| 351 |
+
rand = self.random_gen.uniform()
|
| 352 |
+
return w >= rand and (res_new.success or not res_old.success)
|
| 353 |
+
|
| 354 |
+
def __call__(self, *, res_new, res_old):
|
| 355 |
+
"""
|
| 356 |
+
f_new and f_old are mandatory in kwargs
|
| 357 |
+
"""
|
| 358 |
+
return bool(self.accept_reject(res_new, res_old))
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,
                 minimizer_kwargs=None, take_step=None, accept_test=None,
                 callback=None, interval=50, disp=False, niter_success=None,
                 seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9):
    """Find the global minimum of a function using the basin-hopping algorithm.

    Basin-hopping is a two-phase method that combines a global stepping
    algorithm with local minimization at each step. Designed to mimic
    the natural process of energy minimization of clusters of atoms, it works
    well for similar problems with "funnel-like, but rugged" energy landscapes
    [5]_.

    As the step-taking, step acceptance, and minimization methods are all
    customizable, this function can also be used to implement other two-phase
    methods.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        Function to be optimized. ``args`` can be passed as an optional item
        in the dict `minimizer_kwargs`
    x0 : array_like
        Initial guess.
    niter : integer, optional
        The number of basin-hopping iterations. There will be a total of
        ``niter + 1`` runs of the local minimizer.
    T : float, optional
        The "temperature" parameter for the acceptance or rejection criterion.
        Higher "temperatures" mean that larger jumps in function value will be
        accepted. For best results `T` should be comparable to the
        separation (in function value) between local minima.
    stepsize : float, optional
        Maximum step size for use in the random displacement.
    minimizer_kwargs : dict, optional
        Extra keyword arguments to be passed to the local minimizer
        `scipy.optimize.minimize` Some important options could be:

            method : str
                The minimization method (e.g. ``"L-BFGS-B"``)
            args : tuple
                Extra arguments passed to the objective function (`func`) and
                its derivatives (Jacobian, Hessian).

    take_step : callable ``take_step(x)``, optional
        Replace the default step-taking routine with this routine. The default
        step-taking routine is a random displacement of the coordinates, but
        other step-taking algorithms may be better for some systems.
        `take_step` can optionally have the attribute ``take_step.stepsize``.
        If this attribute exists, then `basinhopping` will adjust
        ``take_step.stepsize`` in order to try to optimize the global minimum
        search.
    accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional
        Define a test which will be used to judge whether to accept the
        step. This will be used in addition to the Metropolis test based on
        "temperature" `T`. The acceptable return values are True,
        False, or ``"force accept"``. If any of the tests return False
        then the step is rejected. If the latter, then this will override any
        other tests in order to accept the step. This can be used, for example,
        to forcefully escape from a local minimum that `basinhopping` is
        trapped in.
    callback : callable, ``callback(x, f, accept)``, optional
        A callback function which will be called for all minima found. ``x``
        and ``f`` are the coordinates and function value of the trial minimum,
        and ``accept`` is whether that minimum was accepted. This can
        be used, for example, to save the lowest N minima found. Also,
        `callback` can be used to specify a user defined stop criterion by
        optionally returning True to stop the `basinhopping` routine.
    interval : integer, optional
        interval for how often to update the `stepsize`
    disp : bool, optional
        Set to True to print status messages
    niter_success : integer, optional
        Stop the run if the global minimum candidate remains the same for this
        number of iterations.
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        Specify `seed` for repeatable minimizations. The random numbers
        generated with this seed only affect the default Metropolis
        `accept_test` and the default `take_step`. If you supply your own
        `take_step` and `accept_test`, and these functions use random
        number generation, then those functions are responsible for the state
        of their random number generator.
    target_accept_rate : float, optional
        The target acceptance rate that is used to adjust the `stepsize`.
        If the current acceptance rate is greater than the target,
        then the `stepsize` is increased. Otherwise, it is decreased.
        Range is (0, 1). Default is 0.5.

        .. versionadded:: 1.8.0

    stepwise_factor : float, optional
        The `stepsize` is multiplied or divided by this stepwise factor upon
        each update. Range is (0, 1). Default is 0.9.

        .. versionadded:: 1.8.0

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a `OptimizeResult` object.
        Important attributes are: ``x`` the solution array, ``fun`` the value
        of the function at the solution, and ``message`` which describes the
        cause of the termination. The ``OptimizeResult`` object returned by the
        selected minimizer at the lowest minimum is also contained within this
        object and can be accessed through the ``lowest_optimization_result``
        attribute. See `OptimizeResult` for a description of other attributes.

    See Also
    --------
    minimize :
        The local minimization function called once for each basinhopping step.
        `minimizer_kwargs` is passed to this routine.

    Notes
    -----
    Basin-hopping is a stochastic algorithm which attempts to find the global
    minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
    [4]_. The algorithm in its current form was described by David Wales and
    Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.

    The algorithm is iterative with each cycle composed of the following
    features

    1) random perturbation of the coordinates

    2) local minimization

    3) accept or reject the new coordinates based on the minimized function
       value

    The acceptance test used here is the Metropolis criterion of standard Monte
    Carlo algorithms, although there are many other possibilities [3]_.

    This global minimization method has been shown to be extremely efficient
    for a wide variety of problems in physics and chemistry. It is
    particularly useful when the function has many minima separated by large
    barriers. See the `Cambridge Cluster Database
    <https://www-wales.ch.cam.ac.uk/CCD.html>`_ for databases of molecular
    systems that have been optimized primarily using basin-hopping. This
    database includes minimization problems exceeding 300 degrees of freedom.

    See the free software program `GMIN <https://www-wales.ch.cam.ac.uk/GMIN>`_
    for a Fortran implementation of basin-hopping. This implementation has many
    variations of the procedure described above, including more
    advanced step taking algorithms and alternate acceptance criterion.

    For stochastic global optimization there is no way to determine if the true
    global minimum has actually been found. Instead, as a consistency check,
    the algorithm can be run from a number of different random starting points
    to ensure the lowest minimum found in each example has converged to the
    global minimum. For this reason, `basinhopping` will by default simply
    run for the number of iterations `niter` and return the lowest minimum
    found. It is left to the user to ensure that this is in fact the global
    minimum.

    Choosing `stepsize`: This is a crucial parameter in `basinhopping` and
    depends on the problem being solved. The step is chosen uniformly in the
    region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it
    should be comparable to the typical separation (in argument values) between
    local minima of the function being optimized. `basinhopping` will, by
    default, adjust `stepsize` to find an optimal value, but this may take
    many iterations. You will get quicker results if you set a sensible
    initial value for ``stepsize``.

    Choosing `T`: The parameter `T` is the "temperature" used in the
    Metropolis criterion. Basinhopping steps are always accepted if
    ``func(xnew) < func(xold)``. Otherwise, they are accepted with
    probability::

        exp( -(func(xnew) - func(xold)) / T )

    So, for best results, `T` should to be comparable to the typical
    difference (in function values) between local minima. (The height of
    "walls" between local minima is irrelevant.)

    If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
    steps that increase energy are rejected.

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
        Cambridge, UK.
    .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
        the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
        110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111.
    .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
        multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
        1987, 84, 6611.
    .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
        crystals, and biomolecules, Science, 1999, 285, 1368.
    .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as
        a General and Versatile Optimization Framework for the Characterization
        of Biological Macromolecules, Advances in Artificial Intelligence,
        Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832`

    Examples
    --------
    The following example is a 1-D minimization problem, with many
    local minima superimposed on a parabola.

    >>> import numpy as np
    >>> from scipy.optimize import basinhopping
    >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
    >>> x0 = [1.]

    Basinhopping, internally, uses a local minimization algorithm. We will use
    the parameter `minimizer_kwargs` to tell basinhopping which algorithm to
    use and how to set up that minimizer. This parameter will be passed to
    `scipy.optimize.minimize`.

    >>> minimizer_kwargs = {"method": "BFGS"}
    >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=200)
    >>> # the global minimum is:
    >>> ret.x, ret.fun
    -0.1951, -1.0009

    Next consider a 2-D minimization problem. Also, this time, we
    will use gradient information to significantly speed up the search.

    >>> def func2d(x):
    ...     f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +
    ...                                                            0.2) * x[0]
    ...     df = np.zeros(2)
    ...     df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
    ...     df[1] = 2. * x[1] + 0.2
    ...     return f, df

    We'll also use a different local minimization algorithm. Also, we must tell
    the minimizer that our function returns both energy and gradient (Jacobian).

    >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
    >>> x0 = [1.0, 1.0]
    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=200)
    >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
    ...                                                          ret.x[1],
    ...                                                          ret.fun))
    global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109

    Here is an example using a custom step-taking routine. Imagine you want
    the first coordinate to take larger steps than the rest of the coordinates.
    This can be implemented like so:

    >>> class MyTakeStep:
    ...    def __init__(self, stepsize=0.5):
    ...        self.stepsize = stepsize
    ...        self.rng = np.random.default_rng()
    ...    def __call__(self, x):
    ...        s = self.stepsize
    ...        x[0] += self.rng.uniform(-2.*s, 2.*s)
    ...        x[1:] += self.rng.uniform(-s, s, x[1:].shape)
    ...        return x

    Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude
    of `stepsize` to optimize the search. We'll use the same 2-D function as
    before

    >>> mytakestep = MyTakeStep()
    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=200, take_step=mytakestep)
    >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
    ...                                                          ret.x[1],
    ...                                                          ret.fun))
    global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109

    Now, let's do an example using a custom callback function which prints the
    value of every minimum found

    >>> def print_fun(x, f, accepted):
    ...         print("at minimum %.4f accepted %d" % (f, int(accepted)))

    We'll run it for only 10 basinhopping steps this time.

    >>> rng = np.random.default_rng()
    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
    ...                    niter=10, callback=print_fun, seed=rng)
    at minimum 0.4159 accepted 1
    at minimum -0.4317 accepted 1
    at minimum -1.0109 accepted 1
    at minimum -0.9073 accepted 1
    at minimum -0.4317 accepted 0
    at minimum -0.1021 accepted 1
    at minimum -0.7425 accepted 1
    at minimum -0.9073 accepted 1
    at minimum -0.4317 accepted 0
    at minimum -0.7425 accepted 1
    at minimum -0.9073 accepted 1

    The minimum at -1.0109 is actually the global minimum, found already on the
    8th iteration.

    """  # numpy/numpydoc#87  # noqa: E501
    # Validate the keyword-only stepsize-adaptation parameters up front.
    if target_accept_rate <= 0. or target_accept_rate >= 1.:
        raise ValueError('target_accept_rate has to be in range (0, 1)')
    if stepwise_factor <= 0. or stepwise_factor >= 1.:
        raise ValueError('stepwise_factor has to be in range (0, 1)')

    x0 = np.array(x0)

    # set up the np.random generator
    rng = check_random_state(seed)

    # set up minimizer
    if minimizer_kwargs is None:
        minimizer_kwargs = dict()
    wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
                                         **minimizer_kwargs)

    # set up step-taking algorithm
    if take_step is not None:
        if not callable(take_step):
            raise TypeError("take_step must be callable")
        # if take_step.stepsize exists then use AdaptiveStepsize to control
        # take_step.stepsize
        if hasattr(take_step, "stepsize"):
            take_step_wrapped = AdaptiveStepsize(
                take_step, interval=interval,
                accept_rate=target_accept_rate,
                factor=stepwise_factor,
                verbose=disp)
        else:
            take_step_wrapped = take_step
    else:
        # use default
        displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
        take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
                                             accept_rate=target_accept_rate,
                                             factor=stepwise_factor,
                                             verbose=disp)

    # set up accept tests
    accept_tests = []
    if accept_test is not None:
        if not callable(accept_test):
            raise TypeError("accept_test must be callable")
        accept_tests = [accept_test]

    # use default: the Metropolis criterion is always appended so it runs in
    # addition to any user-supplied accept_test.
    metropolis = Metropolis(T, random_gen=rng)
    accept_tests.append(metropolis)

    if niter_success is None:
        # niter + 2 can never be reached within niter iterations, so the
        # "same minimum for niter_success iterations" stop is disabled.
        niter_success = niter + 2

    bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
                            accept_tests, disp=disp)

    # The wrapped minimizer is called once during construction of
    # BasinHoppingRunner, so run the callback
    if callable(callback):
        callback(bh.storage.minres.x, bh.storage.minres.fun, True)

    # start main iteration loop
    count, i = 0, 0
    message = ["requested number of basinhopping iterations completed"
               " successfully"]
    for i in range(niter):
        new_global_min = bh.one_cycle()

        if callable(callback):
            # should we pass a copy of x?
            val = callback(bh.xtrial, bh.energy_trial, bh.accept)
            if val is not None:
                if val:
                    # BUG FIX: the two adjacent literals previously joined to
                    # "...stop early byreturning True" (missing space).
                    message = ["callback function requested stop early by"
                               " returning True"]
                    break

        count += 1
        if new_global_min:
            count = 0
        elif count > niter_success:
            message = ["success condition satisfied"]
            break

    # prepare return object
    res = bh.res
    res.lowest_optimization_result = bh.storage.get_lowest()
    res.x = np.copy(res.lowest_optimization_result.x)
    res.fun = res.lowest_optimization_result.fun
    res.message = message
    res.nit = i + 1
    res.success = res.lowest_optimization_result.success
    return res
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:416e378eb1a89181832672ab92b32ec9d9c9c4da4eec9cfdfc7551ab8e7ee113
|
| 3 |
+
size 100545
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyqa_py.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from ._optimize import _check_unknown_options
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _minimize_cobyqa(fun, x0, args=(), bounds=None, constraints=(),
                     callback=None, disp=False, maxfev=None, maxiter=None,
                     f_target=-np.inf, feasibility_tol=1e-8,
                     initial_tr_radius=1.0, final_tr_radius=1e-6, scale=False,
                     **unknown_options):
    """
    Minimize a scalar function of one or more variables using the
    Constrained Optimization BY Quadratic Approximations (COBYQA) algorithm [1]_.

    .. versionadded:: 1.14.0

    Options
    -------
    disp : bool
        Set to True to print information about the optimization procedure.
    maxfev : int
        Maximum number of function evaluations.
    maxiter : int
        Maximum number of iterations.
    f_target : float
        Target value for the objective function. The optimization procedure is
        terminated when the objective function value of a feasible point (see
        `feasibility_tol` below) is less than or equal to this target.
    feasibility_tol : float
        Absolute tolerance for the constraint violation.
    initial_tr_radius : float
        Initial trust-region radius. Typically, this value should be in the
        order of one tenth of the greatest expected change to the variables.
    final_tr_radius : float
        Final trust-region radius. It should indicate the accuracy required in
        the final values of the variables. If provided, this option overrides
        the value of `tol` in the `minimize` function.
    scale : bool
        Set to True to scale the variables according to the bounds. If True and
        if all the lower and upper bounds are finite, the variables are scaled
        to be within the range :math:`[-1, 1]`. If any of the lower or upper
        bounds is infinite, the variables are not scaled.

    References
    ----------
    .. [1] COBYQA
           https://www.cobyqa.com/stable/
    """
    from .._lib.cobyqa import minimize  # import here to avoid circular imports

    _check_unknown_options(unknown_options)

    # Budgets default to multiples of the problem dimension when the caller
    # did not supply explicit limits.
    n = len(x0)
    max_evaluations = 500 * n if maxfev is None else int(maxfev)
    max_iterations = 1000 * n if maxiter is None else int(maxiter)

    # Translate the scipy-facing option names to the names the COBYQA
    # package expects (e.g. initial_tr_radius -> radius_init).
    options = dict(
        disp=bool(disp),
        maxfev=max_evaluations,
        maxiter=max_iterations,
        target=float(f_target),
        feasibility_tol=float(feasibility_tol),
        radius_init=float(initial_tr_radius),
        radius_final=float(final_tr_radius),
        scale=bool(scale),
    )
    return minimize(fun, x0, args, bounds, constraints, callback, options)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_direct_py.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
from typing import ( # noqa: UP035
|
| 3 |
+
Any, Callable, Iterable, TYPE_CHECKING
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from scipy.optimize import OptimizeResult
|
| 8 |
+
from ._constraints import old_bound_to_new, Bounds
|
| 9 |
+
from ._direct import direct as _direct # type: ignore
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
import numpy.typing as npt
|
| 13 |
+
|
| 14 |
+
__all__ = ['direct']
|
| 15 |
+
|
| 16 |
+
# Human-readable termination messages used to build OptimizeResult.message.
# NOTE(review): the tuple ordering is assumed to correspond to the status
# codes returned by the compiled `_direct` routine — confirm against the
# `_direct` extension before reordering. Entries with `{}` are later filled
# via str.format with the relevant option value (maxfun, maxiter, ...).
ERROR_MESSAGES = (
    "Number of function evaluations done is larger than maxfun={}",
    "Number of iterations is larger than maxiter={}",
    "u[i] < l[i] for some i",
    "maxfun is too large",
    "Initialization failed",
    "There was an error in the creation of the sample points",
    "An error occurred while the function was sampled",
    "Maximum number of levels has been reached.",
    "Forced stop",
    "Invalid arguments",
    "Out of memory",
)

# Messages for the successful termination criteria: relative error vs. a
# known global optimum (f_min/f_min_rtol), volume tolerance (vol_tol), and
# side-length tolerance (len_tol), respectively.
SUCCESS_MESSAGES = (
    ("The best function value found is within a relative error={} "
     "of the (known) global optimum f_min"),
    ("The volume of the hyperrectangle containing the lowest function value "
     "found is below vol_tol={}"),
    ("The side length measure of the hyperrectangle containing the lowest "
     "function value found is below len_tol={}"),
)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def direct(
|
| 41 |
+
func: Callable[[npt.ArrayLike, tuple[Any]], float],
|
| 42 |
+
bounds: Iterable | Bounds,
|
| 43 |
+
*,
|
| 44 |
+
args: tuple = (),
|
| 45 |
+
eps: float = 1e-4,
|
| 46 |
+
maxfun: int | None = None,
|
| 47 |
+
maxiter: int = 1000,
|
| 48 |
+
locally_biased: bool = True,
|
| 49 |
+
f_min: float = -np.inf,
|
| 50 |
+
f_min_rtol: float = 1e-4,
|
| 51 |
+
vol_tol: float = 1e-16,
|
| 52 |
+
len_tol: float = 1e-6,
|
| 53 |
+
callback: Callable[[npt.ArrayLike], None] | None = None
|
| 54 |
+
) -> OptimizeResult:
|
| 55 |
+
"""
|
| 56 |
+
Finds the global minimum of a function using the
|
| 57 |
+
DIRECT algorithm.
|
| 58 |
+
|
| 59 |
+
Parameters
|
| 60 |
+
----------
|
| 61 |
+
func : callable
|
| 62 |
+
The objective function to be minimized.
|
| 63 |
+
``func(x, *args) -> float``
|
| 64 |
+
where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of
|
| 65 |
+
the fixed parameters needed to completely specify the function.
|
| 66 |
+
bounds : sequence or `Bounds`
|
| 67 |
+
Bounds for variables. There are two ways to specify the bounds:
|
| 68 |
+
|
| 69 |
+
1. Instance of `Bounds` class.
|
| 70 |
+
2. ``(min, max)`` pairs for each element in ``x``.
|
| 71 |
+
|
| 72 |
+
args : tuple, optional
|
| 73 |
+
Any additional fixed parameters needed to
|
| 74 |
+
completely specify the objective function.
|
| 75 |
+
eps : float, optional
|
| 76 |
+
Minimal required difference of the objective function values
|
| 77 |
+
between the current best hyperrectangle and the next potentially
|
| 78 |
+
optimal hyperrectangle to be divided. In consequence, `eps` serves as a
|
| 79 |
+
tradeoff between local and global search: the smaller, the more local
|
| 80 |
+
the search becomes. Default is 1e-4.
|
| 81 |
+
maxfun : int or None, optional
|
| 82 |
+
Approximate upper bound on objective function evaluations.
|
| 83 |
+
If `None`, will be automatically set to ``1000 * N`` where ``N``
|
| 84 |
+
represents the number of dimensions. Will be capped if necessary to
|
| 85 |
+
limit DIRECT's RAM usage to app. 1GiB. This will only occur for very
|
| 86 |
+
high dimensional problems and excessive `max_fun`. Default is `None`.
|
| 87 |
+
maxiter : int, optional
|
| 88 |
+
Maximum number of iterations. Default is 1000.
|
| 89 |
+
locally_biased : bool, optional
|
| 90 |
+
If `True` (default), use the locally biased variant of the
|
| 91 |
+
algorithm known as DIRECT_L. If `False`, use the original unbiased
|
| 92 |
+
DIRECT algorithm. For hard problems with many local minima,
|
| 93 |
+
`False` is recommended.
|
| 94 |
+
f_min : float, optional
|
| 95 |
+
Function value of the global optimum. Set this value only if the
|
| 96 |
+
global optimum is known. Default is ``-np.inf``, so that this
|
| 97 |
+
termination criterion is deactivated.
|
| 98 |
+
f_min_rtol : float, optional
|
| 99 |
+
Terminate the optimization once the relative error between the
|
| 100 |
+
current best minimum `f` and the supplied global minimum `f_min`
|
| 101 |
+
is smaller than `f_min_rtol`. This parameter is only used if
|
| 102 |
+
`f_min` is also set. Must lie between 0 and 1. Default is 1e-4.
|
| 103 |
+
vol_tol : float, optional
|
| 104 |
+
Terminate the optimization once the volume of the hyperrectangle
|
| 105 |
+
containing the lowest function value is smaller than `vol_tol`
|
| 106 |
+
of the complete search space. Must lie between 0 and 1.
|
| 107 |
+
Default is 1e-16.
|
| 108 |
+
len_tol : float, optional
|
| 109 |
+
If `locally_biased=True`, terminate the optimization once half of
|
| 110 |
+
the normalized maximal side length of the hyperrectangle containing
|
| 111 |
+
the lowest function value is smaller than `len_tol`.
|
| 112 |
+
If `locally_biased=False`, terminate the optimization once half of
|
| 113 |
+
the normalized diagonal of the hyperrectangle containing the lowest
|
| 114 |
+
function value is smaller than `len_tol`. Must lie between 0 and 1.
|
| 115 |
+
Default is 1e-6.
|
| 116 |
+
callback : callable, optional
|
| 117 |
+
A callback function with signature ``callback(xk)`` where ``xk``
|
| 118 |
+
represents the best function value found so far.
|
| 119 |
+
|
| 120 |
+
Returns
|
| 121 |
+
-------
|
| 122 |
+
res : OptimizeResult
|
| 123 |
+
The optimization result represented as a ``OptimizeResult`` object.
|
| 124 |
+
Important attributes are: ``x`` the solution array, ``success`` a
|
| 125 |
+
Boolean flag indicating if the optimizer exited successfully and
|
| 126 |
+
``message`` which describes the cause of the termination. See
|
| 127 |
+
`OptimizeResult` for a description of other attributes.
|
| 128 |
+
|
| 129 |
+
Notes
|
| 130 |
+
-----
|
| 131 |
+
DIviding RECTangles (DIRECT) is a deterministic global
|
| 132 |
+
optimization algorithm capable of minimizing a black box function with
|
| 133 |
+
its variables subject to lower and upper bound constraints by sampling
|
| 134 |
+
potential solutions in the search space [1]_. The algorithm starts by
|
| 135 |
+
normalising the search space to an n-dimensional unit hypercube.
|
| 136 |
+
It samples the function at the center of this hypercube and at 2n
|
| 137 |
+
(n is the number of variables) more points, 2 in each coordinate
|
| 138 |
+
direction. Using these function values, DIRECT then divides the
|
| 139 |
+
domain into hyperrectangles, each having exactly one of the sampling
|
| 140 |
+
points as its center. In each iteration, DIRECT chooses, using the `eps`
|
| 141 |
+
parameter which defaults to 1e-4, some of the existing hyperrectangles
|
| 142 |
+
to be further divided. This division process continues until either the
|
| 143 |
+
maximum number of iterations or maximum function evaluations allowed
|
| 144 |
+
are exceeded, or the hyperrectangle containing the minimal value found
|
| 145 |
+
so far becomes small enough. If `f_min` is specified, the optimization
|
| 146 |
+
will stop once this function value is reached within a relative tolerance.
|
| 147 |
+
The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is
|
| 148 |
+
used by default. It makes the search more locally biased and more
|
| 149 |
+
efficient for cases with only a few local minima.
|
| 150 |
+
|
| 151 |
+
A note about termination criteria: `vol_tol` refers to the volume of the
|
| 152 |
+
hyperrectangle containing the lowest function value found so far. This
|
| 153 |
+
volume decreases exponentially with increasing dimensionality of the
|
| 154 |
+
problem. Therefore `vol_tol` should be decreased to avoid premature
|
| 155 |
+
termination of the algorithm for higher dimensions. This does not hold
|
| 156 |
+
for `len_tol`: it refers either to half of the maximal side length
|
| 157 |
+
(for ``locally_biased=True``) or half of the diagonal of the
|
| 158 |
+
hyperrectangle (for ``locally_biased=False``).
|
| 159 |
+
|
| 160 |
+
This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at
|
| 161 |
+
https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz .
|
| 162 |
+
This original version was initially converted via f2c and then cleaned up
|
| 163 |
+
and reorganized by Steven G. Johnson, August 2007, for the NLopt project.
|
| 164 |
+
The `direct` function wraps the C implementation.
|
| 165 |
+
|
| 166 |
+
.. versionadded:: 1.9.0
|
| 167 |
+
|
| 168 |
+
References
|
| 169 |
+
----------
|
| 170 |
+
.. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian
|
| 171 |
+
optimization without the Lipschitz constant. J Optim Theory Appl
|
| 172 |
+
79, 157-181 (1993).
|
| 173 |
+
.. [2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT
|
| 174 |
+
Algorithm. Journal of Global Optimization 21, 27-37 (2001).
|
| 175 |
+
|
| 176 |
+
Examples
|
| 177 |
+
--------
|
| 178 |
+
The following example is a 2-D problem with four local minima: minimizing
|
| 179 |
+
the Styblinski-Tang function
|
| 180 |
+
(https://en.wikipedia.org/wiki/Test_functions_for_optimization).
|
| 181 |
+
|
| 182 |
+
>>> from scipy.optimize import direct, Bounds
|
| 183 |
+
>>> def styblinski_tang(pos):
|
| 184 |
+
... x, y = pos
|
| 185 |
+
... return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y)
|
| 186 |
+
>>> bounds = Bounds([-4., -4.], [4., 4.])
|
| 187 |
+
>>> result = direct(styblinski_tang, bounds)
|
| 188 |
+
>>> result.x, result.fun, result.nfev
|
| 189 |
+
array([-2.90321597, -2.90321597]), -78.3323279095383, 2011
|
| 190 |
+
|
| 191 |
+
The correct global minimum was found but with a huge number of function
|
| 192 |
+
evaluations (2011). Loosening the termination tolerances `vol_tol` and
|
| 193 |
+
`len_tol` can be used to stop DIRECT earlier.
|
| 194 |
+
|
| 195 |
+
>>> result = direct(styblinski_tang, bounds, len_tol=1e-3)
|
| 196 |
+
>>> result.x, result.fun, result.nfev
|
| 197 |
+
array([-2.9044353, -2.9044353]), -78.33230330754142, 207
|
| 198 |
+
|
| 199 |
+
"""
|
| 200 |
+
# convert bounds to new Bounds class if necessary
|
| 201 |
+
if not isinstance(bounds, Bounds):
|
| 202 |
+
if isinstance(bounds, list) or isinstance(bounds, tuple):
|
| 203 |
+
lb, ub = old_bound_to_new(bounds)
|
| 204 |
+
bounds = Bounds(lb, ub)
|
| 205 |
+
else:
|
| 206 |
+
message = ("bounds must be a sequence or "
|
| 207 |
+
"instance of Bounds class")
|
| 208 |
+
raise ValueError(message)
|
| 209 |
+
|
| 210 |
+
lb = np.ascontiguousarray(bounds.lb, dtype=np.float64)
|
| 211 |
+
ub = np.ascontiguousarray(bounds.ub, dtype=np.float64)
|
| 212 |
+
|
| 213 |
+
# validate bounds
|
| 214 |
+
# check that lower bounds are smaller than upper bounds
|
| 215 |
+
if not np.all(lb < ub):
|
| 216 |
+
raise ValueError('Bounds are not consistent min < max')
|
| 217 |
+
# check for infs
|
| 218 |
+
if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))):
|
| 219 |
+
raise ValueError("Bounds must not be inf.")
|
| 220 |
+
|
| 221 |
+
# validate tolerances
|
| 222 |
+
if (vol_tol < 0 or vol_tol > 1):
|
| 223 |
+
raise ValueError("vol_tol must be between 0 and 1.")
|
| 224 |
+
if (len_tol < 0 or len_tol > 1):
|
| 225 |
+
raise ValueError("len_tol must be between 0 and 1.")
|
| 226 |
+
if (f_min_rtol < 0 or f_min_rtol > 1):
|
| 227 |
+
raise ValueError("f_min_rtol must be between 0 and 1.")
|
| 228 |
+
|
| 229 |
+
# validate maxfun and maxiter
|
| 230 |
+
if maxfun is None:
|
| 231 |
+
maxfun = 1000 * lb.shape[0]
|
| 232 |
+
if not isinstance(maxfun, int):
|
| 233 |
+
raise ValueError("maxfun must be of type int.")
|
| 234 |
+
if maxfun < 0:
|
| 235 |
+
raise ValueError("maxfun must be > 0.")
|
| 236 |
+
if not isinstance(maxiter, int):
|
| 237 |
+
raise ValueError("maxiter must be of type int.")
|
| 238 |
+
if maxiter < 0:
|
| 239 |
+
raise ValueError("maxiter must be > 0.")
|
| 240 |
+
|
| 241 |
+
# validate boolean parameters
|
| 242 |
+
if not isinstance(locally_biased, bool):
|
| 243 |
+
raise ValueError("locally_biased must be True or False.")
|
| 244 |
+
|
| 245 |
+
def _func_wrap(x, args=None):
|
| 246 |
+
x = np.asarray(x)
|
| 247 |
+
if args is None:
|
| 248 |
+
f = func(x)
|
| 249 |
+
else:
|
| 250 |
+
f = func(x, *args)
|
| 251 |
+
# always return a float
|
| 252 |
+
return np.asarray(f).item()
|
| 253 |
+
|
| 254 |
+
# TODO: fix disp argument
|
| 255 |
+
x, fun, ret_code, nfev, nit = _direct(
|
| 256 |
+
_func_wrap,
|
| 257 |
+
np.asarray(lb), np.asarray(ub),
|
| 258 |
+
args,
|
| 259 |
+
False, eps, maxfun, maxiter,
|
| 260 |
+
locally_biased,
|
| 261 |
+
f_min, f_min_rtol,
|
| 262 |
+
vol_tol, len_tol, callback
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol)
|
| 266 |
+
if ret_code > 2:
|
| 267 |
+
message = SUCCESS_MESSAGES[ret_code - 3].format(
|
| 268 |
+
format_val[ret_code - 1])
|
| 269 |
+
elif 0 < ret_code <= 2:
|
| 270 |
+
message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1])
|
| 271 |
+
elif 0 > ret_code > -100:
|
| 272 |
+
message = ERROR_MESSAGES[abs(ret_code) + 1]
|
| 273 |
+
else:
|
| 274 |
+
message = ERROR_MESSAGES[ret_code + 99]
|
| 275 |
+
|
| 276 |
+
return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code,
|
| 277 |
+
success=ret_code > 2, message=message,
|
| 278 |
+
nfev=nfev, nit=nit)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (99.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsIO.pxd
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
cdef extern from "HighsIO.h" nogil:
|
| 5 |
+
# workaround for lack of enum class support in Cython < 3.x
|
| 6 |
+
# cdef enum class HighsLogType(int):
|
| 7 |
+
# kInfo "HighsLogType::kInfo" = 1
|
| 8 |
+
# kDetailed "HighsLogType::kDetailed"
|
| 9 |
+
# kVerbose "HighsLogType::kVerbose"
|
| 10 |
+
# kWarning "HighsLogType::kWarning"
|
| 11 |
+
# kError "HighsLogType::kError"
|
| 12 |
+
|
| 13 |
+
cdef cppclass HighsLogType:
|
| 14 |
+
pass
|
| 15 |
+
|
| 16 |
+
cdef HighsLogType kInfo "HighsLogType::kInfo"
|
| 17 |
+
cdef HighsLogType kDetailed "HighsLogType::kDetailed"
|
| 18 |
+
cdef HighsLogType kVerbose "HighsLogType::kVerbose"
|
| 19 |
+
cdef HighsLogType kWarning "HighsLogType::kWarning"
|
| 20 |
+
cdef HighsLogType kError "HighsLogType::kError"
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsInfo.pxd
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
cdef extern from "HighsInfo.h" nogil:
|
| 4 |
+
# From HiGHS/src/lp_data/HighsInfo.h
|
| 5 |
+
cdef cppclass HighsInfo:
|
| 6 |
+
# Inherited from HighsInfoStruct:
|
| 7 |
+
int mip_node_count
|
| 8 |
+
int simplex_iteration_count
|
| 9 |
+
int ipm_iteration_count
|
| 10 |
+
int crossover_iteration_count
|
| 11 |
+
int primal_solution_status
|
| 12 |
+
int dual_solution_status
|
| 13 |
+
int basis_validity
|
| 14 |
+
double objective_function_value
|
| 15 |
+
double mip_dual_bound
|
| 16 |
+
double mip_gap
|
| 17 |
+
int num_primal_infeasibilities
|
| 18 |
+
double max_primal_infeasibility
|
| 19 |
+
double sum_primal_infeasibilities
|
| 20 |
+
int num_dual_infeasibilities
|
| 21 |
+
double max_dual_infeasibility
|
| 22 |
+
double sum_dual_infeasibilities
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLp.pxd
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
from libcpp cimport bool
|
| 4 |
+
from libcpp.string cimport string
|
| 5 |
+
from libcpp.vector cimport vector
|
| 6 |
+
|
| 7 |
+
from .HConst cimport HighsBasisStatus, ObjSense, HighsVarType
|
| 8 |
+
from .HighsSparseMatrix cimport HighsSparseMatrix
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
cdef extern from "HighsLp.h" nogil:
|
| 12 |
+
# From HiGHS/src/lp_data/HighsLp.h
|
| 13 |
+
cdef cppclass HighsLp:
|
| 14 |
+
int num_col_
|
| 15 |
+
int num_row_
|
| 16 |
+
|
| 17 |
+
vector[double] col_cost_
|
| 18 |
+
vector[double] col_lower_
|
| 19 |
+
vector[double] col_upper_
|
| 20 |
+
vector[double] row_lower_
|
| 21 |
+
vector[double] row_upper_
|
| 22 |
+
|
| 23 |
+
HighsSparseMatrix a_matrix_
|
| 24 |
+
|
| 25 |
+
ObjSense sense_
|
| 26 |
+
double offset_
|
| 27 |
+
|
| 28 |
+
string model_name_
|
| 29 |
+
|
| 30 |
+
vector[string] row_names_
|
| 31 |
+
vector[string] col_names_
|
| 32 |
+
|
| 33 |
+
vector[HighsVarType] integrality_
|
| 34 |
+
|
| 35 |
+
bool isMip() const
|
| 36 |
+
|
| 37 |
+
cdef cppclass HighsSolution:
|
| 38 |
+
vector[double] col_value
|
| 39 |
+
vector[double] col_dual
|
| 40 |
+
vector[double] row_value
|
| 41 |
+
vector[double] row_dual
|
| 42 |
+
|
| 43 |
+
cdef cppclass HighsBasis:
|
| 44 |
+
bool valid_
|
| 45 |
+
vector[HighsBasisStatus] col_status
|
| 46 |
+
vector[HighsBasisStatus] row_status
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
from libcpp.string cimport string
|
| 4 |
+
|
| 5 |
+
from .HConst cimport HighsModelStatus
|
| 6 |
+
|
| 7 |
+
cdef extern from "HighsModelUtils.h" nogil:
|
| 8 |
+
# From HiGHS/src/lp_data/HighsModelUtils.h
|
| 9 |
+
string utilHighsModelStatusToString(const HighsModelStatus model_status)
|
| 10 |
+
string utilBasisStatusToString(const int primal_dual_status)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsOptions.pxd
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
from libc.stdio cimport FILE
|
| 4 |
+
|
| 5 |
+
from libcpp cimport bool
|
| 6 |
+
from libcpp.string cimport string
|
| 7 |
+
from libcpp.vector cimport vector
|
| 8 |
+
|
| 9 |
+
from .HConst cimport HighsOptionType
|
| 10 |
+
|
| 11 |
+
cdef extern from "HighsOptions.h" nogil:
|
| 12 |
+
|
| 13 |
+
cdef cppclass OptionRecord:
|
| 14 |
+
HighsOptionType type
|
| 15 |
+
string name
|
| 16 |
+
string description
|
| 17 |
+
bool advanced
|
| 18 |
+
|
| 19 |
+
cdef cppclass OptionRecordBool(OptionRecord):
|
| 20 |
+
bool* value
|
| 21 |
+
bool default_value
|
| 22 |
+
|
| 23 |
+
cdef cppclass OptionRecordInt(OptionRecord):
|
| 24 |
+
int* value
|
| 25 |
+
int lower_bound
|
| 26 |
+
int default_value
|
| 27 |
+
int upper_bound
|
| 28 |
+
|
| 29 |
+
cdef cppclass OptionRecordDouble(OptionRecord):
|
| 30 |
+
double* value
|
| 31 |
+
double lower_bound
|
| 32 |
+
double default_value
|
| 33 |
+
double upper_bound
|
| 34 |
+
|
| 35 |
+
cdef cppclass OptionRecordString(OptionRecord):
|
| 36 |
+
string* value
|
| 37 |
+
string default_value
|
| 38 |
+
|
| 39 |
+
cdef cppclass HighsOptions:
|
| 40 |
+
# From HighsOptionsStruct:
|
| 41 |
+
|
| 42 |
+
# Options read from the command line
|
| 43 |
+
string model_file
|
| 44 |
+
string presolve
|
| 45 |
+
string solver
|
| 46 |
+
string parallel
|
| 47 |
+
double time_limit
|
| 48 |
+
string options_file
|
| 49 |
+
|
| 50 |
+
# Options read from the file
|
| 51 |
+
double infinite_cost
|
| 52 |
+
double infinite_bound
|
| 53 |
+
double small_matrix_value
|
| 54 |
+
double large_matrix_value
|
| 55 |
+
double primal_feasibility_tolerance
|
| 56 |
+
double dual_feasibility_tolerance
|
| 57 |
+
double ipm_optimality_tolerance
|
| 58 |
+
double dual_objective_value_upper_bound
|
| 59 |
+
int highs_debug_level
|
| 60 |
+
int simplex_strategy
|
| 61 |
+
int simplex_scale_strategy
|
| 62 |
+
int simplex_crash_strategy
|
| 63 |
+
int simplex_dual_edge_weight_strategy
|
| 64 |
+
int simplex_primal_edge_weight_strategy
|
| 65 |
+
int simplex_iteration_limit
|
| 66 |
+
int simplex_update_limit
|
| 67 |
+
int ipm_iteration_limit
|
| 68 |
+
int highs_min_threads
|
| 69 |
+
int highs_max_threads
|
| 70 |
+
int message_level
|
| 71 |
+
string solution_file
|
| 72 |
+
bool write_solution_to_file
|
| 73 |
+
bool write_solution_pretty
|
| 74 |
+
|
| 75 |
+
# Advanced options
|
| 76 |
+
bool run_crossover
|
| 77 |
+
bool mps_parser_type_free
|
| 78 |
+
int keep_n_rows
|
| 79 |
+
int allowed_simplex_matrix_scale_factor
|
| 80 |
+
int allowed_simplex_cost_scale_factor
|
| 81 |
+
int simplex_dualise_strategy
|
| 82 |
+
int simplex_permute_strategy
|
| 83 |
+
int dual_simplex_cleanup_strategy
|
| 84 |
+
int simplex_price_strategy
|
| 85 |
+
int dual_chuzc_sort_strategy
|
| 86 |
+
bool simplex_initial_condition_check
|
| 87 |
+
double simplex_initial_condition_tolerance
|
| 88 |
+
double dual_steepest_edge_weight_log_error_threshhold
|
| 89 |
+
double dual_simplex_cost_perturbation_multiplier
|
| 90 |
+
double start_crossover_tolerance
|
| 91 |
+
bool less_infeasible_DSE_check
|
| 92 |
+
bool less_infeasible_DSE_choose_row
|
| 93 |
+
bool use_original_HFactor_logic
|
| 94 |
+
|
| 95 |
+
# Options for MIP solver
|
| 96 |
+
int mip_max_nodes
|
| 97 |
+
int mip_report_level
|
| 98 |
+
|
| 99 |
+
# Switch for MIP solver
|
| 100 |
+
bool mip
|
| 101 |
+
|
| 102 |
+
# Options for HighsPrintMessage and HighsLogMessage
|
| 103 |
+
FILE* logfile
|
| 104 |
+
FILE* output
|
| 105 |
+
int message_level
|
| 106 |
+
string solution_file
|
| 107 |
+
bool write_solution_to_file
|
| 108 |
+
bool write_solution_pretty
|
| 109 |
+
|
| 110 |
+
vector[OptionRecord*] records
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsStatus.pxd
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
from libcpp.string cimport string
|
| 4 |
+
|
| 5 |
+
cdef extern from "HighsStatus.h" nogil:
|
| 6 |
+
ctypedef enum HighsStatus:
|
| 7 |
+
HighsStatusError "HighsStatus::kError" = -1
|
| 8 |
+
HighsStatusOK "HighsStatus::kOk" = 0
|
| 9 |
+
HighsStatusWarning "HighsStatus::kWarning" = 1
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
string highsStatusToString(HighsStatus status)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/SimplexConst.pxd
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
from libcpp cimport bool
|
| 4 |
+
|
| 5 |
+
cdef extern from "SimplexConst.h" nogil:
|
| 6 |
+
|
| 7 |
+
cdef enum SimplexAlgorithm:
|
| 8 |
+
PRIMAL "SimplexAlgorithm::kPrimal" = 0
|
| 9 |
+
DUAL "SimplexAlgorithm::kDual"
|
| 10 |
+
|
| 11 |
+
cdef enum SimplexStrategy:
|
| 12 |
+
SIMPLEX_STRATEGY_MIN "SimplexStrategy::kSimplexStrategyMin" = 0
|
| 13 |
+
SIMPLEX_STRATEGY_CHOOSE "SimplexStrategy::kSimplexStrategyChoose" = SIMPLEX_STRATEGY_MIN
|
| 14 |
+
SIMPLEX_STRATEGY_DUAL "SimplexStrategy::kSimplexStrategyDual"
|
| 15 |
+
SIMPLEX_STRATEGY_DUAL_PLAIN "SimplexStrategy::kSimplexStrategyDualPlain" = SIMPLEX_STRATEGY_DUAL
|
| 16 |
+
SIMPLEX_STRATEGY_DUAL_TASKS "SimplexStrategy::kSimplexStrategyDualTasks"
|
| 17 |
+
SIMPLEX_STRATEGY_DUAL_MULTI "SimplexStrategy::kSimplexStrategyDualMulti"
|
| 18 |
+
SIMPLEX_STRATEGY_PRIMAL "SimplexStrategy::kSimplexStrategyPrimal"
|
| 19 |
+
SIMPLEX_STRATEGY_MAX "SimplexStrategy::kSimplexStrategyMax" = SIMPLEX_STRATEGY_PRIMAL
|
| 20 |
+
SIMPLEX_STRATEGY_NUM "SimplexStrategy::kSimplexStrategyNum"
|
| 21 |
+
|
| 22 |
+
cdef enum SimplexCrashStrategy:
|
| 23 |
+
SIMPLEX_CRASH_STRATEGY_MIN "SimplexCrashStrategy::kSimplexCrashStrategyMin" = 0
|
| 24 |
+
SIMPLEX_CRASH_STRATEGY_OFF "SimplexCrashStrategy::kSimplexCrashStrategyOff" = SIMPLEX_CRASH_STRATEGY_MIN
|
| 25 |
+
SIMPLEX_CRASH_STRATEGY_LTSSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtssfK"
|
| 26 |
+
SIMPLEX_CRASH_STRATEGY_LTSSF "SimplexCrashStrategy::kSimplexCrashStrategyLtssf" = SIMPLEX_CRASH_STRATEGY_LTSSF_K
|
| 27 |
+
SIMPLEX_CRASH_STRATEGY_BIXBY "SimplexCrashStrategy::kSimplexCrashStrategyBixby"
|
| 28 |
+
SIMPLEX_CRASH_STRATEGY_LTSSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtssfPri"
|
| 29 |
+
SIMPLEX_CRASH_STRATEGY_LTSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtsfK"
|
| 30 |
+
SIMPLEX_CRASH_STRATEGY_LTSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtsfPri"
|
| 31 |
+
SIMPLEX_CRASH_STRATEGY_LTSF "SimplexCrashStrategy::kSimplexCrashStrategyLtsf"
|
| 32 |
+
SIMPLEX_CRASH_STRATEGY_BIXBY_NO_NONZERO_COL_COSTS "SimplexCrashStrategy::kSimplexCrashStrategyBixbyNoNonzeroColCosts"
|
| 33 |
+
SIMPLEX_CRASH_STRATEGY_BASIC "SimplexCrashStrategy::kSimplexCrashStrategyBasic"
|
| 34 |
+
SIMPLEX_CRASH_STRATEGY_TEST_SING "SimplexCrashStrategy::kSimplexCrashStrategyTestSing"
|
| 35 |
+
SIMPLEX_CRASH_STRATEGY_MAX "SimplexCrashStrategy::kSimplexCrashStrategyMax" = SIMPLEX_CRASH_STRATEGY_TEST_SING
|
| 36 |
+
|
| 37 |
+
cdef enum SimplexEdgeWeightStrategy:
|
| 38 |
+
SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMin" = -1
|
| 39 |
+
SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyChoose" = SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN
|
| 40 |
+
SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDantzig"
|
| 41 |
+
SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDevex"
|
| 42 |
+
SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdge"
|
| 43 |
+
SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdgeUnitInitial"
|
| 44 |
+
SIMPLEX_EDGE_WEIGHT_STRATEGY_MAX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMax" = SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL
|
| 45 |
+
|
| 46 |
+
cdef enum SimplexPriceStrategy:
|
| 47 |
+
SIMPLEX_PRICE_STRATEGY_MIN = 0
|
| 48 |
+
SIMPLEX_PRICE_STRATEGY_COL = SIMPLEX_PRICE_STRATEGY_MIN
|
| 49 |
+
SIMPLEX_PRICE_STRATEGY_ROW
|
| 50 |
+
SIMPLEX_PRICE_STRATEGY_ROW_SWITCH
|
| 51 |
+
SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
|
| 52 |
+
SIMPLEX_PRICE_STRATEGY_MAX = SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
|
| 53 |
+
|
| 54 |
+
cdef enum SimplexDualChuzcStrategy:
|
| 55 |
+
SIMPLEX_DUAL_CHUZC_STRATEGY_MIN = 0
|
| 56 |
+
SIMPLEX_DUAL_CHUZC_STRATEGY_CHOOSE = SIMPLEX_DUAL_CHUZC_STRATEGY_MIN
|
| 57 |
+
SIMPLEX_DUAL_CHUZC_STRATEGY_QUAD
|
| 58 |
+
SIMPLEX_DUAL_CHUZC_STRATEGY_HEAP
|
| 59 |
+
SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
|
| 60 |
+
SIMPLEX_DUAL_CHUZC_STRATEGY_MAX = SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
|
| 61 |
+
|
| 62 |
+
cdef enum InvertHint:
|
| 63 |
+
INVERT_HINT_NO = 0
|
| 64 |
+
INVERT_HINT_UPDATE_LIMIT_REACHED
|
| 65 |
+
INVERT_HINT_SYNTHETIC_CLOCK_SAYS_INVERT
|
| 66 |
+
INVERT_HINT_POSSIBLY_OPTIMAL
|
| 67 |
+
INVERT_HINT_POSSIBLY_PRIMAL_UNBOUNDED
|
| 68 |
+
INVERT_HINT_POSSIBLY_DUAL_UNBOUNDED
|
| 69 |
+
INVERT_HINT_POSSIBLY_SINGULAR_BASIS
|
| 70 |
+
INVERT_HINT_PRIMAL_INFEASIBLE_IN_PRIMAL_SIMPLEX
|
| 71 |
+
INVERT_HINT_CHOOSE_COLUMN_FAIL
|
| 72 |
+
INVERT_HINT_Count
|
| 73 |
+
|
| 74 |
+
cdef enum DualEdgeWeightMode:
|
| 75 |
+
DANTZIG "DualEdgeWeightMode::DANTZIG" = 0
|
| 76 |
+
DEVEX "DualEdgeWeightMode::DEVEX"
|
| 77 |
+
STEEPEST_EDGE "DualEdgeWeightMode::STEEPEST_EDGE"
|
| 78 |
+
Count "DualEdgeWeightMode::Count"
|
| 79 |
+
|
| 80 |
+
cdef enum PriceMode:
|
| 81 |
+
ROW "PriceMode::ROW" = 0
|
| 82 |
+
COL "PriceMode::COL"
|
| 83 |
+
|
| 84 |
+
const int PARALLEL_THREADS_DEFAULT
|
| 85 |
+
const int DUAL_TASKS_MIN_THREADS
|
| 86 |
+
const int DUAL_MULTI_MIN_THREADS
|
| 87 |
+
|
| 88 |
+
const bool invert_if_row_out_negative
|
| 89 |
+
|
| 90 |
+
const int NONBASIC_FLAG_TRUE
|
| 91 |
+
const int NONBASIC_FLAG_FALSE
|
| 92 |
+
|
| 93 |
+
const int NONBASIC_MOVE_UP
|
| 94 |
+
const int NONBASIC_MOVE_DN
|
| 95 |
+
const int NONBASIC_MOVE_ZE
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/highs_c_api.pxd
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cython: language_level=3
|
| 2 |
+
|
| 3 |
+
cdef extern from "highs_c_api.h" nogil:
|
| 4 |
+
int Highs_passLp(void* highs, int numcol, int numrow, int numnz,
|
| 5 |
+
double* colcost, double* collower, double* colupper,
|
| 6 |
+
double* rowlower, double* rowupper,
|
| 7 |
+
int* astart, int* aindex, double* avalue)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d0e0cc53dba47fe455ac20e0c5588de5dcd553f4c8df5bc5b11a81d84339d015
|
| 3 |
+
size 524785
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog.py
ADDED
|
@@ -0,0 +1,716 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A top-level linear programming interface.
|
| 3 |
+
|
| 4 |
+
.. versionadded:: 0.15.0
|
| 5 |
+
|
| 6 |
+
Functions
|
| 7 |
+
---------
|
| 8 |
+
.. autosummary::
|
| 9 |
+
:toctree: generated/
|
| 10 |
+
|
| 11 |
+
linprog
|
| 12 |
+
linprog_verbose_callback
|
| 13 |
+
linprog_terse_callback
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from ._optimize import OptimizeResult, OptimizeWarning
|
| 20 |
+
from warnings import warn
|
| 21 |
+
from ._linprog_highs import _linprog_highs
|
| 22 |
+
from ._linprog_ip import _linprog_ip
|
| 23 |
+
from ._linprog_simplex import _linprog_simplex
|
| 24 |
+
from ._linprog_rs import _linprog_rs
|
| 25 |
+
from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, # noqa: F401
|
| 26 |
+
_linprog_rs_doc, _linprog_simplex_doc,
|
| 27 |
+
_linprog_highs_ipm_doc, _linprog_highs_ds_doc)
|
| 28 |
+
from ._linprog_util import (
|
| 29 |
+
_parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
|
| 30 |
+
_postsolve, _check_result, _display_summary)
|
| 31 |
+
from copy import deepcopy
|
| 32 |
+
|
| 33 |
+
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
|
| 34 |
+
|
| 35 |
+
__docformat__ = "restructuredtext en"
|
| 36 |
+
|
| 37 |
+
LINPROG_METHODS = [
|
| 38 |
+
'simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm'
|
| 39 |
+
]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def linprog_verbose_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.

    Produces detailed output on sys.stdout before each iteration and after
    the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables. Each slack variable corresponds
            to an inequality constraint. If the slack is zero, then the
            corresponding constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints, that
            is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the T has an additional row
            representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    nit = res['nit']
    status = res['status']

    # Temporarily widen numpy's printing so the solution vector fits on a
    # single line, with a fixed-width float format for alignment.
    previous_options = np.get_printoptions()
    np.set_printoptions(linewidth=500,
                        formatter={'float': lambda v: f"{v: 12.4f}"})

    if status:
        # Abnormal termination: report the status code and solver message.
        print('--------- Simplex Early Exit -------\n')
        print(f'The simplex method exited early with status {status:d}')
        print(res['message'])
    elif res['complete']:
        print('--------- Simplex Complete --------\n')
        print(f'Iterations required: {nit}')
    else:
        print(f'--------- Iteration {nit:d} ---------\n')

    if nit > 0:
        # Phase 1 optimizes an auxiliary (pseudo-) objective; label it so.
        banner = ('Current Pseudo-Objective Value:' if res['phase'] == 1
                  else 'Current Objective Value:')
        print(banner)
        print('f = ', res['fun'])
        print()
        print('Current Solution Vector:')
        print('x = ', res['x'])
        print()

    # Restore the caller's numpy print settings.
    np.set_printoptions(**previous_options)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def linprog_terse_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.

    Produces brief output on sys.stdout before each iteration and after the
    final iteration of the simplex algorithm: one line per iteration holding
    the iteration number and the current solution vector.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables. Each slack variable corresponds
            to an inequality constraint. If the slack is zero, then the
            corresponding constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints, that
            is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the T has an additional row
            representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    iteration = res['nit']
    if not iteration:
        # Emit the column header once, before the very first iterate.
        print("Iter: X:")
    # Left-aligned, 5-wide iteration counter followed by the iterate itself.
    print(f"{iteration: <5d} ", end="")
    print(res['x'])
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
            bounds=(0, None), method='highs', callback=None,
            options=None, x0=None, integrality=None):
    r"""
    Linear programming: minimize a linear objective function subject to linear
    equality and inequality constraints.

    Linear programming solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
        & A_{eq} x = b_{eq},\\
        & l \leq x \leq u ,

    where :math:`x` is a vector of decision variables; :math:`c`,
    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
    :math:`A_{ub}` and :math:`A_{eq}` are matrices.

    Alternatively, that's:

    - minimize ::

        c @ x

    - such that ::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be
    specified with ``bounds``.

    Parameters
    ----------
    c : 1-D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2-D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1-D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2-D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1-D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must equal
        the corresponding element of ``b_eq``.
    bounds : sequence, optional
        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
        the minimum and maximum values of that decision variable.
        If a single tuple ``(min, max)`` is provided, then ``min`` and ``max``
        will serve as bounds for all decision variables.
        Use ``None`` to indicate that there is no bound. For instance, the
        default bound ``(0, None)`` means that all decision variables are
        non-negative, and the pair ``(None, None)`` means no bounds at all,
        i.e. all variables are allowed to be any real.
    method : str, optional
        The algorithm used to solve the standard form problem.
        :ref:`'highs' <optimize.linprog-highs>` (default),
        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
        :ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
        :ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
        and
        :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
        The legacy methods are deprecated and will be removed in SciPy 1.11.0.
    callback : callable, optional
        If a callback function is provided, it will be called at least once per
        iteration of the algorithm. The callback function must accept a single
        `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The current solution vector.
        fun : float
            The current value of the objective function ``c @ x``.
        success : bool
            ``True`` when the algorithm has completed successfully.
        slack : 1-D array
            The (nominally positive) values of the slack,
            ``b_ub - A_ub @ x``.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            ``b_eq - A_eq @ x``.
        phase : int
            The phase of the algorithm being executed.
        status : int
            An integer representing the status of the algorithm.

            ``0`` : Optimization proceeding nominally.

            ``1`` : Iteration limit reached.

            ``2`` : Problem appears to be infeasible.

            ``3`` : Problem appears to be unbounded.

            ``4`` : Numerical difficulties encountered.

        nit : int
            The current iteration number.
        message : str
            A string descriptor of the algorithm status.

        Callback functions are not currently supported by the HiGHS methods.

    options : dict, optional
        A dictionary of solver options. All methods accept the following
        options:

        maxiter : int
            Maximum number of iterations to perform.
            Default: see method-specific documentation.
        disp : bool
            Set to ``True`` to print convergence messages.
            Default: ``False``.
        presolve : bool
            Set to ``False`` to disable automatic presolve.
            Default: ``True``.

        All methods except the HiGHS solvers also accept:

        tol : float
            A tolerance which determines when a residual is "close enough" to
            zero to be considered exactly zero.
        autoscale : bool
            Set to ``True`` to automatically perform equilibration.
            Consider using this option if the numerical values in the
            constraints are separated by several orders of magnitude.
            Default: ``False``.
        rr : bool
            Set to ``False`` to disable automatic redundancy removal.
            Default: ``True``.
        rr_method : string
            Method used to identify and remove redundant rows from the
            equality constraint matrix after presolve. For problems with
            dense input, the available methods for redundancy removal are:

            "SVD":
                Repeatedly performs singular value decomposition on
                the matrix, detecting redundant rows based on nonzeros
                in the left singular vectors that correspond with
                zero singular values. May be fast when the matrix is
                nearly full rank.
            "pivot":
                Uses the algorithm presented in [5]_ to identify
                redundant rows.
            "ID":
                Uses a randomized interpolative decomposition.
                Identifies columns of the matrix transpose not used in
                a full-rank interpolative decomposition of the matrix.
            None:
                Uses "svd" if the matrix is nearly full rank, that is,
                the difference between the matrix rank and the number
                of rows is less than five. If not, uses "pivot". The
                behavior of this default is subject to change without
                prior notice.

            Default: None.
            For problems with sparse input, this option is ignored, and the
            pivot-based algorithm presented in [5]_ is used.

        For method-specific options, see
        :func:`show_options('linprog') <show_options>`.

    x0 : 1-D array, optional
        Guess values of the decision variables, which will be refined by
        the optimization algorithm. This argument is currently used only by the
        'revised simplex' method, and can only be used if `x0` represents a
        basic feasible solution.

    integrality : 1-D array or int, optional
        Indicates the type of integrality constraint on each decision variable.

        ``0`` : Continuous variable; no integrality constraint.

        ``1`` : Integer variable; decision variable must be an integer
        within `bounds`.

        ``2`` : Semi-continuous variable; decision variable must be within
        `bounds` or take value ``0``.

        ``3`` : Semi-integer variable; decision variable must be an integer
        within `bounds` or take value ``0``.

        By default, all variables are continuous.

        For mixed integrality constraints, supply an array of shape `c.shape`.
        To infer a constraint on each decision variable from shorter inputs,
        the argument will be broadcasted to `c.shape` using `np.broadcast_to`.

        This argument is currently used only by the ``'highs'`` method and
        ignored otherwise.

    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` consisting of the fields
        below. Note that the return types of the fields may depend on whether
        the optimization was successful, therefore it is recommended to check
        `OptimizeResult.status` before relying on the other fields:

        x : 1-D array
            The values of the decision variables that minimizes the
            objective function while satisfying the constraints.
        fun : float
            The optimal value of the objective function ``c @ x``.
        slack : 1-D array
            The (nominally positive) values of the slack variables,
            ``b_ub - A_ub @ x``.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            ``b_eq - A_eq @ x``.
        success : bool
            ``True`` when the algorithm succeeds in finding an optimal
            solution.
        status : int
            An integer representing the exit status of the algorithm.

            ``0`` : Optimization terminated successfully.

            ``1`` : Iteration limit reached.

            ``2`` : Problem appears to be infeasible.

            ``3`` : Problem appears to be unbounded.

            ``4`` : Numerical difficulties encountered.

        nit : int
            The total number of iterations performed in all phases.
        message : str
            A string descriptor of the exit status of the algorithm.

    See Also
    --------
    show_options : Additional options accepted by the solvers.

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter.

    `'highs-ds'` and
    `'highs-ipm'` are interfaces to the
    HiGHS simplex and interior-point method solvers [13]_, respectively.
    `'highs'` (default) chooses between
    the two automatically. These are the fastest linear
    programming solvers in SciPy, especially for large, sparse problems;
    which of these two is faster is problem-dependent.
    The other solvers (`'interior-point'`, `'revised simplex'`, and
    `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.

    Method *highs-ds* is a wrapper of the C++ high performance dual
    revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
    is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
    **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
    as a simplex solver. Method *highs* chooses between the two automatically.
    For new code involving `linprog`, we recommend explicitly choosing one of
    these three method values.

    .. versionadded:: 1.6.0

    Method *interior-point* uses the primal-dual path following algorithm
    as outlined in [4]_. This algorithm supports sparse constraint matrices and
    is typically faster than the simplex methods, especially for large, sparse
    problems. Note, however, that the solution returned may be slightly less
    accurate than those of the simplex methods and will not, in general,
    correspond with a vertex of the polytope defined by the constraints.

    .. versionadded:: 1.0.0

    Method *revised simplex* uses the revised simplex method as described in
    [9]_, except that a factorization [11]_ of the basis matrix, rather than
    its inverse, is efficiently maintained and used to solve the linear systems
    at each iteration of the algorithm.

    .. versionadded:: 1.3.0

    Method *simplex* uses a traditional, full-tableau implementation of
    Dantzig's simplex algorithm [1]_, [2]_ (*not* the
    Nelder-Mead simplex). This algorithm is included for backwards
    compatibility and educational purposes.

    .. versionadded:: 0.15.0

    Before applying *interior-point*, *revised simplex*, or *simplex*,
    a presolve procedure based on [8]_ attempts
    to identify trivial infeasibilities, trivial unboundedness, and potential
    problem simplifications. Specifically, it checks for:

    - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
    - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
      variables;
    - column singletons in ``A_eq``, representing fixed variables; and
    - column singletons in ``A_ub``, representing simple bounds.

    If presolve reveals that the problem is unbounded (e.g. an unconstrained
    and unbounded variable has negative cost) or infeasible (e.g., a row of
    zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
    terminates with the appropriate status code. Note that presolve terminates
    as soon as any sign of unboundedness is detected; consequently, a problem
    may be reported as unbounded when in reality the problem is infeasible
    (but infeasibility has not been detected yet). Therefore, if it is
    important to know whether the problem is actually infeasible, solve the
    problem again with option ``presolve=False``.

    If neither infeasibility nor unboundedness are detected in a single pass
    of the presolve, bounds are tightened where possible and fixed
    variables are removed from the problem. Then, linearly dependent rows
    of the ``A_eq`` matrix are removed, (unless they represent an
    infeasibility) to avoid numerical difficulties in the primary solve
    routine. Note that rows that are nearly linearly dependent (within a
    prescribed tolerance) may also be removed, which can change the optimal
    solution in rare cases. If this is a concern, eliminate redundancy from
    your problem formulation and run with option ``rr=False`` or
    ``presolve=False``.

    Several potential improvements can be made here: additional presolve
    checks outlined in [8]_ should be implemented, the presolve routine should
    be run multiple times (until no further simplifications can be made), and
    more of the efficiency improvements from [5]_ should be implemented in the
    redundancy removal routines.

    After presolve, the problem is transformed to standard form by converting
    the (tightened) simple bounds to upper bound constraints, introducing
    non-negative slack variables for inequality constraints, and expressing
    unbounded variables as the difference between two non-negative variables.
    Optionally, the problem is automatically scaled via equilibration [12]_.
    The selected algorithm solves the standard form problem, and a
    postprocessing routine converts the result to a solution to the original
    problem.

    References
    ----------
    .. [1] Dantzig, George B., Linear programming and extensions. Rand
           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
           1963
    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
           Mathematical Programming", McGraw-Hill, Chapter 4.
    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
           Mathematics of Operations Research (2), 1977: pp. 103-107.
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.
    .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
           large-scale linear programming." Optimization Methods and Software
           6.3 (1995): 219-227.
    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
           Programming based on Newton's Method." Unpublished Course Notes,
           March 2004. Available 2/25/2017 at
           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
    .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
           Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
           http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
           programming." Mathematical Programming 71.2 (1995): 221-245.
    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
           programming." Athena Scientific 1 (1997): 997.
    .. [10] Andersen, Erling D., et al. Implementation of interior point
            methods for large scale linear programming. HEC/Universite de
            Geneve, 1996.
    .. [11] Bartels, Richard H. "A stabilization of the simplex method."
            Journal in Numerische Mathematik 16.5 (1971): 414-434.
    .. [12] Tomlin, J. A. "On scaling linear programming problems."
            Mathematical Programming Study 4 (1975): 146-166.
    .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
            "HiGHS - high performance software for linear optimization."
            https://highs.dev/
    .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
            simplex method." Mathematical Programming Computation, 10 (1),
            119-142, 2018. DOI: 10.1007/s12532-017-0130-5

    Examples
    --------
    Consider the following problem:

    .. math::

        \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
        \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
        -x_0 - 2x_1 & \geq -4,\\
        x_1 & \geq -3.

    The problem is not presented in the form accepted by `linprog`. This is
    easily remedied by converting the "greater than" inequality
    constraint to a "less than" inequality constraint by
    multiplying both sides by a factor of :math:`-1`. Note also that the last
    constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
    Finally, since there are no bounds on :math:`x_0`, we must explicitly
    specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting coeffecients
    into arrays and tuples, the input for this problem is:

    >>> from scipy.optimize import linprog
    >>> c = [-1, 4]
    >>> A = [[-3, 1], [1, 2]]
    >>> b = [6, 4]
    >>> x0_bounds = (None, None)
    >>> x1_bounds = (-3, None)
    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
    >>> res.fun
    -22.0
    >>> res.x
    array([10., -3.])
    >>> res.message
    'Optimization terminated successfully. (HiGHS Status 7: Optimal)'

    The marginals (AKA dual values / shadow prices / Lagrange multipliers)
    and residuals (slacks) are also available.

    >>> res.ineqlin
      residual: [ 3.900e+01  0.000e+00]
     marginals: [-0.000e+00 -1.000e+00]

    For example, because the marginal associated with the second inequality
    constraint is -1, we expect the optimal value of the objective function
    to decrease by ``eps`` if we add a small amount ``eps`` to the right hand
    side of the second inequality constraint:

    >>> eps = 0.05
    >>> b[1] += eps
    >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
    -22.05

    Also, because the residual on the first inequality constraint is 39, we
    can decrease the right hand side of the first constraint by 39 without
    affecting the optimal solution.

    >>> b = [6, 4]  # reset to original values
    >>> b[0] -= 39
    >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
    -22.0

    """

    # Method names are matched case-insensitively.
    meth = method.lower()
    methods = {"highs", "highs-ds", "highs-ipm",
               "simplex", "revised simplex", "interior-point"}

    if meth not in methods:
        raise ValueError(f"Unknown solver '{method}'")

    # `x0` is honored only by 'revised simplex'; warn (but proceed) otherwise.
    if x0 is not None and meth != "revised simplex":
        warning_message = "x0 is used only when method is 'revised simplex'. "
        warn(warning_message, OptimizeWarning, stacklevel=2)

    # Integrality constraints are honored only by 'highs'; for other methods
    # they are dropped with a warning. For 'highs', broadcast to c's shape so
    # shorter inputs (e.g. a scalar) apply to every decision variable.
    if np.any(integrality) and not meth == "highs":
        integrality = None
        warning_message = ("Only `method='highs'` supports integer "
                           "constraints. Ignoring `integrality`.")
        warn(warning_message, OptimizeWarning, stacklevel=2)
    elif np.any(integrality):
        integrality = np.broadcast_to(integrality, np.shape(c))
    else:
        integrality = None

    # Normalize/validate all inputs into an `_LPProblem` namedtuple and
    # split user options into solver-specific keyword arguments.
    lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
    lp, solver_options = _parse_linprog(lp, options, meth)
    tol = solver_options.get('tol', 1e-9)

    # Give unmodified problem to HiGHS
    if meth.startswith('highs'):
        if callback is not None:
            raise NotImplementedError("HiGHS solvers do not support the "
                                      "callback interface.")
        # Map the public method name to the HiGHS backend selector
        # (None lets HiGHS choose between simplex and ipm itself).
        highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
                         'highs': None}

        sol = _linprog_highs(lp, solver=highs_solvers[meth],
                             **solver_options)
        # Sanity-check the HiGHS result (e.g. bounds violations) and
        # possibly rewrite status/message accordingly.
        sol['status'], sol['message'] = (
            _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
                          sol['con'], lp.bounds, tol, sol['message'],
                          integrality))
        sol['success'] = sol['status'] == 0
        return OptimizeResult(sol)

    # Everything below handles the deprecated legacy methods.
    warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
         "1.11.0. Please use one of the HiGHS solvers (e.g. "
         "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)

    iteration = 0
    complete = False  # will become True if solved in presolve
    undo = []

    # Keep the original arrays to calculate slack/residuals for original
    # problem.
    lp_o = deepcopy(lp)

    # Solve trivial problem, eliminate variables, tighten bounds, etc.
    rr_method = solver_options.pop('rr_method', None)  # need to pop these;
    rr = solver_options.pop('rr', True)  # they're not passed to methods
    c0 = 0  # we might get a constant term in the objective
    if solver_options.pop('presolve', True):
        (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
                                                                 rr_method,
                                                                 tol)

    C, b_scale = 1, 1  # for trivial unscaling if autoscale is not used
    postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)

    if not complete:
        # Convert the (presolved) problem to standard form and hand it to
        # the selected legacy solver.
        A, b, c, c0, x0 = _get_Abc(lp, c0)
        if solver_options.pop('autoscale', False):
            A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
            # Replace the trivial scale factors with the real ones so the
            # postsolve step can undo the equilibration.
            postsolve_args = postsolve_args[:-2] + (C, b_scale)

        if meth == 'simplex':
            x, status, message, iteration = _linprog_simplex(
                c, c0=c0, A=A, b=b, callback=callback,
                postsolve_args=postsolve_args, **solver_options)
        elif meth == 'interior-point':
            x, status, message, iteration = _linprog_ip(
                c, c0=c0, A=A, b=b, callback=callback,
                postsolve_args=postsolve_args, **solver_options)
        elif meth == 'revised simplex':
            x, status, message, iteration = _linprog_rs(
                c, c0=c0, A=A, b=b, x0=x0, callback=callback,
                postsolve_args=postsolve_args, **solver_options)

    # Eliminate artificial variables, re-introduce presolved variables, etc.
    disp = solver_options.get('disp', False)

    x, fun, slack, con = _postsolve(x, postsolve_args, complete)

    # Final sanity check against the ORIGINAL (unpresolved) bounds.
    status, message = _check_result(x, fun, status, slack, con, lp_o.bounds,
                                    tol, message, integrality)

    if disp:
        _display_summary(message, status, fun, iteration)

    sol = {
        'x': x,
        'fun': fun,
        'slack': slack,
        'con': con,
        'status': status,
        'message': message,
        'nit': iteration,
        'success': status == 0}

    return OptimizeResult(sol)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_highs.py
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""HiGHS Linear Optimization Methods
|
| 2 |
+
|
| 3 |
+
Interface to HiGHS linear optimization software.
|
| 4 |
+
https://highs.dev/
|
| 5 |
+
|
| 6 |
+
.. versionadded:: 1.5.0
|
| 7 |
+
|
| 8 |
+
References
|
| 9 |
+
----------
|
| 10 |
+
.. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex
|
| 11 |
+
method." Mathematical Programming Computation, 10 (1), 119-142,
|
| 12 |
+
2018. DOI: 10.1007/s12532-017-0130-5
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import inspect
|
| 17 |
+
import numpy as np
|
| 18 |
+
from ._optimize import OptimizeWarning, OptimizeResult
|
| 19 |
+
from warnings import warn
|
| 20 |
+
from ._highs._highs_wrapper import _highs_wrapper
|
| 21 |
+
from ._highs._highs_constants import (
|
| 22 |
+
CONST_INF,
|
| 23 |
+
MESSAGE_LEVEL_NONE,
|
| 24 |
+
HIGHS_OBJECTIVE_SENSE_MINIMIZE,
|
| 25 |
+
|
| 26 |
+
MODEL_STATUS_NOTSET,
|
| 27 |
+
MODEL_STATUS_LOAD_ERROR,
|
| 28 |
+
MODEL_STATUS_MODEL_ERROR,
|
| 29 |
+
MODEL_STATUS_PRESOLVE_ERROR,
|
| 30 |
+
MODEL_STATUS_SOLVE_ERROR,
|
| 31 |
+
MODEL_STATUS_POSTSOLVE_ERROR,
|
| 32 |
+
MODEL_STATUS_MODEL_EMPTY,
|
| 33 |
+
MODEL_STATUS_OPTIMAL,
|
| 34 |
+
MODEL_STATUS_INFEASIBLE,
|
| 35 |
+
MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE,
|
| 36 |
+
MODEL_STATUS_UNBOUNDED,
|
| 37 |
+
MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND
|
| 38 |
+
as MODEL_STATUS_RDOVUB,
|
| 39 |
+
MODEL_STATUS_REACHED_OBJECTIVE_TARGET,
|
| 40 |
+
MODEL_STATUS_REACHED_TIME_LIMIT,
|
| 41 |
+
MODEL_STATUS_REACHED_ITERATION_LIMIT,
|
| 42 |
+
|
| 43 |
+
HIGHS_SIMPLEX_STRATEGY_DUAL,
|
| 44 |
+
|
| 45 |
+
HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
|
| 46 |
+
|
| 47 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
|
| 48 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
|
| 49 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
|
| 50 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
|
| 51 |
+
)
|
| 52 |
+
from scipy.sparse import csc_matrix, vstack, issparse
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _highs_to_scipy_status_message(highs_status, highs_message):
    """Converts HiGHS status number/message to SciPy status number/message"""

    # SciPy convention: 0 success, 1 iteration/time limit reached,
    # 2 infeasible, 3 unbounded, 4 solver failure / unrecognized status.
    status_map = {
        None: (4, "HiGHS did not provide a status code. "),
        MODEL_STATUS_NOTSET: (4, ""),
        MODEL_STATUS_LOAD_ERROR: (4, ""),
        MODEL_STATUS_MODEL_ERROR: (2, ""),
        MODEL_STATUS_PRESOLVE_ERROR: (4, ""),
        MODEL_STATUS_SOLVE_ERROR: (4, ""),
        MODEL_STATUS_POSTSOLVE_ERROR: (4, ""),
        MODEL_STATUS_MODEL_EMPTY: (4, ""),
        MODEL_STATUS_RDOVUB: (4, ""),
        MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""),
        MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "),
        MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "),
        MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "),
        MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "),
        MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "),
        MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded "
                                                  "or infeasible. "),
    }
    fallback = (4, "The HiGHS status code was not recognized. ")
    scipy_status, prefix = status_map.get(highs_status, fallback)
    # Always append the raw HiGHS status/message for debuggability.
    detail = f"(HiGHS Status {highs_status}: {highs_message})"
    return scipy_status, prefix + detail
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _replace_inf(x):
    """Substitute HiGHS's finite "infinity" constant for +/-inf, in place."""
    # HiGHS does not accept np.inf; use the solver's large finite constant,
    # keeping the sign of each infinite entry.
    mask = np.isinf(x)
    with np.errstate(invalid="ignore"):
        x[mask] = np.sign(x[mask]) * CONST_INF
    return x
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _convert_to_highs_enum(option, option_str, choices):
|
| 93 |
+
# If option is in the choices we can look it up, if not use
|
| 94 |
+
# the default value taken from function signature and warn:
|
| 95 |
+
try:
|
| 96 |
+
return choices[option.lower()]
|
| 97 |
+
except AttributeError:
|
| 98 |
+
return choices[option]
|
| 99 |
+
except KeyError:
|
| 100 |
+
sig = inspect.signature(_linprog_highs)
|
| 101 |
+
default_str = sig.parameters[option_str].default
|
| 102 |
+
warn(f"Option {option_str} is {option}, but only values in "
|
| 103 |
+
f"{set(choices.keys())} are allowed. Using default: "
|
| 104 |
+
f"{default_str}.",
|
| 105 |
+
OptimizeWarning, stacklevel=3)
|
| 106 |
+
return choices[default_str]
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _linprog_highs(lp, solver, time_limit=None, presolve=True,
                   disp=False, maxiter=None,
                   dual_feasibility_tolerance=None,
                   primal_feasibility_tolerance=None,
                   ipm_optimality_tolerance=None,
                   simplex_dual_edge_weight_strategy=None,
                   mip_rel_gap=None,
                   mip_max_nodes=None,
                   **unknown_options):
    r"""
    Solve the following linear programming problem using one of the HiGHS
    solvers:

    User-facing documentation is in _linprog_doc.py.

    Parameters
    ----------
    lp : _LPProblem
        A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``.
    solver : "ipm" or "simplex" or None
        Which HiGHS solver to use.  If ``None``, "simplex" will be used.

    Options
    -------
    maxiter : int
        The maximum number of iterations to perform in either phase. For
        ``solver='ipm'``, this does not include the number of crossover
        iterations.  Default is the largest possible value for an ``int``
        on the platform.
    disp : bool
        Set to ``True`` if indicators of optimization status are to be printed
        to the console each iteration; default ``False``.
    time_limit : float
        The maximum time in seconds allotted to solve the problem; default is
        the largest possible value for a ``double`` on the platform.
    presolve : bool
        Presolve attempts to identify trivial infeasibilities,
        identify trivial unboundedness, and simplify the problem before
        sending it to the main solver. It is generally recommended
        to keep the default setting ``True``; set to ``False`` if presolve is
        to be disabled.
    dual_feasibility_tolerance : double
        Dual feasibility tolerance.  Default is 1e-07.
        The minimum of this and ``primal_feasibility_tolerance``
        is used for the feasibility tolerance when ``solver='ipm'``.
    primal_feasibility_tolerance : double
        Primal feasibility tolerance.  Default is 1e-07.
        The minimum of this and ``dual_feasibility_tolerance``
        is used for the feasibility tolerance when ``solver='ipm'``.
    ipm_optimality_tolerance : double
        Optimality tolerance for ``solver='ipm'``.  Default is 1e-08.
        Minimum possible value is 1e-12 and must be smaller than the largest
        possible value for a ``double`` on the platform.
    simplex_dual_edge_weight_strategy : str (default: None)
        Strategy for simplex dual edge weights. The default, ``None``,
        automatically selects one of the following.

        ``'dantzig'`` uses Dantzig's original strategy of choosing the most
        negative reduced cost.

        ``'devex'`` uses the strategy described in [15]_.

        ``steepest`` uses the exact steepest edge strategy as described in
        [16]_.

        ``'steepest-devex'`` begins with the exact steepest edge strategy
        until the computation is too costly or inexact and then switches to
        the devex method.

        Currently, using ``None`` always selects ``'steepest-devex'``, but this
        may change as new options become available.

    mip_max_nodes : int
        The maximum number of nodes allotted to solve the problem; default is
        the largest possible value for a ``HighsInt`` on the platform.
        Ignored if not using the MIP solver.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        ``unknown_options`` is non-empty, a warning is issued listing all
        unused options.

    Returns
    -------
    sol : dict
        A dictionary consisting of the fields:

            x : 1D array
                The values of the decision variables that minimizes the
                objective function while satisfying the constraints.
            fun : float
                The optimal value of the objective function ``c @ x``.
            slack : 1D array
                The (nominally positive) values of the slack,
                ``b_ub - A_ub @ x``.
            con : 1D array
                The (nominally zero) residuals of the equality constraints,
                ``b_eq - A_eq @ x``.
            success : bool
                ``True`` when the algorithm succeeds in finding an optimal
                solution.
            status : int
                An integer representing the exit status of the algorithm.

                ``0`` : Optimization terminated successfully.

                ``1`` : Iteration or time limit reached.

                ``2`` : Problem appears to be infeasible.

                ``3`` : Problem appears to be unbounded.

                ``4`` : The HiGHS solver ran into a problem.

            message : str
                A string descriptor of the exit status of the algorithm.
            nit : int
                The total number of iterations performed.
                For ``solver='simplex'``, this includes iterations in all
                phases. For ``solver='ipm'``, this does not include
                crossover iterations.
            crossover_nit : int
                The number of primal/dual pushes performed during the
                crossover routine for ``solver='ipm'``.  This is ``0``
                for ``solver='simplex'``.
            ineqlin : OptimizeResult
                Solution and sensitivity information corresponding to the
                inequality constraints, `b_ub`.  A dictionary consisting of the
                fields:

                residual : np.ndarray
                    The (nominally positive) values of the slack variables,
                    ``b_ub - A_ub @ x``.  This quantity is also commonly
                    referred to as "slack".

                marginals : np.ndarray
                    The sensitivity (partial derivative) of the objective
                    function with respect to the right-hand side of the
                    inequality constraints, `b_ub`.

            eqlin : OptimizeResult
                Solution and sensitivity information corresponding to the
                equality constraints, `b_eq`.  A dictionary consisting of the
                fields:

                residual : np.ndarray
                    The (nominally zero) residuals of the equality constraints,
                    ``b_eq - A_eq @ x``.

                marginals : np.ndarray
                    The sensitivity (partial derivative) of the objective
                    function with respect to the right-hand side of the
                    equality constraints, `b_eq`.

            lower, upper : OptimizeResult
                Solution and sensitivity information corresponding to the
                lower and upper bounds on decision variables, `bounds`.

                residual : np.ndarray
                    The (nominally positive) values of the quantity
                    ``x - lb`` (lower) or ``ub - x`` (upper).

                marginals : np.ndarray
                    The sensitivity (partial derivative) of the objective
                    function with respect to the lower and upper
                    `bounds`.

            mip_node_count : int
                The number of subproblems or "nodes" solved by the MILP
                solver. Only present when `integrality` is not `None`.

            mip_dual_bound : float
                The MILP solver's final estimate of the lower bound on the
                optimal solution. Only present when `integrality` is not
                `None`.

            mip_gap : float
                The difference between the final objective function value
                and the final dual bound, scaled by the final objective
                function value. Only present when `integrality` is not
                `None`.

    Notes
    -----
    The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
    `marginals`, or partial derivatives of the objective function with respect
    to the right-hand side of each constraint. These partial derivatives are
    also referred to as "Lagrange multipliers", "dual values", and
    "shadow prices". The sign convention of `marginals` is opposite that
    of Lagrange multipliers produced by many nonlinear solvers.

    References
    ----------
    .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
            Mathematical programming 5.1 (1973): 1-28.
    .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
            simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
    """
    if unknown_options:
        # Unknown options are forwarded to HiGHS rather than rejected, so a
        # typo in an option name is only surfaced via this warning.
        message = (f"Unrecognized options detected: {unknown_options}. "
                   "These will be passed to HiGHS verbatim.")
        warn(message, OptimizeWarning, stacklevel=3)

    # Map options to HiGHS enum values
    simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum(
        simplex_dual_edge_weight_strategy,
        'simplex_dual_edge_weight_strategy',
        choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
                 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
                 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
                 'steepest':
                 HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
                 None: None})

    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp

    # NOTE(review): assumes `bounds` arrives here as an (N, 2) array, so its
    # transpose unpacks into lb/ub rows — confirm against _linprog's presolve.
    lb, ub = bounds.T.copy()  # separate bounds, copy->C-cntgs
    # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints
    with np.errstate(invalid="ignore"):
        lhs_ub = -np.ones_like(b_ub)*np.inf  # LHS of UB constraints is -inf
    rhs_ub = b_ub  # RHS of UB constraints is b_ub
    lhs_eq = b_eq  # Equality constraint is inequality
    rhs_eq = b_eq  # constraint with LHS=RHS
    lhs = np.concatenate((lhs_ub, lhs_eq))
    rhs = np.concatenate((rhs_ub, rhs_eq))

    # Stack inequality rows above equality rows; HiGHS wants CSC arrays.
    if issparse(A_ub) or issparse(A_eq):
        A = vstack((A_ub, A_eq))
    else:
        A = np.vstack((A_ub, A_eq))
    A = csc_matrix(A)

    options = {
        'presolve': presolve,
        'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE,
        'solver': solver,
        'time_limit': time_limit,
        'highs_debug_level': MESSAGE_LEVEL_NONE,
        'dual_feasibility_tolerance': dual_feasibility_tolerance,
        'ipm_optimality_tolerance': ipm_optimality_tolerance,
        'log_to_console': disp,
        'mip_max_nodes': mip_max_nodes,
        'output_flag': disp,
        'primal_feasibility_tolerance': primal_feasibility_tolerance,
        'simplex_dual_edge_weight_strategy':
            simplex_dual_edge_weight_strategy_enum,
        'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL,
        'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
        'ipm_iteration_limit': maxiter,
        'simplex_iteration_limit': maxiter,
        'mip_rel_gap': mip_rel_gap,
    }
    # Unknown options override the defaults above and are passed verbatim.
    options.update(unknown_options)

    # np.inf doesn't work; use very large constant
    rhs = _replace_inf(rhs)
    lhs = _replace_inf(lhs)
    lb = _replace_inf(lb)
    ub = _replace_inf(ub)

    # An all-zero integrality spec is equivalent to a pure LP; signal that to
    # the wrapper with an empty array.
    if integrality is None or np.sum(integrality) == 0:
        integrality = np.empty(0)
    else:
        integrality = np.array(integrality)

    res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs,
                         lb, ub, integrality.astype(np.uint8), options)

    # HiGHS represents constraints as lhs/rhs, so
    # Ax + s = b => Ax = b - s
    # and we need to split up s by A_ub and A_eq
    if 'slack' in res:
        slack = res['slack']
        con = np.array(slack[len(b_ub):])
        slack = np.array(slack[:len(b_ub)])
    else:
        slack, con = None, None

    # lagrange multipliers for equalities/inequalities and upper/lower bounds
    if 'lambda' in res:
        lamda = res['lambda']
        marg_ineqlin = np.array(lamda[:len(b_ub)])
        marg_eqlin = np.array(lamda[len(b_ub):])
        marg_upper = np.array(res['marg_bnds'][1, :])
        marg_lower = np.array(res['marg_bnds'][0, :])
    else:
        marg_ineqlin, marg_eqlin = None, None
        marg_upper, marg_lower = None, None

    # this needs to be updated if we start choosing the solver intelligently

    # Convert to scipy-style status and message
    highs_status = res.get('status', None)
    highs_message = res.get('message', None)
    status, message = _highs_to_scipy_status_message(highs_status,
                                                    highs_message)

    # 'x' is absent when the solver produced no solution (e.g. infeasible).
    x = np.array(res['x']) if 'x' in res else None
    sol = {'x': x,
           'slack': slack,
           'con': con,
           'ineqlin': OptimizeResult({
               'residual': slack,
               'marginals': marg_ineqlin,
           }),
           'eqlin': OptimizeResult({
               'residual': con,
               'marginals': marg_eqlin,
           }),
           'lower': OptimizeResult({
               'residual': None if x is None else x - lb,
               'marginals': marg_lower,
           }),
           'upper': OptimizeResult({
               'residual': None if x is None else ub - x,
               'marginals': marg_upper
           }),
           'fun': res.get('fun'),
           'status': status,
           'success': res['status'] == MODEL_STATUS_OPTIMAL,
           'message': message,
           # Exactly one of simplex_nit/ipm_nit is populated per solver.
           'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0),
           'crossover_nit': res.get('crossover_nit'),
           }

    # MIP statistics are only meaningful when a solution exists and an
    # integrality spec was given (note: `integrality` was normalized to an
    # array above, so the `is not None` test here is always True).
    if np.any(x) and integrality is not None:
        sol.update({
            'mip_node_count': res.get('mip_node_count', 0),
            'mip_dual_bound': res.get('mip_dual_bound', 0.0),
            'mip_gap': res.get('mip_gap', 0.0),
        })

    return sol
|
| 182 |
+
The maximum number of nodes allotted to solve the problem; default is
|
| 183 |
+
the largest possible value for a ``HighsInt`` on the platform.
|
| 184 |
+
Ignored if not using the MIP solver.
|
| 185 |
+
unknown_options : dict
|
| 186 |
+
Optional arguments not used by this particular solver. If
|
| 187 |
+
``unknown_options`` is non-empty, a warning is issued listing all
|
| 188 |
+
unused options.
|
| 189 |
+
|
| 190 |
+
Returns
|
| 191 |
+
-------
|
| 192 |
+
sol : dict
|
| 193 |
+
A dictionary consisting of the fields:
|
| 194 |
+
|
| 195 |
+
x : 1D array
|
| 196 |
+
The values of the decision variables that minimizes the
|
| 197 |
+
objective function while satisfying the constraints.
|
| 198 |
+
fun : float
|
| 199 |
+
The optimal value of the objective function ``c @ x``.
|
| 200 |
+
slack : 1D array
|
| 201 |
+
The (nominally positive) values of the slack,
|
| 202 |
+
``b_ub - A_ub @ x``.
|
| 203 |
+
con : 1D array
|
| 204 |
+
The (nominally zero) residuals of the equality constraints,
|
| 205 |
+
``b_eq - A_eq @ x``.
|
| 206 |
+
success : bool
|
| 207 |
+
``True`` when the algorithm succeeds in finding an optimal
|
| 208 |
+
solution.
|
| 209 |
+
status : int
|
| 210 |
+
An integer representing the exit status of the algorithm.
|
| 211 |
+
|
| 212 |
+
``0`` : Optimization terminated successfully.
|
| 213 |
+
|
| 214 |
+
``1`` : Iteration or time limit reached.
|
| 215 |
+
|
| 216 |
+
``2`` : Problem appears to be infeasible.
|
| 217 |
+
|
| 218 |
+
``3`` : Problem appears to be unbounded.
|
| 219 |
+
|
| 220 |
+
``4`` : The HiGHS solver ran into a problem.
|
| 221 |
+
|
| 222 |
+
message : str
|
| 223 |
+
A string descriptor of the exit status of the algorithm.
|
| 224 |
+
nit : int
|
| 225 |
+
The total number of iterations performed.
|
| 226 |
+
For ``solver='simplex'``, this includes iterations in all
|
| 227 |
+
phases. For ``solver='ipm'``, this does not include
|
| 228 |
+
crossover iterations.
|
| 229 |
+
crossover_nit : int
|
| 230 |
+
The number of primal/dual pushes performed during the
|
| 231 |
+
crossover routine for ``solver='ipm'``. This is ``0``
|
| 232 |
+
for ``solver='simplex'``.
|
| 233 |
+
ineqlin : OptimizeResult
|
| 234 |
+
Solution and sensitivity information corresponding to the
|
| 235 |
+
inequality constraints, `b_ub`. A dictionary consisting of the
|
| 236 |
+
fields:
|
| 237 |
+
|
| 238 |
+
residual : np.ndnarray
|
| 239 |
+
The (nominally positive) values of the slack variables,
|
| 240 |
+
``b_ub - A_ub @ x``. This quantity is also commonly
|
| 241 |
+
referred to as "slack".
|
| 242 |
+
|
| 243 |
+
marginals : np.ndarray
|
| 244 |
+
The sensitivity (partial derivative) of the objective
|
| 245 |
+
function with respect to the right-hand side of the
|
| 246 |
+
inequality constraints, `b_ub`.
|
| 247 |
+
|
| 248 |
+
eqlin : OptimizeResult
|
| 249 |
+
Solution and sensitivity information corresponding to the
|
| 250 |
+
equality constraints, `b_eq`. A dictionary consisting of the
|
| 251 |
+
fields:
|
| 252 |
+
|
| 253 |
+
residual : np.ndarray
|
| 254 |
+
The (nominally zero) residuals of the equality constraints,
|
| 255 |
+
``b_eq - A_eq @ x``.
|
| 256 |
+
|
| 257 |
+
marginals : np.ndarray
|
| 258 |
+
The sensitivity (partial derivative) of the objective
|
| 259 |
+
function with respect to the right-hand side of the
|
| 260 |
+
equality constraints, `b_eq`.
|
| 261 |
+
|
| 262 |
+
lower, upper : OptimizeResult
|
| 263 |
+
Solution and sensitivity information corresponding to the
|
| 264 |
+
lower and upper bounds on decision variables, `bounds`.
|
| 265 |
+
|
| 266 |
+
residual : np.ndarray
|
| 267 |
+
The (nominally positive) values of the quantity
|
| 268 |
+
``x - lb`` (lower) or ``ub - x`` (upper).
|
| 269 |
+
|
| 270 |
+
marginals : np.ndarray
|
| 271 |
+
The sensitivity (partial derivative) of the objective
|
| 272 |
+
function with respect to the lower and upper
|
| 273 |
+
`bounds`.
|
| 274 |
+
|
| 275 |
+
mip_node_count : int
|
| 276 |
+
The number of subproblems or "nodes" solved by the MILP
|
| 277 |
+
solver. Only present when `integrality` is not `None`.
|
| 278 |
+
|
| 279 |
+
mip_dual_bound : float
|
| 280 |
+
The MILP solver's final estimate of the lower bound on the
|
| 281 |
+
optimal solution. Only present when `integrality` is not
|
| 282 |
+
`None`.
|
| 283 |
+
|
| 284 |
+
mip_gap : float
|
| 285 |
+
The difference between the final objective function value
|
| 286 |
+
and the final dual bound, scaled by the final objective
|
| 287 |
+
function value. Only present when `integrality` is not
|
| 288 |
+
`None`.
|
| 289 |
+
|
| 290 |
+
Notes
|
| 291 |
+
-----
|
| 292 |
+
The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
|
| 293 |
+
`marginals`, or partial derivatives of the objective function with respect
|
| 294 |
+
to the right-hand side of each constraint. These partial derivatives are
|
| 295 |
+
also referred to as "Lagrange multipliers", "dual values", and
|
| 296 |
+
"shadow prices". The sign convention of `marginals` is opposite that
|
| 297 |
+
of Lagrange multipliers produced by many nonlinear solvers.
|
| 298 |
+
|
| 299 |
+
References
|
| 300 |
+
----------
|
| 301 |
+
.. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
|
| 302 |
+
Mathematical programming 5.1 (1973): 1-28.
|
| 303 |
+
.. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
|
| 304 |
+
simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
|
| 305 |
+
"""
|
| 306 |
+
if unknown_options:
|
| 307 |
+
message = (f"Unrecognized options detected: {unknown_options}. "
|
| 308 |
+
"These will be passed to HiGHS verbatim.")
|
| 309 |
+
warn(message, OptimizeWarning, stacklevel=3)
|
| 310 |
+
|
| 311 |
+
# Map options to HiGHS enum values
|
| 312 |
+
simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum(
|
| 313 |
+
simplex_dual_edge_weight_strategy,
|
| 314 |
+
'simplex_dual_edge_weight_strategy',
|
| 315 |
+
choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
|
| 316 |
+
'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
|
| 317 |
+
'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
|
| 318 |
+
'steepest':
|
| 319 |
+
HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
|
| 320 |
+
None: None})
|
| 321 |
+
|
| 322 |
+
c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
|
| 323 |
+
|
| 324 |
+
lb, ub = bounds.T.copy() # separate bounds, copy->C-cntgs
|
| 325 |
+
# highs_wrapper solves LHS <= A*x <= RHS, not equality constraints
|
| 326 |
+
with np.errstate(invalid="ignore"):
|
| 327 |
+
lhs_ub = -np.ones_like(b_ub)*np.inf # LHS of UB constraints is -inf
|
| 328 |
+
rhs_ub = b_ub # RHS of UB constraints is b_ub
|
| 329 |
+
lhs_eq = b_eq # Equality constraint is inequality
|
| 330 |
+
rhs_eq = b_eq # constraint with LHS=RHS
|
| 331 |
+
lhs = np.concatenate((lhs_ub, lhs_eq))
|
| 332 |
+
rhs = np.concatenate((rhs_ub, rhs_eq))
|
| 333 |
+
|
| 334 |
+
if issparse(A_ub) or issparse(A_eq):
|
| 335 |
+
A = vstack((A_ub, A_eq))
|
| 336 |
+
else:
|
| 337 |
+
A = np.vstack((A_ub, A_eq))
|
| 338 |
+
A = csc_matrix(A)
|
| 339 |
+
|
| 340 |
+
options = {
|
| 341 |
+
'presolve': presolve,
|
| 342 |
+
'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE,
|
| 343 |
+
'solver': solver,
|
| 344 |
+
'time_limit': time_limit,
|
| 345 |
+
'highs_debug_level': MESSAGE_LEVEL_NONE,
|
| 346 |
+
'dual_feasibility_tolerance': dual_feasibility_tolerance,
|
| 347 |
+
'ipm_optimality_tolerance': ipm_optimality_tolerance,
|
| 348 |
+
'log_to_console': disp,
|
| 349 |
+
'mip_max_nodes': mip_max_nodes,
|
| 350 |
+
'output_flag': disp,
|
| 351 |
+
'primal_feasibility_tolerance': primal_feasibility_tolerance,
|
| 352 |
+
'simplex_dual_edge_weight_strategy':
|
| 353 |
+
simplex_dual_edge_weight_strategy_enum,
|
| 354 |
+
'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL,
|
| 355 |
+
'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
|
| 356 |
+
'ipm_iteration_limit': maxiter,
|
| 357 |
+
'simplex_iteration_limit': maxiter,
|
| 358 |
+
'mip_rel_gap': mip_rel_gap,
|
| 359 |
+
}
|
| 360 |
+
options.update(unknown_options)
|
| 361 |
+
|
| 362 |
+
# np.inf doesn't work; use very large constant
|
| 363 |
+
rhs = _replace_inf(rhs)
|
| 364 |
+
lhs = _replace_inf(lhs)
|
| 365 |
+
lb = _replace_inf(lb)
|
| 366 |
+
ub = _replace_inf(ub)
|
| 367 |
+
|
| 368 |
+
if integrality is None or np.sum(integrality) == 0:
|
| 369 |
+
integrality = np.empty(0)
|
| 370 |
+
else:
|
| 371 |
+
integrality = np.array(integrality)
|
| 372 |
+
|
| 373 |
+
res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs,
|
| 374 |
+
lb, ub, integrality.astype(np.uint8), options)
|
| 375 |
+
|
| 376 |
+
# HiGHS represents constraints as lhs/rhs, so
|
| 377 |
+
# Ax + s = b => Ax = b - s
|
| 378 |
+
# and we need to split up s by A_ub and A_eq
|
| 379 |
+
if 'slack' in res:
|
| 380 |
+
slack = res['slack']
|
| 381 |
+
con = np.array(slack[len(b_ub):])
|
| 382 |
+
slack = np.array(slack[:len(b_ub)])
|
| 383 |
+
else:
|
| 384 |
+
slack, con = None, None
|
| 385 |
+
|
| 386 |
+
# lagrange multipliers for equalities/inequalities and upper/lower bounds
|
| 387 |
+
if 'lambda' in res:
|
| 388 |
+
lamda = res['lambda']
|
| 389 |
+
marg_ineqlin = np.array(lamda[:len(b_ub)])
|
| 390 |
+
marg_eqlin = np.array(lamda[len(b_ub):])
|
| 391 |
+
marg_upper = np.array(res['marg_bnds'][1, :])
|
| 392 |
+
marg_lower = np.array(res['marg_bnds'][0, :])
|
| 393 |
+
else:
|
| 394 |
+
marg_ineqlin, marg_eqlin = None, None
|
| 395 |
+
marg_upper, marg_lower = None, None
|
| 396 |
+
|
| 397 |
+
# this needs to be updated if we start choosing the solver intelligently
|
| 398 |
+
|
| 399 |
+
# Convert to scipy-style status and message
|
| 400 |
+
highs_status = res.get('status', None)
|
| 401 |
+
highs_message = res.get('message', None)
|
| 402 |
+
status, message = _highs_to_scipy_status_message(highs_status,
|
| 403 |
+
highs_message)
|
| 404 |
+
|
| 405 |
+
x = np.array(res['x']) if 'x' in res else None
|
| 406 |
+
sol = {'x': x,
|
| 407 |
+
'slack': slack,
|
| 408 |
+
'con': con,
|
| 409 |
+
'ineqlin': OptimizeResult({
|
| 410 |
+
'residual': slack,
|
| 411 |
+
'marginals': marg_ineqlin,
|
| 412 |
+
}),
|
| 413 |
+
'eqlin': OptimizeResult({
|
| 414 |
+
'residual': con,
|
| 415 |
+
'marginals': marg_eqlin,
|
| 416 |
+
}),
|
| 417 |
+
'lower': OptimizeResult({
|
| 418 |
+
'residual': None if x is None else x - lb,
|
| 419 |
+
'marginals': marg_lower,
|
| 420 |
+
}),
|
| 421 |
+
'upper': OptimizeResult({
|
| 422 |
+
'residual': None if x is None else ub - x,
|
| 423 |
+
'marginals': marg_upper
|
| 424 |
+
}),
|
| 425 |
+
'fun': res.get('fun'),
|
| 426 |
+
'status': status,
|
| 427 |
+
'success': res['status'] == MODEL_STATUS_OPTIMAL,
|
| 428 |
+
'message': message,
|
| 429 |
+
'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0),
|
| 430 |
+
'crossover_nit': res.get('crossover_nit'),
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
if np.any(x) and integrality is not None:
|
| 434 |
+
sol.update({
|
| 435 |
+
'mip_node_count': res.get('mip_node_count', 0),
|
| 436 |
+
'mip_dual_bound': res.get('mip_dual_bound', 0.0),
|
| 437 |
+
'mip_gap': res.get('mip_gap', 0.0),
|
| 438 |
+
})
|
| 439 |
+
|
| 440 |
+
return sol
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_util.py
ADDED
|
@@ -0,0 +1,1522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Method agnostic utility functions for linear programming
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import scipy.sparse as sps
|
| 7 |
+
from warnings import warn
|
| 8 |
+
from ._optimize import OptimizeWarning
|
| 9 |
+
from scipy.optimize._remove_redundancy import (
|
| 10 |
+
_remove_redundancy_svd, _remove_redundancy_pivot_sparse,
|
| 11 |
+
_remove_redundancy_pivot_dense, _remove_redundancy_id
|
| 12 |
+
)
|
| 13 |
+
from collections import namedtuple
|
| 14 |
+
|
| 15 |
+
_LPProblem = namedtuple('_LPProblem',
                        'c A_ub b_ub A_eq b_eq bounds x0 integrality')
_LPProblem.__new__.__defaults__ = (None,) * 7  # make c the only required arg
_LPProblem.__doc__ = \
    """ Represents a linear-programming problem.

    Attributes
    ----------
    c : 1D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must equal
        the corresponding element of ``b_eq``.
    bounds : various valid formats, optional
        The bounds of ``x``, as ``min`` and ``max`` pairs.
        If bounds are specified for all N variables separately, valid formats
        are:
        * a 2D array (N x 2);
        * a sequence of N sequences, each with 2 values.
        If all variables have the same bounds, the bounds can be specified as
        a 1-D or 2-D array or sequence with 2 scalar values.
        If all variables have a lower bound of 0 and no upper bound, the bounds
        parameter can be omitted (or given as None).
        Absent lower and/or upper bounds can be specified as -numpy.inf (no
        lower bound), numpy.inf (no upper bound) or None (both).
    x0 : 1D array, optional
        Guess values of the decision variables, which will be refined by
        the optimization algorithm. This argument is currently used only by the
        'revised simplex' method, and can only be used if `x0` represents a
        basic feasible solution.
    integrality : 1-D array or int, optional
        Indicates the type of integrality constraint on each decision variable.

        ``0`` : Continuous variable; no integrality constraint.

        ``1`` : Integer variable; decision variable must be an integer
        within `bounds`.

        ``2`` : Semi-continuous variable; decision variable must be within
        `bounds` or take value ``0``.

        ``3`` : Semi-integer variable; decision variable must be an integer
        within `bounds` or take value ``0``.

        By default, all variables are continuous.

        For mixed integrality constraints, supply an array of shape `c.shape`.
        To infer a constraint on each decision variable from shorter inputs,
        the argument will be broadcasted to `c.shape` using `np.broadcast_to`.

        This argument is currently used only by the ``'highs'`` method and
        ignored otherwise.

    Notes
    -----
    This namedtuple supports 2 ways of initialization:
    >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4])
    >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4])

    Note that only ``c`` is a required argument here, whereas all other arguments
    ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with
    default values of None.
    For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``:
    >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10])
    """
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _check_sparse_inputs(options, meth, A_ub, A_eq):
|
| 92 |
+
"""
|
| 93 |
+
Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified
|
| 94 |
+
optional sparsity variables.
|
| 95 |
+
|
| 96 |
+
Parameters
|
| 97 |
+
----------
|
| 98 |
+
A_ub : 2-D array, optional
|
| 99 |
+
2-D array such that ``A_ub @ x`` gives the values of the upper-bound
|
| 100 |
+
inequality constraints at ``x``.
|
| 101 |
+
A_eq : 2-D array, optional
|
| 102 |
+
2-D array such that ``A_eq @ x`` gives the values of the equality
|
| 103 |
+
constraints at ``x``.
|
| 104 |
+
options : dict
|
| 105 |
+
A dictionary of solver options. All methods accept the following
|
| 106 |
+
generic options:
|
| 107 |
+
|
| 108 |
+
maxiter : int
|
| 109 |
+
Maximum number of iterations to perform.
|
| 110 |
+
disp : bool
|
| 111 |
+
Set to True to print convergence messages.
|
| 112 |
+
|
| 113 |
+
For method-specific options, see :func:`show_options('linprog')`.
|
| 114 |
+
method : str, optional
|
| 115 |
+
The algorithm used to solve the standard form problem.
|
| 116 |
+
|
| 117 |
+
Returns
|
| 118 |
+
-------
|
| 119 |
+
A_ub : 2-D array, optional
|
| 120 |
+
2-D array such that ``A_ub @ x`` gives the values of the upper-bound
|
| 121 |
+
inequality constraints at ``x``.
|
| 122 |
+
A_eq : 2-D array, optional
|
| 123 |
+
2-D array such that ``A_eq @ x`` gives the values of the equality
|
| 124 |
+
constraints at ``x``.
|
| 125 |
+
options : dict
|
| 126 |
+
A dictionary of solver options. All methods accept the following
|
| 127 |
+
generic options:
|
| 128 |
+
|
| 129 |
+
maxiter : int
|
| 130 |
+
Maximum number of iterations to perform.
|
| 131 |
+
disp : bool
|
| 132 |
+
Set to True to print convergence messages.
|
| 133 |
+
|
| 134 |
+
For method-specific options, see :func:`show_options('linprog')`.
|
| 135 |
+
"""
|
| 136 |
+
# This is an undocumented option for unit testing sparse presolve
|
| 137 |
+
_sparse_presolve = options.pop('_sparse_presolve', False)
|
| 138 |
+
if _sparse_presolve and A_eq is not None:
|
| 139 |
+
A_eq = sps.coo_matrix(A_eq)
|
| 140 |
+
if _sparse_presolve and A_ub is not None:
|
| 141 |
+
A_ub = sps.coo_matrix(A_ub)
|
| 142 |
+
|
| 143 |
+
sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub)
|
| 144 |
+
|
| 145 |
+
preferred_methods = {"highs", "highs-ds", "highs-ipm"}
|
| 146 |
+
dense_methods = {"simplex", "revised simplex"}
|
| 147 |
+
if meth in dense_methods and sparse_constraint:
|
| 148 |
+
raise ValueError(f"Method '{meth}' does not support sparse "
|
| 149 |
+
"constraint matrices. Please consider using one of "
|
| 150 |
+
f"{preferred_methods}.")
|
| 151 |
+
|
| 152 |
+
sparse = options.get('sparse', False)
|
| 153 |
+
if not sparse and sparse_constraint and meth == 'interior-point':
|
| 154 |
+
options['sparse'] = True
|
| 155 |
+
warn("Sparse constraint matrix detected; setting 'sparse':True.",
|
| 156 |
+
OptimizeWarning, stacklevel=4)
|
| 157 |
+
return options, A_ub, A_eq
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def _format_A_constraints(A, n_x, sparse_lhs=False):
|
| 161 |
+
"""Format the left hand side of the constraints to a 2-D array
|
| 162 |
+
|
| 163 |
+
Parameters
|
| 164 |
+
----------
|
| 165 |
+
A : 2-D array
|
| 166 |
+
2-D array such that ``A @ x`` gives the values of the upper-bound
|
| 167 |
+
(in)equality constraints at ``x``.
|
| 168 |
+
n_x : int
|
| 169 |
+
The number of variables in the linear programming problem.
|
| 170 |
+
sparse_lhs : bool
|
| 171 |
+
Whether either of `A_ub` or `A_eq` are sparse. If true return a
|
| 172 |
+
coo_matrix instead of a numpy array.
|
| 173 |
+
|
| 174 |
+
Returns
|
| 175 |
+
-------
|
| 176 |
+
np.ndarray or sparse.coo_matrix
|
| 177 |
+
2-D array such that ``A @ x`` gives the values of the upper-bound
|
| 178 |
+
(in)equality constraints at ``x``.
|
| 179 |
+
|
| 180 |
+
"""
|
| 181 |
+
if sparse_lhs:
|
| 182 |
+
return sps.coo_matrix(
|
| 183 |
+
(0, n_x) if A is None else A, dtype=float, copy=True
|
| 184 |
+
)
|
| 185 |
+
elif A is None:
|
| 186 |
+
return np.zeros((0, n_x), dtype=float)
|
| 187 |
+
else:
|
| 188 |
+
return np.array(A, dtype=float, copy=True)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _format_b_constraints(b):
|
| 192 |
+
"""Format the upper bounds of the constraints to a 1-D array
|
| 193 |
+
|
| 194 |
+
Parameters
|
| 195 |
+
----------
|
| 196 |
+
b : 1-D array
|
| 197 |
+
1-D array of values representing the upper-bound of each (in)equality
|
| 198 |
+
constraint (row) in ``A``.
|
| 199 |
+
|
| 200 |
+
Returns
|
| 201 |
+
-------
|
| 202 |
+
1-D np.array
|
| 203 |
+
1-D array of values representing the upper-bound of each (in)equality
|
| 204 |
+
constraint (row) in ``A``.
|
| 205 |
+
|
| 206 |
+
"""
|
| 207 |
+
if b is None:
|
| 208 |
+
return np.array([], dtype=float)
|
| 209 |
+
b = np.array(b, dtype=float, copy=True).squeeze()
|
| 210 |
+
return b if b.size != 1 else b.reshape(-1)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def _clean_inputs(lp):
    """
    Given user inputs for a linear programming problem, return the
    objective vector, upper bound constraints, equality constraints,
    and simple bounds in a preferred format.

    Parameters
    ----------
    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:

        c : 1D array
            The coefficients of the linear objective function to be minimized.
        A_ub : 2D array, optional
            The inequality constraint matrix. Each row of ``A_ub`` specifies the
            coefficients of a linear inequality constraint on ``x``.
        b_ub : 1D array, optional
            The inequality constraint vector. Each element represents an
            upper bound on the corresponding value of ``A_ub @ x``.
        A_eq : 2D array, optional
            The equality constraint matrix. Each row of ``A_eq`` specifies the
            coefficients of a linear equality constraint on ``x``.
        b_eq : 1D array, optional
            The equality constraint vector. Each element of ``A_eq @ x`` must equal
            the corresponding element of ``b_eq``.
        bounds : various valid formats, optional
            The bounds of ``x``, as ``min`` and ``max`` pairs.
            If bounds are specified for all N variables separately, valid formats are:
            * a 2D array (2 x N or N x 2);
            * a sequence of N sequences, each with 2 values.
            If all variables have the same bounds, a single pair of values can
            be specified. Valid formats are:
            * a sequence with 2 scalar values;
            * a sequence with a single element containing 2 scalar values.
            If all variables have a lower bound of 0 and no upper bound, the bounds
            parameter can be omitted (or given as None).
        x0 : 1D array, optional
            Guess values of the decision variables, which will be refined by
            the optimization algorithm. This argument is currently used only by the
            'revised simplex' method, and can only be used if `x0` represents a
            basic feasible solution.

    Returns
    -------
    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:

        c : 1D array
            The coefficients of the linear objective function to be minimized.
        A_ub : 2D array, optional
            The inequality constraint matrix. Each row of ``A_ub`` specifies the
            coefficients of a linear inequality constraint on ``x``.
        b_ub : 1D array, optional
            The inequality constraint vector. Each element represents an
            upper bound on the corresponding value of ``A_ub @ x``.
        A_eq : 2D array, optional
            The equality constraint matrix. Each row of ``A_eq`` specifies the
            coefficients of a linear equality constraint on ``x``.
        b_eq : 1D array, optional
            The equality constraint vector. Each element of ``A_eq @ x`` must equal
            the corresponding element of ``b_eq``.
        bounds : 2D array
            The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
            elements of ``x``. The N x 2 array contains lower bounds in the first
            column and upper bounds in the 2nd. Unbounded variables have lower
            bound -np.inf and/or upper bound np.inf.
        x0 : 1D array, optional
            Guess values of the decision variables, which will be refined by
            the optimization algorithm. This argument is currently used only by the
            'revised simplex' method, and can only be used if `x0` represents a
            basic feasible solution.

    Raises
    ------
    TypeError
        If an input cannot be converted to an array of the expected kind.
    ValueError
        If converted inputs have inconsistent shapes or non-finite entries.
    """
    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp

    # c is the only mandatory field of the problem tuple.
    if c is None:
        raise TypeError

    try:
        c = np.array(c, dtype=np.float64, copy=True).squeeze()
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: c must be a 1-D array of numerical "
            "coefficients") from e
    else:
        # If c is a single value, convert it to a 1-D array.
        if c.size == 1:
            c = c.reshape(-1)

        n_x = len(c)
        if n_x == 0 or len(c.shape) != 1:
            raise ValueError(
                "Invalid input for linprog: c must be a 1-D array and must "
                "not have more than one non-singleton dimension")
        if not np.isfinite(c).all():
            raise ValueError(
                "Invalid input for linprog: c must not contain values "
                "inf, nan, or None")

    # If either constraint matrix is sparse, both are kept/converted sparse
    # so downstream code deals with a single representation.
    sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
    try:
        A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: A_ub must be a 2-D array "
            "of numerical values") from e
    else:
        n_ub = A_ub.shape[0]
        if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
            raise ValueError(
                "Invalid input for linprog: A_ub must have exactly two "
                "dimensions, and the number of columns in A_ub must be "
                "equal to the size of c")
        # Sparse matrices expose their values via .data; dense via the array.
        if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
                or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
            raise ValueError(
                "Invalid input for linprog: A_ub must not contain values "
                "inf, nan, or None")

    try:
        b_ub = _format_b_constraints(b_ub)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: b_ub must be a 1-D array of "
            "numerical values, each representing the upper bound of an "
            "inequality constraint (row) in A_ub") from e
    else:
        if b_ub.shape != (n_ub,):
            raise ValueError(
                "Invalid input for linprog: b_ub must be a 1-D array; b_ub "
                "must not have more than one non-singleton dimension and "
                "the number of rows in A_ub must equal the number of values "
                "in b_ub")
        if not np.isfinite(b_ub).all():
            raise ValueError(
                "Invalid input for linprog: b_ub must not contain values "
                "inf, nan, or None")

    try:
        A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: A_eq must be a 2-D array "
            "of numerical values") from e
    else:
        n_eq = A_eq.shape[0]
        if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
            raise ValueError(
                "Invalid input for linprog: A_eq must have exactly two "
                "dimensions, and the number of columns in A_eq must be "
                "equal to the size of c")

        if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
                or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
            raise ValueError(
                "Invalid input for linprog: A_eq must not contain values "
                "inf, nan, or None")

    try:
        b_eq = _format_b_constraints(b_eq)
    except ValueError as e:
        raise TypeError(
            "Invalid input for linprog: b_eq must be a dense, 1-D array of "
            "numerical values, each representing the right hand side of an "
            "equality constraint (row) in A_eq") from e
    else:
        if b_eq.shape != (n_eq,):
            raise ValueError(
                "Invalid input for linprog: b_eq must be a 1-D array; b_eq "
                "must not have more than one non-singleton dimension and "
                "the number of rows in A_eq must equal the number of values "
                "in b_eq")
        if not np.isfinite(b_eq).all():
            raise ValueError(
                "Invalid input for linprog: b_eq must not contain values "
                "inf, nan, or None")

    # x0 gives a (optional) starting solution to the solver. If x0 is None,
    # skip the checks. Initial solution will be generated automatically.
    if x0 is not None:
        try:
            x0 = np.array(x0, dtype=float, copy=True).squeeze()
        except ValueError as e:
            raise TypeError(
                "Invalid input for linprog: x0 must be a 1-D array of "
                "numerical coefficients") from e
        # A scalar guess (0-d after squeeze) is promoted to a 1-D array.
        if x0.ndim == 0:
            x0 = x0.reshape(-1)
        if len(x0) == 0 or x0.ndim != 1:
            raise ValueError(
                "Invalid input for linprog: x0 should be a 1-D array; it "
                "must not have more than one non-singleton dimension")
        if not x0.size == c.size:
            raise ValueError(
                "Invalid input for linprog: x0 and c should contain the "
                "same number of elements")
        if not np.isfinite(x0).all():
            raise ValueError(
                "Invalid input for linprog: x0 must not contain values "
                "inf, nan, or None")

    # Bounds can be one of these formats:
    # (1) a 2-D array or sequence, with shape N x 2
    # (2) a 1-D or 2-D sequence or array with 2 scalars
    # (3) None (or an empty sequence or array)
    # Unspecified bounds can be represented by None or (-)np.inf.
    # All formats are converted into a N x 2 np.array with (-)np.inf where
    # bounds are unspecified.

    # Prepare clean bounds array
    bounds_clean = np.zeros((n_x, 2), dtype=float)

    # Convert to a numpy array.
    # np.array(..,dtype=float) raises an error if dimensions are inconsistent
    # or if there are invalid data types in bounds. Just add a linprog prefix
    # to the error and re-raise.
    # Creating at least a 2-D array simplifies the cases to distinguish below.
    if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]):
        # Default: lower bound 0, no upper bound, for every variable.
        bounds = (0, np.inf)
    try:
        bounds_conv = np.atleast_2d(np.array(bounds, dtype=float))
    except ValueError as e:
        raise ValueError(
            "Invalid input for linprog: unable to interpret bounds, "
            "check values and dimensions: " + e.args[0]) from e
    except TypeError as e:
        raise TypeError(
            "Invalid input for linprog: unable to interpret bounds, "
            "check values and dimensions: " + e.args[0]) from e

    # Check bounds options
    bsh = bounds_conv.shape
    if len(bsh) > 2:
        # Do not try to handle multidimensional bounds input
        raise ValueError(
            "Invalid input for linprog: provide a 2-D array for bounds, "
            f"not a {len(bsh):d}-D array.")
    elif np.all(bsh == (n_x, 2)):
        # Regular N x 2 array
        bounds_clean = bounds_conv
    elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))):
        # 2 values: interpret as overall lower and upper bound
        bounds_flat = bounds_conv.flatten()
        bounds_clean[:, 0] = bounds_flat[0]
        bounds_clean[:, 1] = bounds_flat[1]
    elif np.all(bsh == (2, n_x)):
        # Reject a 2 x N array
        raise ValueError(
            f"Invalid input for linprog: provide a {n_x:d} x 2 array for bounds, "
            f"not a 2 x {n_x:d} array.")
    else:
        raise ValueError(
            "Invalid input for linprog: unable to interpret bounds with this "
            f"dimension tuple: {bsh}.")

    # The process above creates nan-s where the input specified None
    # Convert the nan-s in the 1st column to -np.inf and in the 2nd column
    # to np.inf
    i_none = np.isnan(bounds_clean[:, 0])
    bounds_clean[i_none, 0] = -np.inf
    i_none = np.isnan(bounds_clean[:, 1])
    bounds_clean[i_none, 1] = np.inf

    return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality)
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def _presolve(lp, rr, rr_method, tol=1e-9):
|
| 478 |
+
"""
|
| 479 |
+
Given inputs for a linear programming problem in preferred format,
|
| 480 |
+
presolve the problem: identify trivial infeasibilities, redundancies,
|
| 481 |
+
and unboundedness, tighten bounds where possible, and eliminate fixed
|
| 482 |
+
variables.
|
| 483 |
+
|
| 484 |
+
Parameters
|
| 485 |
+
----------
|
| 486 |
+
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
|
| 487 |
+
|
| 488 |
+
c : 1D array
|
| 489 |
+
The coefficients of the linear objective function to be minimized.
|
| 490 |
+
A_ub : 2D array, optional
|
| 491 |
+
The inequality constraint matrix. Each row of ``A_ub`` specifies the
|
| 492 |
+
coefficients of a linear inequality constraint on ``x``.
|
| 493 |
+
b_ub : 1D array, optional
|
| 494 |
+
The inequality constraint vector. Each element represents an
|
| 495 |
+
upper bound on the corresponding value of ``A_ub @ x``.
|
| 496 |
+
A_eq : 2D array, optional
|
| 497 |
+
The equality constraint matrix. Each row of ``A_eq`` specifies the
|
| 498 |
+
coefficients of a linear equality constraint on ``x``.
|
| 499 |
+
b_eq : 1D array, optional
|
| 500 |
+
The equality constraint vector. Each element of ``A_eq @ x`` must equal
|
| 501 |
+
the corresponding element of ``b_eq``.
|
| 502 |
+
bounds : 2D array
|
| 503 |
+
The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
|
| 504 |
+
elements of ``x``. The N x 2 array contains lower bounds in the first
|
| 505 |
+
column and upper bounds in the 2nd. Unbounded variables have lower
|
| 506 |
+
bound -np.inf and/or upper bound np.inf.
|
| 507 |
+
x0 : 1D array, optional
|
| 508 |
+
Guess values of the decision variables, which will be refined by
|
| 509 |
+
the optimization algorithm. This argument is currently used only by the
|
| 510 |
+
'revised simplex' method, and can only be used if `x0` represents a
|
| 511 |
+
basic feasible solution.
|
| 512 |
+
|
| 513 |
+
rr : bool
|
| 514 |
+
If ``True`` attempts to eliminate any redundant rows in ``A_eq``.
|
| 515 |
+
Set False if ``A_eq`` is known to be of full row rank, or if you are
|
| 516 |
+
looking for a potential speedup (at the expense of reliability).
|
| 517 |
+
rr_method : string
|
| 518 |
+
Method used to identify and remove redundant rows from the
|
| 519 |
+
equality constraint matrix after presolve.
|
| 520 |
+
tol : float
|
| 521 |
+
The tolerance which determines when a solution is "close enough" to
|
| 522 |
+
zero in Phase 1 to be considered a basic feasible solution or close
|
| 523 |
+
enough to positive to serve as an optimal solution.
|
| 524 |
+
|
| 525 |
+
Returns
|
| 526 |
+
-------
|
| 527 |
+
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
|
| 528 |
+
|
| 529 |
+
c : 1D array
|
| 530 |
+
The coefficients of the linear objective function to be minimized.
|
| 531 |
+
A_ub : 2D array, optional
|
| 532 |
+
The inequality constraint matrix. Each row of ``A_ub`` specifies the
|
| 533 |
+
coefficients of a linear inequality constraint on ``x``.
|
| 534 |
+
b_ub : 1D array, optional
|
| 535 |
+
The inequality constraint vector. Each element represents an
|
| 536 |
+
upper bound on the corresponding value of ``A_ub @ x``.
|
| 537 |
+
A_eq : 2D array, optional
|
| 538 |
+
The equality constraint matrix. Each row of ``A_eq`` specifies the
|
| 539 |
+
coefficients of a linear equality constraint on ``x``.
|
| 540 |
+
b_eq : 1D array, optional
|
| 541 |
+
The equality constraint vector. Each element of ``A_eq @ x`` must equal
|
| 542 |
+
the corresponding element of ``b_eq``.
|
| 543 |
+
bounds : 2D array
|
| 544 |
+
The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened.
|
| 545 |
+
x0 : 1D array, optional
|
| 546 |
+
Guess values of the decision variables, which will be refined by
|
| 547 |
+
the optimization algorithm. This argument is currently used only by the
|
| 548 |
+
'revised simplex' method, and can only be used if `x0` represents a
|
| 549 |
+
basic feasible solution.
|
| 550 |
+
|
| 551 |
+
c0 : 1D array
|
| 552 |
+
Constant term in objective function due to fixed (and eliminated)
|
| 553 |
+
variables.
|
| 554 |
+
x : 1D array
|
| 555 |
+
Solution vector (when the solution is trivial and can be determined
|
| 556 |
+
in presolve)
|
| 557 |
+
revstack: list of functions
|
| 558 |
+
the functions in the list reverse the operations of _presolve()
|
| 559 |
+
the function signature is x_org = f(x_mod), where x_mod is the result
|
| 560 |
+
of a presolve step and x_org the value at the start of the step
|
| 561 |
+
(currently, the revstack contains only one function)
|
| 562 |
+
complete: bool
|
| 563 |
+
Whether the solution is complete (solved or determined to be infeasible
|
| 564 |
+
or unbounded in presolve)
|
| 565 |
+
status : int
|
| 566 |
+
An integer representing the exit status of the optimization::
|
| 567 |
+
|
| 568 |
+
0 : Optimization terminated successfully
|
| 569 |
+
1 : Iteration limit reached
|
| 570 |
+
2 : Problem appears to be infeasible
|
| 571 |
+
3 : Problem appears to be unbounded
|
| 572 |
+
4 : Serious numerical difficulties encountered
|
| 573 |
+
|
| 574 |
+
message : str
|
| 575 |
+
A string descriptor of the exit status of the optimization.
|
| 576 |
+
|
| 577 |
+
References
|
| 578 |
+
----------
|
| 579 |
+
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
|
| 580 |
+
large-scale linear programming." Optimization Methods and Software
|
| 581 |
+
6.3 (1995): 219-227.
|
| 582 |
+
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
|
| 583 |
+
programming." Mathematical Programming 71.2 (1995): 221-245.
|
| 584 |
+
|
| 585 |
+
"""
|
| 586 |
+
# ideas from Reference [5] by Andersen and Andersen
|
| 587 |
+
# however, unlike the reference, this is performed before converting
|
| 588 |
+
# problem to standard form
|
| 589 |
+
# There are a few advantages:
|
| 590 |
+
# * artificial variables have not been added, so matrices are smaller
|
| 591 |
+
# * bounds have not been converted to constraints yet. (It is better to
|
| 592 |
+
# do that after presolve because presolve may adjust the simple bounds.)
|
| 593 |
+
# There are many improvements that can be made, namely:
|
| 594 |
+
# * implement remaining checks from [5]
|
| 595 |
+
# * loop presolve until no additional changes are made
|
| 596 |
+
# * implement additional efficiency improvements in redundancy removal [2]
|
| 597 |
+
|
| 598 |
+
c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp
|
| 599 |
+
|
| 600 |
+
revstack = [] # record of variables eliminated from problem
|
| 601 |
+
# constant term in cost function may be added if variables are eliminated
|
| 602 |
+
c0 = 0
|
| 603 |
+
complete = False # complete is True if detected infeasible/unbounded
|
| 604 |
+
x = np.zeros(c.shape) # this is solution vector if completed in presolve
|
| 605 |
+
|
| 606 |
+
status = 0 # all OK unless determined otherwise
|
| 607 |
+
message = ""
|
| 608 |
+
|
| 609 |
+
# Lower and upper bounds. Copy to prevent feedback.
|
| 610 |
+
lb = bounds[:, 0].copy()
|
| 611 |
+
ub = bounds[:, 1].copy()
|
| 612 |
+
|
| 613 |
+
m_eq, n = A_eq.shape
|
| 614 |
+
m_ub, n = A_ub.shape
|
| 615 |
+
|
| 616 |
+
if (rr_method is not None
|
| 617 |
+
and rr_method.lower() not in {"svd", "pivot", "id"}):
|
| 618 |
+
message = ("'" + str(rr_method) + "' is not a valid option "
|
| 619 |
+
"for redundancy removal. Valid options are 'SVD', "
|
| 620 |
+
"'pivot', and 'ID'.")
|
| 621 |
+
raise ValueError(message)
|
| 622 |
+
|
| 623 |
+
if sps.issparse(A_eq):
|
| 624 |
+
A_eq = A_eq.tocsr()
|
| 625 |
+
A_ub = A_ub.tocsr()
|
| 626 |
+
|
| 627 |
+
def where(A):
|
| 628 |
+
return A.nonzero()
|
| 629 |
+
|
| 630 |
+
vstack = sps.vstack
|
| 631 |
+
else:
|
| 632 |
+
where = np.where
|
| 633 |
+
vstack = np.vstack
|
| 634 |
+
|
| 635 |
+
# upper bounds > lower bounds
|
| 636 |
+
if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf):
|
| 637 |
+
status = 2
|
| 638 |
+
message = ("The problem is (trivially) infeasible since one "
|
| 639 |
+
"or more upper bounds are smaller than the corresponding "
|
| 640 |
+
"lower bounds, a lower bound is np.inf or an upper bound "
|
| 641 |
+
"is -np.inf.")
|
| 642 |
+
complete = True
|
| 643 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 644 |
+
c0, x, revstack, complete, status, message)
|
| 645 |
+
|
| 646 |
+
# zero row in equality constraints
|
| 647 |
+
zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
|
| 648 |
+
if np.any(zero_row):
|
| 649 |
+
if np.any(
|
| 650 |
+
np.logical_and(
|
| 651 |
+
zero_row,
|
| 652 |
+
np.abs(b_eq) > tol)): # test_zero_row_1
|
| 653 |
+
# infeasible if RHS is not zero
|
| 654 |
+
status = 2
|
| 655 |
+
message = ("The problem is (trivially) infeasible due to a row "
|
| 656 |
+
"of zeros in the equality constraint matrix with a "
|
| 657 |
+
"nonzero corresponding constraint value.")
|
| 658 |
+
complete = True
|
| 659 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 660 |
+
c0, x, revstack, complete, status, message)
|
| 661 |
+
else: # test_zero_row_2
|
| 662 |
+
# if RHS is zero, we can eliminate this equation entirely
|
| 663 |
+
A_eq = A_eq[np.logical_not(zero_row), :]
|
| 664 |
+
b_eq = b_eq[np.logical_not(zero_row)]
|
| 665 |
+
|
| 666 |
+
# zero row in inequality constraints
|
| 667 |
+
zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
|
| 668 |
+
if np.any(zero_row):
|
| 669 |
+
if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1
|
| 670 |
+
# infeasible if RHS is less than zero (because LHS is zero)
|
| 671 |
+
status = 2
|
| 672 |
+
message = ("The problem is (trivially) infeasible due to a row "
|
| 673 |
+
"of zeros in the equality constraint matrix with a "
|
| 674 |
+
"nonzero corresponding constraint value.")
|
| 675 |
+
complete = True
|
| 676 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 677 |
+
c0, x, revstack, complete, status, message)
|
| 678 |
+
else: # test_zero_row_2
|
| 679 |
+
# if LHS is >= 0, we can eliminate this constraint entirely
|
| 680 |
+
A_ub = A_ub[np.logical_not(zero_row), :]
|
| 681 |
+
b_ub = b_ub[np.logical_not(zero_row)]
|
| 682 |
+
|
| 683 |
+
# zero column in (both) constraints
|
| 684 |
+
# this indicates that a variable isn't constrained and can be removed
|
| 685 |
+
A = vstack((A_eq, A_ub))
|
| 686 |
+
if A.shape[0] > 0:
|
| 687 |
+
zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
|
| 688 |
+
# variable will be at upper or lower bound, depending on objective
|
| 689 |
+
x[np.logical_and(zero_col, c < 0)] = ub[
|
| 690 |
+
np.logical_and(zero_col, c < 0)]
|
| 691 |
+
x[np.logical_and(zero_col, c > 0)] = lb[
|
| 692 |
+
np.logical_and(zero_col, c > 0)]
|
| 693 |
+
if np.any(np.isinf(x)): # if an unconstrained variable has no bound
|
| 694 |
+
status = 3
|
| 695 |
+
message = ("If feasible, the problem is (trivially) unbounded "
|
| 696 |
+
"due to a zero column in the constraint matrices. If "
|
| 697 |
+
"you wish to check whether the problem is infeasible, "
|
| 698 |
+
"turn presolve off.")
|
| 699 |
+
complete = True
|
| 700 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 701 |
+
c0, x, revstack, complete, status, message)
|
| 702 |
+
# variables will equal upper/lower bounds will be removed later
|
| 703 |
+
lb[np.logical_and(zero_col, c < 0)] = ub[
|
| 704 |
+
np.logical_and(zero_col, c < 0)]
|
| 705 |
+
ub[np.logical_and(zero_col, c > 0)] = lb[
|
| 706 |
+
np.logical_and(zero_col, c > 0)]
|
| 707 |
+
|
| 708 |
+
# row singleton in equality constraints
|
| 709 |
+
# this fixes a variable and removes the constraint
|
| 710 |
+
singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()
|
| 711 |
+
rows = where(singleton_row)[0]
|
| 712 |
+
cols = where(A_eq[rows, :])[1]
|
| 713 |
+
if len(rows) > 0:
|
| 714 |
+
for row, col in zip(rows, cols):
|
| 715 |
+
val = b_eq[row] / A_eq[row, col]
|
| 716 |
+
if not lb[col] - tol <= val <= ub[col] + tol:
|
| 717 |
+
# infeasible if fixed value is not within bounds
|
| 718 |
+
status = 2
|
| 719 |
+
message = ("The problem is (trivially) infeasible because a "
|
| 720 |
+
"singleton row in the equality constraints is "
|
| 721 |
+
"inconsistent with the bounds.")
|
| 722 |
+
complete = True
|
| 723 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 724 |
+
c0, x, revstack, complete, status, message)
|
| 725 |
+
else:
|
| 726 |
+
# sets upper and lower bounds at that fixed value - variable
|
| 727 |
+
# will be removed later
|
| 728 |
+
lb[col] = val
|
| 729 |
+
ub[col] = val
|
| 730 |
+
A_eq = A_eq[np.logical_not(singleton_row), :]
|
| 731 |
+
b_eq = b_eq[np.logical_not(singleton_row)]
|
| 732 |
+
|
| 733 |
+
# row singleton in inequality constraints
|
| 734 |
+
# this indicates a simple bound and the constraint can be removed
|
| 735 |
+
# simple bounds may be adjusted here
|
| 736 |
+
# After all of the simple bound information is combined here, get_Abc will
|
| 737 |
+
# turn the simple bounds into constraints
|
| 738 |
+
singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()
|
| 739 |
+
cols = where(A_ub[singleton_row, :])[1]
|
| 740 |
+
rows = where(singleton_row)[0]
|
| 741 |
+
if len(rows) > 0:
|
| 742 |
+
for row, col in zip(rows, cols):
|
| 743 |
+
val = b_ub[row] / A_ub[row, col]
|
| 744 |
+
if A_ub[row, col] > 0: # upper bound
|
| 745 |
+
if val < lb[col] - tol: # infeasible
|
| 746 |
+
complete = True
|
| 747 |
+
elif val < ub[col]: # new upper bound
|
| 748 |
+
ub[col] = val
|
| 749 |
+
else: # lower bound
|
| 750 |
+
if val > ub[col] + tol: # infeasible
|
| 751 |
+
complete = True
|
| 752 |
+
elif val > lb[col]: # new lower bound
|
| 753 |
+
lb[col] = val
|
| 754 |
+
if complete:
|
| 755 |
+
status = 2
|
| 756 |
+
message = ("The problem is (trivially) infeasible because a "
|
| 757 |
+
"singleton row in the upper bound constraints is "
|
| 758 |
+
"inconsistent with the bounds.")
|
| 759 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 760 |
+
c0, x, revstack, complete, status, message)
|
| 761 |
+
A_ub = A_ub[np.logical_not(singleton_row), :]
|
| 762 |
+
b_ub = b_ub[np.logical_not(singleton_row)]
|
| 763 |
+
|
| 764 |
+
# identical bounds indicate that variable can be removed
|
| 765 |
+
i_f = np.abs(lb - ub) < tol # indices of "fixed" variables
|
| 766 |
+
i_nf = np.logical_not(i_f) # indices of "not fixed" variables
|
| 767 |
+
|
| 768 |
+
# test_bounds_equal_but_infeasible
|
| 769 |
+
if np.all(i_f): # if bounds define solution, check for consistency
|
| 770 |
+
residual = b_eq - A_eq.dot(lb)
|
| 771 |
+
slack = b_ub - A_ub.dot(lb)
|
| 772 |
+
if ((A_ub.size > 0 and np.any(slack < 0)) or
|
| 773 |
+
(A_eq.size > 0 and not np.allclose(residual, 0))):
|
| 774 |
+
status = 2
|
| 775 |
+
message = ("The problem is (trivially) infeasible because the "
|
| 776 |
+
"bounds fix all variables to values inconsistent with "
|
| 777 |
+
"the constraints")
|
| 778 |
+
complete = True
|
| 779 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 780 |
+
c0, x, revstack, complete, status, message)
|
| 781 |
+
|
| 782 |
+
ub_mod = ub
|
| 783 |
+
lb_mod = lb
|
| 784 |
+
if np.any(i_f):
|
| 785 |
+
c0 += c[i_f].dot(lb[i_f])
|
| 786 |
+
b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])
|
| 787 |
+
b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])
|
| 788 |
+
c = c[i_nf]
|
| 789 |
+
x_undo = lb[i_f] # not x[i_f], x is just zeroes
|
| 790 |
+
x = x[i_nf]
|
| 791 |
+
# user guess x0 stays separate from presolve solution x
|
| 792 |
+
if x0 is not None:
|
| 793 |
+
x0 = x0[i_nf]
|
| 794 |
+
A_eq = A_eq[:, i_nf]
|
| 795 |
+
A_ub = A_ub[:, i_nf]
|
| 796 |
+
# modify bounds
|
| 797 |
+
lb_mod = lb[i_nf]
|
| 798 |
+
ub_mod = ub[i_nf]
|
| 799 |
+
|
| 800 |
+
def rev(x_mod):
|
| 801 |
+
# Function to restore x: insert x_undo into x_mod.
|
| 802 |
+
# When elements have been removed at positions k1, k2, k3, ...
|
| 803 |
+
# then these must be replaced at (after) positions k1-1, k2-2,
|
| 804 |
+
# k3-3, ... in the modified array to recreate the original
|
| 805 |
+
i = np.flatnonzero(i_f)
|
| 806 |
+
# Number of variables to restore
|
| 807 |
+
N = len(i)
|
| 808 |
+
index_offset = np.arange(N)
|
| 809 |
+
# Create insert indices
|
| 810 |
+
insert_indices = i - index_offset
|
| 811 |
+
x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo)
|
| 812 |
+
return x_rev
|
| 813 |
+
|
| 814 |
+
# Use revstack as a list of functions, currently just this one.
|
| 815 |
+
revstack.append(rev)
|
| 816 |
+
|
| 817 |
+
# no constraints indicates that problem is trivial
|
| 818 |
+
if A_eq.size == 0 and A_ub.size == 0:
|
| 819 |
+
b_eq = np.array([])
|
| 820 |
+
b_ub = np.array([])
|
| 821 |
+
# test_empty_constraint_1
|
| 822 |
+
if c.size == 0:
|
| 823 |
+
status = 0
|
| 824 |
+
message = ("The solution was determined in presolve as there are "
|
| 825 |
+
"no non-trivial constraints.")
|
| 826 |
+
elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
|
| 827 |
+
np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
|
| 828 |
+
# test_no_constraints()
|
| 829 |
+
# test_unbounded_no_nontrivial_constraints_1
|
| 830 |
+
# test_unbounded_no_nontrivial_constraints_2
|
| 831 |
+
status = 3
|
| 832 |
+
message = ("The problem is (trivially) unbounded "
|
| 833 |
+
"because there are no non-trivial constraints and "
|
| 834 |
+
"a) at least one decision variable is unbounded "
|
| 835 |
+
"above and its corresponding cost is negative, or "
|
| 836 |
+
"b) at least one decision variable is unbounded below "
|
| 837 |
+
"and its corresponding cost is positive. ")
|
| 838 |
+
else: # test_empty_constraint_2
|
| 839 |
+
status = 0
|
| 840 |
+
message = ("The solution was determined in presolve as there are "
|
| 841 |
+
"no non-trivial constraints.")
|
| 842 |
+
complete = True
|
| 843 |
+
x[c < 0] = ub_mod[c < 0]
|
| 844 |
+
x[c > 0] = lb_mod[c > 0]
|
| 845 |
+
# where c is zero, set x to a finite bound or zero
|
| 846 |
+
x_zero_c = ub_mod[c == 0]
|
| 847 |
+
x_zero_c[np.isinf(x_zero_c)] = ub_mod[c == 0][np.isinf(x_zero_c)]
|
| 848 |
+
x_zero_c[np.isinf(x_zero_c)] = 0
|
| 849 |
+
x[c == 0] = x_zero_c
|
| 850 |
+
# if this is not the last step of presolve, should convert bounds back
|
| 851 |
+
# to array and return here
|
| 852 |
+
|
| 853 |
+
# Convert modified lb and ub back into N x 2 bounds
|
| 854 |
+
bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis]))
|
| 855 |
+
|
| 856 |
+
# remove redundant (linearly dependent) rows from equality constraints
|
| 857 |
+
n_rows_A = A_eq.shape[0]
|
| 858 |
+
redundancy_warning = ("A_eq does not appear to be of full row rank. To "
|
| 859 |
+
"improve performance, check the problem formulation "
|
| 860 |
+
"for redundant equality constraints.")
|
| 861 |
+
if (sps.issparse(A_eq)):
|
| 862 |
+
if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
|
| 863 |
+
rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq)
|
| 864 |
+
A_eq, b_eq, status, message = rr_res
|
| 865 |
+
if A_eq.shape[0] < n_rows_A:
|
| 866 |
+
warn(redundancy_warning, OptimizeWarning, stacklevel=1)
|
| 867 |
+
if status != 0:
|
| 868 |
+
complete = True
|
| 869 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 870 |
+
c0, x, revstack, complete, status, message)
|
| 871 |
+
|
| 872 |
+
# This is a wild guess for which redundancy removal algorithm will be
|
| 873 |
+
# faster. More testing would be good.
|
| 874 |
+
small_nullspace = 5
|
| 875 |
+
if rr and A_eq.size > 0:
|
| 876 |
+
try: # TODO: use results of first SVD in _remove_redundancy_svd
|
| 877 |
+
rank = np.linalg.matrix_rank(A_eq)
|
| 878 |
+
# oh well, we'll have to go with _remove_redundancy_pivot_dense
|
| 879 |
+
except Exception:
|
| 880 |
+
rank = 0
|
| 881 |
+
if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
|
| 882 |
+
warn(redundancy_warning, OptimizeWarning, stacklevel=3)
|
| 883 |
+
dim_row_nullspace = A_eq.shape[0]-rank
|
| 884 |
+
if rr_method is None:
|
| 885 |
+
if dim_row_nullspace <= small_nullspace:
|
| 886 |
+
rr_res = _remove_redundancy_svd(A_eq, b_eq)
|
| 887 |
+
A_eq, b_eq, status, message = rr_res
|
| 888 |
+
if dim_row_nullspace > small_nullspace or status == 4:
|
| 889 |
+
rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
|
| 890 |
+
A_eq, b_eq, status, message = rr_res
|
| 891 |
+
|
| 892 |
+
else:
|
| 893 |
+
rr_method = rr_method.lower()
|
| 894 |
+
if rr_method == "svd":
|
| 895 |
+
rr_res = _remove_redundancy_svd(A_eq, b_eq)
|
| 896 |
+
A_eq, b_eq, status, message = rr_res
|
| 897 |
+
elif rr_method == "pivot":
|
| 898 |
+
rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
|
| 899 |
+
A_eq, b_eq, status, message = rr_res
|
| 900 |
+
elif rr_method == "id":
|
| 901 |
+
rr_res = _remove_redundancy_id(A_eq, b_eq, rank)
|
| 902 |
+
A_eq, b_eq, status, message = rr_res
|
| 903 |
+
else: # shouldn't get here; option validity checked above
|
| 904 |
+
pass
|
| 905 |
+
if A_eq.shape[0] < rank:
|
| 906 |
+
message = ("Due to numerical issues, redundant equality "
|
| 907 |
+
"constraints could not be removed automatically. "
|
| 908 |
+
"Try providing your constraint matrices as sparse "
|
| 909 |
+
"matrices to activate sparse presolve, try turning "
|
| 910 |
+
"off redundancy removal, or try turning off presolve "
|
| 911 |
+
"altogether.")
|
| 912 |
+
status = 4
|
| 913 |
+
if status != 0:
|
| 914 |
+
complete = True
|
| 915 |
+
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
|
| 916 |
+
c0, x, revstack, complete, status, message)
|
| 917 |
+
|
| 918 |
+
|
| 919 |
+
def _parse_linprog(lp, options, meth):
    """
    Parse the provided linear programming problem.

    ``_parse_linprog`` employs two main steps, ``_check_sparse_inputs`` and
    ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
    provided constraints (``A_ub`` and ``A_eq``) and whether these match the
    sparsity-related solver options.

    ``_clean_inputs`` validates the provided inputs. If no violations are
    identified, the objective vector, upper bound constraints, equality
    constraints, and simple bounds are returned in the expected format.

    Parameters
    ----------
    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:

        c : 1D array
            The coefficients of the linear objective function to be minimized.
        A_ub : 2D array, optional
            The inequality constraint matrix. Each row of ``A_ub`` specifies
            the coefficients of a linear inequality constraint on ``x``.
        b_ub : 1D array, optional
            The inequality constraint vector. Each element represents an
            upper bound on the corresponding value of ``A_ub @ x``.
        A_eq : 2D array, optional
            The equality constraint matrix. Each row of ``A_eq`` specifies
            the coefficients of a linear equality constraint on ``x``.
        b_eq : 1D array, optional
            The equality constraint vector. Each element of ``A_eq @ x`` must
            equal the corresponding element of ``b_eq``.
        bounds : various valid formats, optional
            The bounds of ``x``, as ``min`` and ``max`` pairs. Accepted
            formats include an N x 2 (or 2 x N) array, a sequence of N
            pairs, a single pair applied to all variables, or ``None``
            (meaning lower bound 0 and no upper bound).
        x0 : 1D array, optional
            Guess values of the decision variables, which will be refined by
            the optimization algorithm. This argument is currently used only
            by the 'revised simplex' method, and can only be used if `x0`
            represents a basic feasible solution.

    options : dict or None
        A dictionary of solver options (e.g. ``maxiter``, ``disp``);
        ``None`` is treated as an empty dictionary.
    meth : str
        The solver method, used when validating sparsity-related options.

    Returns
    -------
    lp : A `scipy.optimize._linprog_util._LPProblem`
        The cleaned problem: arrays converted to NumPy format and ``bounds``
        normalized to an N x 2 array with lower bounds in the first column
        and upper bounds in the second (unbounded entries are ``-np.inf`` /
        ``np.inf``).
    solver_options : dict
        The (possibly adjusted) solver options.
    """
    # Work on a copy so the caller's options dict is never mutated.
    solver_options = {} if options is None else dict(options)
    solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth,
                                                      lp.A_ub, lp.A_eq)
    # Convert lists to numpy arrays, validate shapes and values, etc.
    lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))
    return lp, solver_options
|
| 1028 |
+
|
| 1029 |
+
|
| 1030 |
+
def _get_Abc(lp, c0):
    """
    Given a linear programming problem of the form:

    Minimize::

        c @ x

    Subject to::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
         lb <= x <= ub

    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.

    Return the problem in standard form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
            x >= 0

    by adding slack variables and making variable substitutions as necessary.

    Parameters
    ----------
    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:

        c : 1D array
            The coefficients of the linear objective function to be minimized.
        A_ub : 2D array, optional
            The inequality constraint matrix. Each row of ``A_ub`` specifies the
            coefficients of a linear inequality constraint on ``x``.
        b_ub : 1D array, optional
            The inequality constraint vector. Each element represents an
            upper bound on the corresponding value of ``A_ub @ x``.
        A_eq : 2D array, optional
            The equality constraint matrix. Each row of ``A_eq`` specifies the
            coefficients of a linear equality constraint on ``x``.
        b_eq : 1D array, optional
            The equality constraint vector. Each element of ``A_eq @ x`` must equal
            the corresponding element of ``b_eq``.
        bounds : 2D array
            The bounds of ``x``, lower bounds in the 1st column, upper
            bounds in the 2nd column. The bounds are possibly tightened
            by the presolve procedure.
        x0 : 1D array, optional
            Guess values of the decision variables, which will be refined by
            the optimization algorithm. This argument is currently used only by the
            'revised simplex' method, and can only be used if `x0` represents a
            basic feasible solution.

    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables.

    Returns
    -------
    A : 2-D array
        2-D array such that ``A`` @ ``x``, gives the values of the equality
        constraints at ``x``.
    b : 1-D array
        1-D array of values representing the RHS of each equality constraint
        (row) in A (for standard form problem).
    c : 1-D array
        Coefficients of the linear objective function to be minimized (for
        standard form problem).
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables.
    x0 : 1-D array
        Starting values of the independent variables, which will be refined by
        the optimization algorithm

    References
    ----------
    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
           programming." Athena Scientific 1 (1997): 997.

    """
    # `integrality` is unpacked but not used by this conversion.
    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp

    # Pick sparse or dense building blocks up front so the rest of the
    # function can be written once for both representations.
    if sps.issparse(A_eq):
        sparse = True
        A_eq = sps.csr_matrix(A_eq)
        A_ub = sps.csr_matrix(A_ub)

        def hstack(blocks):
            return sps.hstack(blocks, format="csr")

        def vstack(blocks):
            return sps.vstack(blocks, format="csr")

        zeros = sps.csr_matrix
        eye = sps.eye
    else:
        sparse = False
        hstack = np.hstack
        vstack = np.vstack
        zeros = np.zeros
        eye = np.eye

    # Variables lbs and ubs (see below) may be changed, which feeds back into
    # bounds, so copy.
    bounds = np.array(bounds, copy=True)

    # modify problem such that all variables have only non-negativity bounds
    # NOTE: lbs/ubs are views into the `bounds` copy above, so in-place
    # assignments below modify `bounds` (intentionally, on the copy only).
    lbs = bounds[:, 0]
    ubs = bounds[:, 1]
    m_ub, n_ub = A_ub.shape

    lb_none = np.equal(lbs, -np.inf)
    ub_none = np.equal(ubs, np.inf)
    lb_some = np.logical_not(lb_none)
    ub_some = np.logical_not(ub_none)

    # unbounded below: substitute xi = -xi' (unbounded above)
    # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds
    l_nolb_someub = np.logical_and(lb_none, ub_some)
    i_nolb = np.nonzero(l_nolb_someub)[0]
    lbs[l_nolb_someub], ubs[l_nolb_someub] = (
        -ubs[l_nolb_someub], -lbs[l_nolb_someub])
    # recompute the masks: the swap above changed which bounds are infinite
    lb_none = np.equal(lbs, -np.inf)
    ub_none = np.equal(ubs, np.inf)
    lb_some = np.logical_not(lb_none)
    ub_some = np.logical_not(ub_none)
    # negate the objective and constraint columns of the substituted variables
    c[i_nolb] *= -1
    if x0 is not None:
        x0[i_nolb] *= -1
    if len(i_nolb) > 0:
        if A_ub.shape[0] > 0:  # sometimes needed for sparse arrays... weird
            A_ub[:, i_nolb] *= -1
        if A_eq.shape[0] > 0:
            A_eq[:, i_nolb] *= -1

    # upper bound: add inequality constraint
    # each finite upper bound xi <= ub becomes an extra row of A_ub
    i_newub, = ub_some.nonzero()
    ub_newub = ubs[ub_some]
    n_bounds = len(i_newub)
    if n_bounds > 0:
        shape = (n_bounds, A_ub.shape[1])
        if sparse:
            idxs = (np.arange(n_bounds), i_newub)
            A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs),
                                                shape=shape)))
        else:
            A_ub = vstack((A_ub, np.zeros(shape)))
            A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1
        b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
        b_ub[m_ub:] = ub_newub

    A1 = vstack((A_ub, A_eq))
    b = np.concatenate((b_ub, b_eq))
    # pad c (and x0) with zeros: one slack variable per A_ub row is added
    # below via A2
    c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
    if x0 is not None:
        x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))
    # unbounded: substitute xi = xi+ - xi- (both nonnegative); the xi-
    # columns are appended negated and get the negated objective coefficient
    l_free = np.logical_and(lb_none, ub_none)
    i_free = np.nonzero(l_free)[0]
    n_free = len(i_free)
    c = np.concatenate((c, np.zeros(n_free)))
    if x0 is not None:
        x0 = np.concatenate((x0, np.zeros(n_free)))
    A1 = hstack((A1[:, :n_ub], -A1[:, i_free]))
    c[n_ub:n_ub+n_free] = -c[i_free]
    if x0 is not None:
        # split a negative guess between the xi+ and xi- components
        i_free_neg = x0[i_free] < 0
        x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]
        x0[i_free[i_free_neg]] = 0

    # add slack variables
    # identity block for inequality rows, zero block for equality rows
    A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])

    A = hstack([A1, A2])

    # lower bound: substitute xi = xi' + lb
    # now there is a constant term in objective
    i_shift = np.nonzero(lb_some)[0]
    lb_shift = lbs[lb_some].astype(float)
    c0 += np.sum(lb_shift * c[i_shift])
    if sparse:
        b = b.reshape(-1, 1)
        A = A.tocsc()
        b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
        b = b.ravel()
    else:
        b -= (A[:, i_shift] * lb_shift).sum(axis=1)
    if x0 is not None:
        x0[i_shift] -= lb_shift

    return A, b, c, c0, x0
|
| 1226 |
+
|
| 1227 |
+
|
| 1228 |
+
def _round_to_power_of_two(x):
|
| 1229 |
+
"""
|
| 1230 |
+
Round elements of the array to the nearest power of two.
|
| 1231 |
+
"""
|
| 1232 |
+
return 2**np.around(np.log2(x))
|
| 1233 |
+
|
| 1234 |
+
|
| 1235 |
+
def _autoscale(A, b, c, x0):
    """
    Scales the problem according to equilibration from [12].
    Also normalizes the right hand side vector by its maximum element.
    """
    n_rows, n_cols = A.shape

    # Scalar defaults apply when A is empty and no equilibration happens.
    col_scale = 1
    row_scale = 1

    if A.size > 0:

        # Row equilibration: divide each row by the power-of-two rounding
        # of its largest absolute entry (zero rows are left untouched).
        row_scale = np.max(np.abs(A), axis=1)
        if sps.issparse(A):
            row_scale = row_scale.toarray().flatten()
        row_scale[row_scale == 0] = 1
        row_scale = 1 / _round_to_power_of_two(row_scale)
        if sps.issparse(A):
            A = sps.diags(row_scale) * A
        else:
            A = A * row_scale.reshape(n_rows, 1)
        b = b * row_scale

        # Column equilibration, computed on the row-scaled matrix.
        col_scale = np.max(np.abs(A), axis=0)
        if sps.issparse(A):
            col_scale = col_scale.toarray().flatten()
        col_scale[col_scale == 0] = 1
        col_scale = 1 / _round_to_power_of_two(col_scale)
        if sps.issparse(A):
            A = A * sps.diags(col_scale)
        else:
            A = A * col_scale
        c = c * col_scale

    # Normalize the RHS by its largest magnitude, guarding against b == 0.
    b_scale = np.max(np.abs(b)) if b.size > 0 else 1
    if b_scale == 0:
        b_scale = 1.
    b = b / b_scale

    if x0 is not None:
        x0 = x0 / b_scale * (1 / col_scale)
    return A, b, c, x0, col_scale, b_scale
|
| 1271 |
+
|
| 1272 |
+
|
| 1273 |
+
def _unscale(x, C, b_scale):
|
| 1274 |
+
"""
|
| 1275 |
+
Converts solution to _autoscale problem -> solution to original problem.
|
| 1276 |
+
"""
|
| 1277 |
+
|
| 1278 |
+
try:
|
| 1279 |
+
n = len(C)
|
| 1280 |
+
# fails if sparse or scalar; that's OK.
|
| 1281 |
+
# this is only needed for original simplex (never sparse)
|
| 1282 |
+
except TypeError:
|
| 1283 |
+
n = len(x)
|
| 1284 |
+
|
| 1285 |
+
return x[:n]*b_scale*C
|
| 1286 |
+
|
| 1287 |
+
|
| 1288 |
+
def _display_summary(message, status, fun, iteration):
    """
    Print the termination summary of the linear program

    Parameters
    ----------
    message : str
        A string descriptor of the exit status of the optimization.
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    fun : float
        Value of the objective function.
    iteration : int
        The number of iterations performed.
    """
    print(message)
    # Objective value / iteration count are only meaningful on success (0)
    # or when the iteration limit was reached (1).
    if status in (0, 1):
        print(f" Current function value: {fun: <12.6f}")
        print(f" Iterations: {iteration:d}")
|
| 1314 |
+
|
| 1315 |
+
|
| 1316 |
+
def _postsolve(x, postsolve_args, complete=False):
    """
    Given solution x to presolved, standard form linear program x, add
    fixed variables back into the problem and undo the variable substitutions
    to get solution to original linear program. Also, calculate the objective
    function value, slack in original upper bound constraints, and residuals
    in original equality constraints.

    Parameters
    ----------
    x : 1-D array
        Solution vector to the standard-form problem.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the standard-form
        problem into the solution to the original problem, including:

        lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:

            c : 1D array
                The coefficients of the linear objective function to be minimized.
            A_ub : 2D array, optional
                The inequality constraint matrix. Each row of ``A_ub`` specifies the
                coefficients of a linear inequality constraint on ``x``.
            b_ub : 1D array, optional
                The inequality constraint vector. Each element represents an
                upper bound on the corresponding value of ``A_ub @ x``.
            A_eq : 2D array, optional
                The equality constraint matrix. Each row of ``A_eq`` specifies the
                coefficients of a linear equality constraint on ``x``.
            b_eq : 1D array, optional
                The equality constraint vector. Each element of ``A_eq @ x`` must equal
                the corresponding element of ``b_eq``.
            bounds : 2D array
                The bounds of ``x``, lower bounds in the 1st column, upper
                bounds in the 2nd column. The bounds are possibly tightened
                by the presolve procedure.
            x0 : 1D array, optional
                Guess values of the decision variables, which will be refined by
                the optimization algorithm. This argument is currently used only by the
                'revised simplex' method, and can only be used if `x0` represents a
                basic feasible solution.

        revstack: list of functions
            the functions in the list reverse the operations of _presolve()
            the function signature is x_org = f(x_mod), where x_mod is the result
            of a presolve step and x_org the value at the start of the step
    complete : bool
        Whether the solution was determined in presolve (``True`` if so)

    Returns
    -------
    x : 1-D array
        Solution vector to original linear programming problem
    fun: float
        optimal objective value for original problem
    slack : 1-D array
        The (non-negative) slack in the upper bound constraints, that is,
        ``b_ub - A_ub @ x``
    con : 1-D array
        The (nominally zero) residuals of the equality constraints, that is,
        ``b - A_eq @ x``
    """
    # note that all the inputs are the ORIGINAL, unmodified versions
    # no rows, columns have been removed

    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0]
    revstack, C, b_scale = postsolve_args[1:]

    # Reverse the column/row scaling applied by the autoscale step.
    x = _unscale(x, C, b_scale)

    # Undo variable substitutions of _get_Abc()
    # if "complete", problem was solved in presolve; don't do anything here
    # NOTE(review): `bounds.shape` is accessed before the `bounds is not None`
    # check below, so a None `bounds` would already have raised here — the
    # inline comment suggests bounds is never None in practice; confirm.
    n_x = bounds.shape[0]
    if not complete and bounds is not None:  # bounds are never none, probably
        n_unbounded = 0
        for i, bi in enumerate(bounds):
            lbi = bi[0]
            ubi = bi[1]
            if lbi == -np.inf and ubi == np.inf:
                # Presumably _get_Abc split each free variable into a
                # difference of two nonnegative variables; recombine them
                # here (the negative part is stored after the first n_x
                # entries) — TODO confirm against _get_Abc.
                n_unbounded += 1
                x[i] = x[i] - x[n_x + n_unbounded - 1]
            else:
                if lbi == -np.inf:
                    # Variable bounded only above: undo the x -> ub - x flip.
                    x[i] = ubi - x[i]
                else:
                    # Shift back by the lower bound subtracted in standard form.
                    x[i] += lbi
        # all the rest of the variables were artificial
        x = x[:n_x]

    # If there were variables removed from the problem, add them back into the
    # solution vector
    # Apply the functions in revstack (reverse direction)
    for rev in reversed(revstack):
        x = rev(x)

    fun = x.dot(c)
    slack = b_ub - A_ub.dot(x)  # report slack for ORIGINAL UB constraints
    # report residuals of ORIGINAL EQ constraints
    con = b_eq - A_eq.dot(x)

    return x, fun, slack, con
|
| 1419 |
+
def _check_result(x, fun, status, slack, con, bounds, tol, message,
|
| 1420 |
+
integrality):
|
| 1421 |
+
"""
|
| 1422 |
+
Check the validity of the provided solution.
|
| 1423 |
+
|
| 1424 |
+
A valid (optimal) solution satisfies all bounds, all slack variables are
|
| 1425 |
+
negative and all equality constraint residuals are strictly non-zero.
|
| 1426 |
+
Further, the lower-bounds, upper-bounds, slack and residuals contain
|
| 1427 |
+
no nan values.
|
| 1428 |
+
|
| 1429 |
+
Parameters
|
| 1430 |
+
----------
|
| 1431 |
+
x : 1-D array
|
| 1432 |
+
Solution vector to original linear programming problem
|
| 1433 |
+
fun: float
|
| 1434 |
+
optimal objective value for original problem
|
| 1435 |
+
status : int
|
| 1436 |
+
An integer representing the exit status of the optimization::
|
| 1437 |
+
|
| 1438 |
+
0 : Optimization terminated successfully
|
| 1439 |
+
1 : Iteration limit reached
|
| 1440 |
+
2 : Problem appears to be infeasible
|
| 1441 |
+
3 : Problem appears to be unbounded
|
| 1442 |
+
4 : Serious numerical difficulties encountered
|
| 1443 |
+
|
| 1444 |
+
slack : 1-D array
|
| 1445 |
+
The (non-negative) slack in the upper bound constraints, that is,
|
| 1446 |
+
``b_ub - A_ub @ x``
|
| 1447 |
+
con : 1-D array
|
| 1448 |
+
The (nominally zero) residuals of the equality constraints, that is,
|
| 1449 |
+
``b - A_eq @ x``
|
| 1450 |
+
bounds : 2D array
|
| 1451 |
+
The bounds on the original variables ``x``
|
| 1452 |
+
message : str
|
| 1453 |
+
A string descriptor of the exit status of the optimization.
|
| 1454 |
+
tol : float
|
| 1455 |
+
Termination tolerance; see [1]_ Section 4.5.
|
| 1456 |
+
|
| 1457 |
+
Returns
|
| 1458 |
+
-------
|
| 1459 |
+
status : int
|
| 1460 |
+
An integer representing the exit status of the optimization::
|
| 1461 |
+
|
| 1462 |
+
0 : Optimization terminated successfully
|
| 1463 |
+
1 : Iteration limit reached
|
| 1464 |
+
2 : Problem appears to be infeasible
|
| 1465 |
+
3 : Problem appears to be unbounded
|
| 1466 |
+
4 : Serious numerical difficulties encountered
|
| 1467 |
+
|
| 1468 |
+
message : str
|
| 1469 |
+
A string descriptor of the exit status of the optimization.
|
| 1470 |
+
"""
|
| 1471 |
+
# Somewhat arbitrary
|
| 1472 |
+
tol = np.sqrt(tol) * 10
|
| 1473 |
+
|
| 1474 |
+
if x is None:
|
| 1475 |
+
# HiGHS does not provide x if infeasible/unbounded
|
| 1476 |
+
if status == 0: # Observed with HiGHS Simplex Primal
|
| 1477 |
+
status = 4
|
| 1478 |
+
message = ("The solver did not provide a solution nor did it "
|
| 1479 |
+
"report a failure. Please submit a bug report.")
|
| 1480 |
+
return status, message
|
| 1481 |
+
|
| 1482 |
+
contains_nans = (
|
| 1483 |
+
np.isnan(x).any()
|
| 1484 |
+
or np.isnan(fun)
|
| 1485 |
+
or np.isnan(slack).any()
|
| 1486 |
+
or np.isnan(con).any()
|
| 1487 |
+
)
|
| 1488 |
+
|
| 1489 |
+
if contains_nans:
|
| 1490 |
+
is_feasible = False
|
| 1491 |
+
else:
|
| 1492 |
+
if integrality is None:
|
| 1493 |
+
integrality = 0
|
| 1494 |
+
valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol)
|
| 1495 |
+
# When integrality is 2 or 3, x must be within bounds OR take value 0
|
| 1496 |
+
valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol)
|
| 1497 |
+
invalid_bounds = not np.all(valid_bounds)
|
| 1498 |
+
|
| 1499 |
+
invalid_slack = status != 3 and (slack < -tol).any()
|
| 1500 |
+
invalid_con = status != 3 and (np.abs(con) > tol).any()
|
| 1501 |
+
is_feasible = not (invalid_bounds or invalid_slack or invalid_con)
|
| 1502 |
+
|
| 1503 |
+
if status == 0 and not is_feasible:
|
| 1504 |
+
status = 4
|
| 1505 |
+
message = ("The solution does not satisfy the constraints within the "
|
| 1506 |
+
"required tolerance of " + f"{tol:.2E}" + ", yet "
|
| 1507 |
+
"no errors were raised and there is no certificate of "
|
| 1508 |
+
"infeasibility or unboundedness. Check whether "
|
| 1509 |
+
"the slack and constraint residuals are acceptable; "
|
| 1510 |
+
"if not, consider enabling presolve, adjusting the "
|
| 1511 |
+
"tolerance option(s), and/or using a different method. "
|
| 1512 |
+
"Please consider submitting a bug report.")
|
| 1513 |
+
elif status == 2 and is_feasible:
|
| 1514 |
+
# Occurs if the simplex method exits after phase one with a very
|
| 1515 |
+
# nearly basic feasible solution. Postsolving can make the solution
|
| 1516 |
+
# basic, however, this solution is NOT optimal
|
| 1517 |
+
status = 4
|
| 1518 |
+
message = ("The solution is feasible, but the solver did not report "
|
| 1519 |
+
"that the solution was optimal. Please try a different "
|
| 1520 |
+
"method.")
|
| 1521 |
+
|
| 1522 |
+
return status, message
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module contains least-squares algorithms."""
|
| 2 |
+
from .least_squares import least_squares
|
| 3 |
+
from .lsq_linear import lsq_linear
|
| 4 |
+
|
| 5 |
+
__all__ = ['least_squares', 'lsq_linear']
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc
ADDED
|
Binary file (2.72 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Bounded-variable least-squares algorithm."""
|
| 2 |
+
import numpy as np
|
| 3 |
+
from numpy.linalg import norm, lstsq
|
| 4 |
+
from scipy.optimize import OptimizeResult
|
| 5 |
+
|
| 6 |
+
from .common import print_header_linear, print_iteration_linear
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def compute_kkt_optimality(g, on_bound):
    """Compute the maximum violation of KKT conditions."""
    # On the free set (on_bound == 0) the gradient must vanish, so its
    # magnitude measures the violation; on an active bound only the signed
    # component pointing back into the feasible region counts.
    violation = np.where(on_bound == 0, np.abs(g), g * on_bound)
    return np.max(violation)
|
| 17 |
+
def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None):
    """Solve a bounded-variable least-squares problem by an active-set method.

    Minimizes ``0.5 * ||A x - b||**2`` subject to ``lb <= x <= ub``, starting
    from the unconstrained least-squares solution `x_lsq`. Returns a
    `scipy.optimize.OptimizeResult` with the solution, residual vector,
    final cost, KKT optimality measure and active-bound mask.
    """
    m, n = A.shape

    x = x_lsq.copy()
    # on_bound[i] is -1 / +1 if x[i] sits on its lower / upper bound,
    # 0 if x[i] is free.
    on_bound = np.zeros(n)

    # Clip the starting point into the box and record which bounds are active.
    mask = x <= lb
    x[mask] = lb[mask]
    on_bound[mask] = -1

    mask = x >= ub
    x[mask] = ub[mask]
    on_bound[mask] = 1

    free_set = on_bound == 0
    active_set = ~free_set
    free_set, = np.nonzero(free_set)

    r = A.dot(x) - b
    cost = 0.5 * np.dot(r, r)
    initial_cost = cost
    g = A.T.dot(r)

    cost_change = None
    step_norm = None
    iteration = 0

    if verbose == 2:
        print_header_linear()

    # This is the initialization loop. The requirement is that the
    # least-squares solution on free variables is feasible before BVLS starts.
    # One possible initialization is to set all variables to lower or upper
    # bounds, but many iterations may be required from this state later on.
    # The implemented ad-hoc procedure which intuitively should give a better
    # initial state: find the least-squares solution on current free variables,
    # if its feasible then stop, otherwise, set violating variables to
    # corresponding bounds and continue on the reduced set of free variables.

    while free_set.size > 0:
        if verbose == 2:
            optimality = compute_kkt_optimality(g, on_bound)
            print_iteration_linear(iteration, cost, cost_change, step_norm,
                                   optimality)

        iteration += 1
        x_free_old = x[free_set].copy()

        # Unconstrained least-squares solve restricted to the free columns;
        # the contribution of variables held at their bounds is moved to
        # the right-hand side.
        A_free = A[:, free_set]
        b_free = b - A.dot(x * active_set)
        z = lstsq(A_free, b_free, rcond=rcond)[0]

        lbv = z < lb[free_set]
        ubv = z > ub[free_set]
        v = lbv | ubv

        # Pin every violating variable to the bound it crossed.
        if np.any(lbv):
            ind = free_set[lbv]
            x[ind] = lb[ind]
            active_set[ind] = True
            on_bound[ind] = -1

        if np.any(ubv):
            ind = free_set[ubv]
            x[ind] = ub[ind]
            active_set[ind] = True
            on_bound[ind] = 1

        ind = free_set[~v]
        x[ind] = z[~v]

        r = A.dot(x) - b
        cost_new = 0.5 * np.dot(r, r)
        cost_change = cost - cost_new
        cost = cost_new
        g = A.T.dot(r)
        step_norm = norm(x[free_set] - x_free_old)

        if np.any(v):
            free_set = free_set[~v]
        else:
            # The restricted solution is feasible; initialization done.
            break

    # Budget for the main loop is `n` iterations beyond those already spent
    # (or the caller-supplied `max_iter` beyond them).
    if max_iter is None:
        max_iter = n
    max_iter += iteration

    # None => still running; 0 => iteration limit; 1 => KKT optimality met;
    # 2 => cost change below tolerance.
    termination_status = None

    # Main BVLS loop.

    optimality = compute_kkt_optimality(g, on_bound)
    for iteration in range(iteration, max_iter):  # BVLS Loop A
        if verbose == 2:
            print_iteration_linear(iteration, cost, cost_change,
                                   step_norm, optimality)

        if optimality < tol:
            termination_status = 1

        if termination_status is not None:
            break

        # Release the active bound whose gradient component most strongly
        # pulls the variable back into the interior.
        move_to_free = np.argmax(g * on_bound)
        on_bound[move_to_free] = 0

        while True:   # BVLS Loop B

            free_set = on_bound == 0
            active_set = ~free_set
            free_set, = np.nonzero(free_set)

            x_free = x[free_set]
            x_free_old = x_free.copy()
            lb_free = lb[free_set]
            ub_free = ub[free_set]

            A_free = A[:, free_set]
            b_free = b - A.dot(x * active_set)
            z = lstsq(A_free, b_free, rcond=rcond)[0]

            lbv, = np.nonzero(z < lb_free)
            ubv, = np.nonzero(z > ub_free)
            v = np.hstack((lbv, ubv))

            if v.size > 0:
                # Step from x_free toward z only as far as the first bound
                # hit; alpha in [0, 1] is the fraction of the full step.
                alphas = np.hstack((
                    lb_free[lbv] - x_free[lbv],
                    ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v])

                i = np.argmin(alphas)
                i_free = v[i]
                alpha = alphas[i]

                # x_free = (1 - alpha) * x_free + alpha * z, in place.
                x_free *= 1 - alpha
                x_free += alpha * z
                x[free_set] = x_free

                # Record which bound the blocking variable landed on; `i`
                # indexes the concatenated (lbv, ubv) array.
                if i < lbv.size:
                    on_bound[free_set[i_free]] = -1
                else:
                    on_bound[free_set[i_free]] = 1
            else:
                # Full restricted least-squares step is feasible; accept it.
                x_free = z
                x[free_set] = x_free
                break

        step_norm = norm(x_free - x_free_old)

        r = A.dot(x) - b
        cost_new = 0.5 * np.dot(r, r)
        cost_change = cost - cost_new

        if cost_change < tol * cost:
            termination_status = 2
        cost = cost_new

        g = A.T.dot(r)
        optimality = compute_kkt_optimality(g, on_bound)

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(
        x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound,
        nit=iteration + 1, status=termination_status,
        initial_cost=initial_cost)
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py
ADDED
|
@@ -0,0 +1,733 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Functions used by least-squares algorithms."""
|
| 2 |
+
from math import copysign
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.linalg import norm
|
| 6 |
+
|
| 7 |
+
from scipy.linalg import cho_factor, cho_solve, LinAlgError
|
| 8 |
+
from scipy.sparse import issparse
|
| 9 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
EPS = np.finfo(float).eps
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Functions related to a trust-region problem.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def intersect_trust_region(x, s, Delta):
    """Find the intersection of a line with the boundary of a trust region.

    Solves the quadratic equation in t given by
    ``||(x + s*t)||**2 = Delta**2``.

    Returns
    -------
    t_neg, t_pos : tuple of float
        Negative and positive roots.

    Raises
    ------
    ValueError
        If `s` is zero or `x` is not within the trust region.
    """
    a = np.dot(s, s)
    if a == 0:
        raise ValueError("`s` is zero.")

    b = np.dot(x, s)

    c = np.dot(x, x) - Delta**2
    if c > 0:
        raise ValueError("`x` is not within the trust region.")

    # One fourth of the discriminant; real because c <= 0 here.
    d = np.sqrt(b*b - a*c)

    # Stable quadratic formula ("Numerical Recipes") avoiding cancellation.
    q = -(b + copysign(d, b))
    roots = (q / a, c / q)

    return min(roots), max(roots)
+
|
| 57 |
+
def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
                           rtol=0.01, max_iter=10):
    """Solve a trust-region problem arising in least-squares minimization.

    This function implements a method described by J. J. More [1]_ and used
    in MINPACK, but it relies on a single SVD of Jacobian instead of series
    of Cholesky decompositions. Before running this function, compute:
    ``U, s, VT = svd(J, full_matrices=False)``.

    Parameters
    ----------
    n : int
        Number of variables.
    m : int
        Number of residuals.
    uf : ndarray
        Computed as U.T.dot(f).
    s : ndarray
        Singular values of J.
    V : ndarray
        Transpose of VT.
    Delta : float
        Radius of a trust region.
    initial_alpha : float, optional
        Initial guess for alpha, which might be available from a previous
        iteration. If None, determined automatically.
    rtol : float, optional
        Stopping tolerance for the root-finding procedure. Namely, the
        solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
    max_iter : int, optional
        Maximum allowed number of iterations for the root-finding procedure.

    Returns
    -------
    p : ndarray, shape (n,)
        Found solution of a trust-region problem.
    alpha : float
        Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
        Sometimes called Levenberg-Marquardt parameter.
    n_iter : int
        Number of iterations made by root-finding procedure. Zero means
        that Gauss-Newton step was selected as the solution.

    References
    ----------
    .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
        and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
        in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
    """
    def phi_and_derivative(alpha, suf, s, Delta):
        """Function of which to find zero.

        It is defined as "norm of regularized (by alpha) least-squares
        solution minus `Delta`". Refer to [1]_.
        """
        denom = s**2 + alpha
        p_norm = norm(suf / denom)
        phi = p_norm - Delta
        phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
        return phi, phi_prime

    suf = s * uf

    # Check if J has full rank and try Gauss-Newton step.
    if m >= n:
        # Rank test relative to the largest singular value s[0].
        threshold = EPS * m * s[0]
        full_rank = s[-1] > threshold
    else:
        full_rank = False

    if full_rank:
        p = -V.dot(uf / s)
        if norm(p) <= Delta:
            # Unregularized Gauss-Newton step already lies inside the
            # region: done, alpha = 0, zero root-finding iterations.
            return p, 0.0, 0

    alpha_upper = norm(suf) / Delta

    if full_rank:
        phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
        alpha_lower = -phi / phi_prime
    else:
        alpha_lower = 0.0

    if initial_alpha is None or not full_rank and initial_alpha == 0:
        alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
    else:
        alpha = initial_alpha

    for it in range(max_iter):
        # Keep the iterate within the current bracket (safeguarded Newton).
        if alpha < alpha_lower or alpha > alpha_upper:
            alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)

        phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)

        if phi < 0:
            alpha_upper = alpha

        ratio = phi / phi_prime
        alpha_lower = max(alpha_lower, alpha - ratio)
        # More's rescaled Newton update for the root of phi.
        alpha -= (phi + Delta) * ratio / Delta

        if np.abs(phi) < rtol * Delta:
            break

    p = -V.dot(suf / (s**2 + alpha))

    # Make the norm of p equal to Delta, p is changed only slightly during
    # this. It is done to prevent p lie outside the trust region (which can
    # cause problems later).
    p *= Delta / norm(p)

    return p, alpha, it + 1
+
|
| 171 |
+
def solve_trust_region_2d(B, g, Delta):
    """Solve a general trust-region problem in 2 dimensions.

    The problem is reformulated as a 4th order algebraic equation,
    the solution of which is found by numpy.roots.

    Parameters
    ----------
    B : ndarray, shape (2, 2)
        Symmetric matrix, defines a quadratic term of the function.
    g : ndarray, shape (2,)
        Defines a linear term of the function.
    Delta : float
        Radius of a trust region.

    Returns
    -------
    p : ndarray, shape (2,)
        Found solution.
    newton_step : bool
        Whether the returned solution is the Newton step which lies within
        the trust region.
    """
    # First try the unconstrained Newton step B*p = -g; if B is positive
    # definite and the step fits inside the region, it is optimal.
    try:
        chol = cho_factor(B)
        p = -cho_solve(chol, g)
        if np.dot(p, p) <= Delta**2:
            return p, True
    except LinAlgError:
        pass

    # Otherwise the minimum lies on the boundary. Parameterize the circle
    # of radius Delta by the half-angle substitution and reduce the
    # stationarity condition to a quartic in t.
    a = B[0, 0] * Delta**2
    b = B[0, 1] * Delta**2
    c = B[1, 1] * Delta**2

    d = g[0] * Delta
    f = g[1] * Delta

    quartic = np.array(
        [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
    t = np.roots(quartic)  # Can handle leading zeros.
    t = np.real(t[np.isreal(t)])

    # Map each real root back to a boundary point and keep the best one.
    p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
    value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
    best = np.argmin(value)

    return p[:, best], False
|
| 222 |
+
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
                     step_norm, bound_hit):
    """Update the radius of a trust region based on the cost reduction.

    Returns
    -------
    Delta : float
        New radius.
    ratio : float
        Ratio between actual and predicted reductions.
    """
    # Agreement between model and objective; a zero/zero reduction counts
    # as perfect agreement, a non-positive prediction as none.
    if predicted_reduction > 0:
        ratio = actual_reduction / predicted_reduction
    else:
        ratio = 1 if predicted_reduction == actual_reduction == 0 else 0

    if ratio < 0.25:
        # Poor agreement: shrink the region around the step just taken.
        return 0.25 * step_norm, ratio
    if ratio > 0.75 and bound_hit:
        # Good agreement and the step was limited by the boundary: expand.
        return 2.0 * Delta, ratio
    return Delta, ratio
| 248 |
+
# Construction and minimization of quadratic functions.
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def build_quadratic_1d(J, g, s, diag=None, s0=None):
    """Parameterize a multivariate quadratic function along a line.

    The resulting univariate quadratic function is given as follows::

        f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
               g.T * (s0 + s*t)

    Parameters
    ----------
    J : ndarray, sparse matrix or LinearOperator shape (m, n)
        Jacobian matrix, affects the quadratic term.
    g : ndarray, shape (n,)
        Gradient, defines the linear term.
    s : ndarray, shape (n,)
        Direction vector of a line.
    diag : None or ndarray with shape (n,), optional
        Addition diagonal part, affects the quadratic term.
        If None, assumed to be 0.
    s0 : None or ndarray with shape (n,), optional
        Initial point. If None, assumed to be 0.

    Returns
    -------
    a : float
        Coefficient for t**2.
    b : float
        Coefficient for t.
    c : float
        Free term. Returned only if `s0` is provided.
    """
    Js = J.dot(s)
    a = np.dot(Js, Js)
    if diag is not None:
        a += np.dot(s * diag, s)
    a *= 0.5

    b = np.dot(g, s)

    if s0 is None:
        return a, b

    # Shifted starting point contributes cross terms to b and a free term c.
    Js0 = J.dot(s0)
    b += np.dot(Js0, Js)
    c = 0.5 * np.dot(Js0, Js0) + np.dot(g, s0)
    if diag is not None:
        b += np.dot(s0 * diag, s)
        c += 0.5 * np.dot(s0 * diag, s0)
    return a, b, c
|
| 302 |
+
def minimize_quadratic_1d(a, b, lb, ub, c=0):
    """Minimize a 1-D quadratic function subject to bounds.

    The free term `c` is 0 by default. Bounds must be finite.

    Returns
    -------
    t : float
        Minimum point.
    y : float
        Minimum value.
    """
    # Candidate minimizers: both bounds, plus the vertex when it is interior.
    candidates = [lb, ub]
    if a != 0:
        vertex = -0.5 * b / a
        if lb < vertex < ub:
            candidates.append(vertex)

    candidates = np.asarray(candidates)
    values = candidates * (a * candidates + b) + c

    best = np.argmin(values)
    return candidates[best], values[best]
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def evaluate_quadratic(J, g, s, diag=None):
    """Compute values of a quadratic function arising in least squares.

    The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.

    Parameters
    ----------
    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
        Jacobian matrix, affects the quadratic term.
    g : ndarray, shape (n,)
        Gradient, defines the linear term.
    s : ndarray, shape (k, n) or (n,)
        Array containing steps as rows.
    diag : ndarray, shape (n,), optional
        Addition diagonal part, affects the quadratic term.
        If None, assumed to be 0.

    Returns
    -------
    values : ndarray with shape (k,) or float
        Values of the function. If `s` was 2-D, then ndarray is
        returned, otherwise, float is returned.
    """
    if s.ndim == 1:
        # Single step: plain dot products.
        Js = J.dot(s)
        quad = np.dot(Js, Js)
        if diag is not None:
            quad += np.dot(s * diag, s)
    else:
        # Batch of steps as rows: evaluate all quadratic terms at once.
        Js = J.dot(s.T)
        quad = np.sum(Js**2, axis=0)
        if diag is not None:
            quad += np.sum(diag * s**2, axis=1)

    linear = np.dot(s, g)

    return 0.5 * quad + linear
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
# Utility functions to work with bound constraints.
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def in_bounds(x, lb, ub):
    """Check if a point lies within bounds."""
    return np.all(np.logical_and(lb <= x, x <= ub))
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def step_size_to_bound(x, s, lb, ub):
    """Compute a min_step size required to reach a bound.

    The function computes a positive scalar t, such that x + s * t is on
    the bound.

    Returns
    -------
    step : float
        Computed step. Non-negative value.
    hits : ndarray of int with shape of x
        Each element indicates whether a corresponding variable reaches the
        bound:

             *  0 - the bound was not hit.
             * -1 - the lower bound was hit.
             *  1 - the upper bound was hit.
    """
    moving = np.nonzero(s)
    s_moving = s[moving]

    # Variables with zero direction component never reach a bound.
    per_var_step = np.empty_like(x)
    per_var_step.fill(np.inf)
    with np.errstate(over='ignore'):
        per_var_step[moving] = np.maximum((lb - x)[moving] / s_moving,
                                          (ub - x)[moving] / s_moving)

    min_step = np.min(per_var_step)
    # Sign of s distinguishes which bound each limiting variable reaches.
    return min_step, np.equal(per_var_step, min_step) * np.sign(s).astype(int)
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
def find_active_constraints(x, lb, ub, rtol=1e-10):
    """Determine which constraints are active in a given point.

    The threshold is computed using `rtol` and the absolute value of the
    closest bound.

    Returns
    -------
    active : ndarray of int with shape of x
        Each component shows whether the corresponding constraint is active:

             *  0 - a constraint is not active.
             * -1 - a lower bound is active.
             *  1 - a upper bound is active.
    """
    active = np.zeros_like(x, dtype=int)

    if rtol == 0:
        # Exact comparison against the bounds.
        active[x <= lb] = -1
        active[x >= ub] = 1
        return active

    dist_lower = x - lb
    dist_upper = ub - x

    # Tolerance is relative to the magnitude of the bound, floored at 1.
    threshold_lower = rtol * np.maximum(1, np.abs(lb))
    threshold_upper = rtol * np.maximum(1, np.abs(ub))

    # A bound counts as active only if it is finite, within tolerance, and
    # not farther than the opposite bound.
    hit_lower = (np.isfinite(lb) &
                 (dist_lower <= np.minimum(dist_upper, threshold_lower)))
    active[hit_lower] = -1

    hit_upper = (np.isfinite(ub) &
                 (dist_upper <= np.minimum(dist_lower, threshold_upper)))
    active[hit_upper] = 1

    return active
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
    """Shift a point to the interior of a feasible region.

    Each element of the returned vector is at least at a relative distance
    `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
    """
    x_new = x.copy()

    active = find_active_constraints(x, lb, ub, rstep)
    at_lower = np.equal(active, -1)
    at_upper = np.equal(active, 1)

    if rstep == 0:
        # Move by the smallest representable amount towards the interior.
        x_new[at_lower] = np.nextafter(lb[at_lower], ub[at_lower])
        x_new[at_upper] = np.nextafter(ub[at_upper], lb[at_upper])
    else:
        # Shift by a distance relative to the bound magnitude, floored at 1.
        x_new[at_lower] = (lb[at_lower] +
                           rstep * np.maximum(1, np.abs(lb[at_lower])))
        x_new[at_upper] = (ub[at_upper] -
                           rstep * np.maximum(1, np.abs(ub[at_upper])))

    # If the shift overshot the opposite bound, the interval is very tight;
    # fall back to its midpoint.
    tight = (x_new < lb) | (x_new > ub)
    x_new[tight] = 0.5 * (lb[tight] + ub[tight])

    return x_new
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def CL_scaling_vector(x, g, lb, ub):
    """Compute Coleman-Li scaling vector and its derivatives.

    Components of a vector v are defined as follows::

               | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
        v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
               | 1,           otherwise

    According to this definition v[i] >= 0 for all i. It differs from the
    definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
    used. Both definitions are equivalent down the line.
    Derivatives of v with respect to x take value 1, -1 or 0 depending on a
    case.

    Returns
    -------
    v : ndarray with shape of x
        Scaling vector.
    dv : ndarray with shape of x
        Derivatives of v[i] with respect to x[i], diagonal elements of v's
        Jacobian.

    References
    ----------
    .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
           and Conjugate Gradient Method for Large-Scale Bound-Constrained
           Minimization Problems," SIAM Journal on Scientific Computing,
           Vol. 21, Number 1, pp 1-23, 1999.
    """
    v = np.ones_like(x)
    dv = np.zeros_like(x)

    # Descent direction points towards a finite upper bound.
    towards_ub = (g < 0) & np.isfinite(ub)
    v[towards_ub] = ub[towards_ub] - x[towards_ub]
    dv[towards_ub] = -1

    # Descent direction points towards a finite lower bound.
    towards_lb = (g > 0) & np.isfinite(lb)
    v[towards_lb] = x[towards_lb] - lb[towards_lb]
    dv[towards_lb] = 1

    return v, dv
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
def reflective_transformation(y, lb, ub):
    """Compute reflective transformation and its gradient."""
    # Points already inside the box map to themselves with unit gradient.
    if in_bounds(y, lb, ub):
        return y, np.ones_like(y)

    has_lb = np.isfinite(lb)
    has_ub = np.isfinite(ub)

    x = y.copy()
    g_negative = np.zeros_like(y, dtype=bool)

    # Only a lower bound: reflect about lb.
    mask = has_lb & ~has_ub
    x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
    g_negative[mask] = y[mask] < lb[mask]

    # Only an upper bound: reflect about ub.
    mask = ~has_lb & has_ub
    x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
    g_negative[mask] = y[mask] > ub[mask]

    # Both bounds finite: fold y periodically into [lb, ub].
    mask = has_lb & has_ub
    d = ub - lb
    t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
    x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
    g_negative[mask] = t > d[mask]

    g = np.ones_like(y)
    g[g_negative] = -1

    return x, g
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
# Functions to display algorithm's progress.
|
| 543 |
+
|
| 544 |
+
|
| 545 |
+
def print_header_nonlinear():
    """Print the column headers of the nonlinear solver progress table."""
    header = ("{:^15}{:^15}{:^15}{:^15}{:^15}{:^15}"
              .format("Iteration", "Total nfev", "Cost", "Cost reduction",
                      "Step norm", "Optimality"))
    print(header)
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
                              step_norm, optimality):
    """Print one row of the nonlinear solver progress table.

    ``cost_reduction`` and ``step_norm`` may be None on the first iteration;
    blank columns are printed in that case.
    """
    cost_reduction = (" " * 15 if cost_reduction is None
                      else f"{cost_reduction:^15.2e}")
    step_norm = (" " * 15 if step_norm is None
                 else f"{step_norm:^15.2e}")

    print("{:^15}{:^15}{:^15.4e}{}{}{:^15.2e}"
          .format(iteration, nfev, cost, cost_reduction,
                  step_norm, optimality))
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def print_header_linear():
    """Print the column headers of the linear solver progress table."""
    header = ("{:^15}{:^15}{:^15}{:^15}{:^15}"
              .format("Iteration", "Cost", "Cost reduction", "Step norm",
                      "Optimality"))
    print(header)
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
                           optimality):
    """Print one row of the linear solver progress table.

    ``cost_reduction`` and ``step_norm`` may be None on the first iteration;
    blank columns are printed in that case.
    """
    cost_reduction = (" " * 15 if cost_reduction is None
                      else f"{cost_reduction:^15.2e}")
    step_norm = (" " * 15 if step_norm is None
                 else f"{step_norm:^15.2e}")

    print(f"{iteration:^15}{cost:^15.4e}{cost_reduction}{step_norm}{optimality:^15.2e}")
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
# Simple helper functions.
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
def compute_grad(J, f):
    """Compute gradient of the least-squares cost function."""
    # For operators the adjoint product is the only available form.
    if isinstance(J, LinearOperator):
        return J.rmatvec(f)
    return J.T.dot(f)
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
def compute_jac_scale(J, scale_inv_old=None):
    """Compute variables scale based on the Jacobian matrix."""
    # Column norms of J serve as the inverse scale.
    if issparse(J):
        col_norms = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
    else:
        col_norms = np.sum(J**2, axis=0)**0.5

    if scale_inv_old is None:
        # Zero columns get unit scale to avoid division by zero.
        col_norms[col_norms == 0] = 1
    else:
        # Never let the scale shrink between iterations.
        col_norms = np.maximum(col_norms, scale_inv_old)

    return 1 / col_norms, col_norms
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
def left_multiplied_operator(J, d):
    """Return diag(d) J as LinearOperator."""
    J = aslinearoperator(J)

    def matvec(v):
        # (diag(d) J) v = d * (J v)
        return d * J.matvec(v)

    def matmat(V):
        return d[:, np.newaxis] * J.matmat(V)

    def rmatvec(v):
        # Adjoint: J.T (d * v)
        return J.rmatvec(v.ravel() * d)

    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
                          rmatvec=rmatvec)
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
def right_multiplied_operator(J, d):
    """Return J diag(d) as LinearOperator."""
    J = aslinearoperator(J)

    def matvec(v):
        # (J diag(d)) v = J (d * v)
        return J.matvec(np.ravel(v) * d)

    def matmat(V):
        return J.matmat(V * d[:, np.newaxis])

    def rmatvec(v):
        # Adjoint: d * (J.T v)
        return d * J.rmatvec(v)

    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
                          rmatvec=rmatvec)
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
def regularized_lsq_operator(J, diag):
    """Return a matrix arising in regularized least squares as LinearOperator.

    The matrix is
        [ J ]
        [ D ]
    where D is diagonal matrix with elements from `diag`.
    """
    J = aslinearoperator(J)
    m, n = J.shape

    def matvec(v):
        # Stack the Jacobian product on top of the regularization rows.
        return np.hstack((J.matvec(v), diag * v))

    def rmatvec(v):
        top = v[:m]
        bottom = v[m:]
        return J.rmatvec(top) + diag * bottom

    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
def right_multiply(J, d, copy=True):
    """Compute J diag(d).

    If `copy` is False, `J` is modified in place (unless being LinearOperator).
    """
    if isinstance(J, LinearOperator):
        # Operators are never modified in place; wrap instead.
        return right_multiplied_operator(J, d)

    if copy:
        J = J.copy()

    if issparse(J):
        # Scale each stored entry by its column factor.
        J.data *= d.take(J.indices, mode='clip')  # scikit-learn recipe.
    else:
        J *= d

    return J
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
def left_multiply(J, d, copy=True):
    """Compute diag(d) J.

    If `copy` is False, `J` is modified in place (unless being LinearOperator).
    """
    if isinstance(J, LinearOperator):
        # Operators are never modified in place; wrap instead.
        return left_multiplied_operator(J, d)

    if copy:
        J = J.copy()

    if issparse(J):
        # Scale each stored entry by its row factor (CSR layout).
        J.data *= np.repeat(d, np.diff(J.indptr))  # scikit-learn recipe.
    else:
        J *= d[:, np.newaxis]

    return J
|
| 705 |
+
|
| 706 |
+
|
| 707 |
+
def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
    """Check termination condition for nonlinear least squares.

    Returns the solver status code: 2 for ftol, 3 for xtol, 4 for both,
    or None when neither tolerance is satisfied.
    """
    ftol_ok = dF < ftol * F and ratio > 0.25
    xtol_ok = dx_norm < xtol * (xtol + x_norm)

    if ftol_ok and xtol_ok:
        return 4
    if ftol_ok:
        return 2
    if xtol_ok:
        return 3
    return None
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
def scale_for_robust_loss_function(J, f, rho):
    """Scale Jacobian and residuals for a robust loss function.

    Arrays are modified in place.
    """
    # Per-residual scale built from the loss derivatives rho[1], rho[2].
    scale = rho[1] + 2 * rho[2] * f**2
    # Guard against non-positive values before taking the square root.
    scale[scale < EPS] = EPS
    scale **= 0.5

    f *= rho[1] / scale

    return left_multiply(J, scale, copy=False), f
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Dogleg algorithm with rectangular trust regions for least-squares minimization.
|
| 3 |
+
|
| 4 |
+
The description of the algorithm can be found in [Voglis]_. The algorithm does
|
| 5 |
+
trust-region iterations, but the shape of trust regions is rectangular as
|
| 6 |
+
opposed to conventional elliptical. The intersection of a trust region and
|
| 7 |
+
an initial feasible region is again some rectangle. Thus, on each iteration a
|
| 8 |
+
bound-constrained quadratic optimization problem is solved.
|
| 9 |
+
|
| 10 |
+
A quadratic problem is solved by well-known dogleg approach, where the
|
| 11 |
+
function is minimized along piecewise-linear "dogleg" path [NumOpt]_,
|
| 12 |
+
Chapter 4. If Jacobian is not rank-deficient then the function is decreasing
|
| 13 |
+
along this path, and optimization amounts to simply following along this
|
| 14 |
+
path as long as a point stays within the bounds. A constrained Cauchy step
|
| 15 |
+
(along the anti-gradient) is considered for safety in rank deficient cases,
|
| 16 |
+
in such situations the convergence might be slow.
|
| 17 |
+
|
| 18 |
+
If during iterations some variable hit the initial bound and the component
|
| 19 |
+
of anti-gradient points outside the feasible region, then a next dogleg step
|
| 20 |
+
won't make any progress. At this state such variables satisfy first-order
|
| 21 |
+
optimality conditions and they are excluded before computing a next dogleg
|
| 22 |
+
step.
|
| 23 |
+
|
| 24 |
+
Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense
|
| 25 |
+
Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for
|
| 26 |
+
dense and sparse matrices, or Jacobian being LinearOperator). The second
|
| 27 |
+
option allows to solve very large problems (up to couple of millions of
|
| 28 |
+
residuals on a regular PC), provided the Jacobian matrix is sufficiently
|
| 29 |
+
sparse. But note that dogbox is not very good for solving problems with
|
| 30 |
+
large number of constraints, because of variables exclusion-inclusion on each
|
| 31 |
+
iteration (a required number of function evaluations might be high or accuracy
|
| 32 |
+
of a solution will be poor), thus its large-scale usage is probably limited
|
| 33 |
+
to unconstrained problems.
|
| 34 |
+
|
| 35 |
+
References
|
| 36 |
+
----------
|
| 37 |
+
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
|
| 38 |
+
Approach for Unconstrained and Bound Constrained Nonlinear
|
| 39 |
+
Optimization", WSEAS International Conference on Applied
|
| 40 |
+
Mathematics, Corfu, Greece, 2004.
|
| 41 |
+
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
|
| 42 |
+
"""
|
| 43 |
+
import numpy as np
|
| 44 |
+
from numpy.linalg import lstsq, norm
|
| 45 |
+
|
| 46 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
|
| 47 |
+
from scipy.optimize import OptimizeResult
|
| 48 |
+
|
| 49 |
+
from .common import (
|
| 50 |
+
step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
|
| 51 |
+
build_quadratic_1d, minimize_quadratic_1d, compute_grad,
|
| 52 |
+
compute_jac_scale, check_termination, scale_for_robust_loss_function,
|
| 53 |
+
print_header_nonlinear, print_iteration_nonlinear)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by dogbox algorithm.

    `active_set` mask is used to excluded active variables from computations
    of matrix-vector products.

    Parameters
    ----------
    Jop : LinearOperator
        Operator for the full Jacobian.
    d : ndarray, shape (n,)
        Diagonal scaling applied to the variables.
    active_set : ndarray of bool, shape (n,)
        Mask of variables to exclude (treated as zero columns).

    Returns
    -------
    LinearOperator with shape of `Jop` acting as ``J_free * diag(d)``.
    """
    m, n = Jop.shape

    def matvec(x):
        # Zero out active components so active columns do not contribute.
        # (Previously the masked copy was computed but the unmasked `x` was
        # used, leaving `x_free` dead; within LSMR iterates the active
        # components are already zero, so behavior for dogbox is unchanged.)
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        # Zero out active rows of the adjoint product.
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True means that an original bound is taken as a corresponding bound
        in the intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True means that a trust-region bound is taken as a corresponding bound
        in the intersection region.
    """
    # Work in coordinates centered at the current point x.
    lb_centered = lb - x
    ub_centered = ub - x

    # Tightest bound on each side wins.
    lb_total = np.maximum(lb_centered, -tr_bounds)
    ub_total = np.minimum(ub_centered, tr_bounds)

    # Record which side each winning bound came from.
    orig_l = np.equal(lb_total, lb_centered)
    orig_u = np.equal(ub_total, ub_centered)

    tr_l = np.equal(lb_total, -tr_bounds)
    tr_u = np.equal(ub_total, tr_bounds)

    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
    """Find dogleg step in a rectangular region.

    Parameters are in coordinates of the free variables only; `a` and `b`
    are the 1-D quadratic coefficients of the model along the anti-gradient.

    Returns
    -------
    step : ndarray, shape (n,)
        Computed dogleg step.
    bound_hits : ndarray of int, shape (n,)
        Each component shows whether a corresponding variable hits the
        initial bound after the step is taken:
            * 0 - a variable doesn't hit the bound.
            * -1 - lower bound is hit.
            * 1 - upper bound is hit.
    tr_hit : bool
        Whether the step hit the boundary of the trust-region.
    """
    # Intersect the trust region with the feasible box, centered at x.
    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
        x, tr_bounds, lb, ub
    )
    bound_hits = np.zeros_like(x, dtype=int)

    # If the full Gauss-Newton step fits, take it - no bound is hit.
    if in_bounds(newton_step, lb_total, ub_total):
        return newton_step, bound_hits, False

    # Largest multiple of -g that stays inside the intersection region.
    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)

    # The classical dogleg algorithm would check if Cauchy step fits into
    # the bounds, and just return it constrained version if not. But in a
    # rectangular trust region it makes sense to try to improve constrained
    # Cauchy step too. Thus, we don't distinguish these two cases.

    # Minimize the 1-D model along -g within [0, to_bounds].
    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g

    # Walk from the Cauchy point towards the Newton point until a bound.
    step_diff = newton_step - cauchy_step
    step_size, hits = step_size_to_bound(cauchy_step, step_diff,
                                         lb_total, ub_total)
    # A hit counts against the original bounds only where the intersection
    # bound came from them; otherwise it is a trust-region hit.
    bound_hits[(hits < 0) & orig_l] = -1
    bound_hits[(hits > 0) & orig_u] = 1
    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)

    return cauchy_step + step_size * step_diff, bound_hits, tr_hit
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
           loss_function, tr_solver, tr_options, verbose):
    """Run the dogbox trust-region iterations (see the module docstring).

    `f0` and `J0` are the residuals and Jacobian evaluated at `x0`; `fun`
    and `jac` re-evaluate them at new points.  Returns an `OptimizeResult`
    with the solution, final cost, gradient and solver counters.
    """
    f = f0
    # Unscaled residuals kept for reporting; robust-loss scaling below
    # modifies `f` in place.
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    # Initial trust-region radius in scaled variables.
    Delta = norm(x0 * scale_inv, ord=np.inf)
    if Delta == 0:
        Delta = 1.0

    # Track which variables sit exactly on a bound: -1 lower, 1 upper.
    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1

    x = x0
    step = np.empty_like(x0)

    if max_nfev is None:
        max_nfev = x0.size * 100

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        # A variable is "active" when it is on a bound and the anti-gradient
        # points outside the feasible region; such variables satisfy
        # first-order optimality and are excluded from the step.
        active_set = on_bound * g < 0
        free_set = ~active_set

        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0  # in-place: projected gradient used from here on

        # First-order optimality measured on the projected gradient.
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_free = scale[free_set]

        # Compute (Gauss-)Newton and build quadratic model for Cauchy step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f, rcond=-1)[0]

            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)

            # We compute lsmr step in scaled variables and then
            # transform back to normal variables, if lsmr would give exact lsq
            # solution, this would be equivalent to not doing any
            # transformations, but from experience it's better this way.

            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.

            lsmr_op = lsmr_operator(Jop, scale, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_free

            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)

        # Inner loop: shrink the trust region until a step reduces the cost.
        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_free

            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)

            step.fill(0.0)
            step[free_set] = step_free

            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(J_free, g_free,
                                                          step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)

            # gh11403 ensure that solution is fully within bounds.
            x_new = np.clip(x + step, lb, ub)

            f_new = fun(x_new)
            nfev += 1

            # Step length measured in scaled variables.
            step_h_norm = norm(step * scale_inv, ord=np.inf)

            if not np.all(np.isfinite(f_new)):
                # Non-finite residuals: shrink the region and retry.
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, tr_hit
            )

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            on_bound[free_set] = on_bound_free

            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0  # max_nfev exhausted

    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py
ADDED
|
@@ -0,0 +1,967 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generic interface for least-squares minimization."""
|
| 2 |
+
from warnings import warn
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.linalg import norm
|
| 6 |
+
|
| 7 |
+
from scipy.sparse import issparse
|
| 8 |
+
from scipy.sparse.linalg import LinearOperator
|
| 9 |
+
from scipy.optimize import _minpack, OptimizeResult
|
| 10 |
+
from scipy.optimize._numdiff import approx_derivative, group_columns
|
| 11 |
+
from scipy.optimize._minimize import Bounds
|
| 12 |
+
|
| 13 |
+
from .trf import trf
|
| 14 |
+
from .dogbox import dogbox
|
| 15 |
+
from .common import EPS, in_bounds, make_strictly_feasible
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Human-readable termination reports keyed by the common status codes
# attached to the `OptimizeResult` returned by `least_squares`.
TERMINATION_MESSAGES = {
    -1: "Improper input parameters status returned from `leastsq`",
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`ftol` termination condition is satisfied.",
    3: "`xtol` termination condition is satisfied.",
    4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Translation table from MINPACK's `info` codes to the common status codes
# used by TERMINATION_MESSAGES above.
FROM_MINPACK_TO_COMMON = {
    0: -1,  # Improper input parameters from MINPACK.
    1: 2,
    2: 3,
    3: 4,
    4: 1,
    5: 0
    # There are 6, 7, 8 for too small tolerance parameters,
    # but we guard against it by checking ftol, xtol, gtol beforehand.
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
    """Run MINPACK's Levenberg-Marquardt solver and repackage its output.

    Dispatches to ``_lmdif`` (numerical Jacobian) when `jac` is None, or to
    ``_lmder`` (analytic Jacobian) otherwise, then converts MINPACK's status
    and result into the common `OptimizeResult` layout used by
    `least_squares`.
    """
    n = x0.size

    # MINPACK's `epsfcn` is the *squared* relative step of the forward
    # differences it uses internally.
    epsfcn = EPS if diff_step is None else diff_step**2

    # MINPACK's `diag` is the inverse of our `x_scale`, and
    # ``x_scale='jac'`` corresponds to ``diag=None``.
    if isinstance(x_scale, str) and x_scale == 'jac':
        diag = None
    else:
        diag = 1 / x_scale

    full_output = True
    col_deriv = False
    factor = 100.0

    if jac is None:
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(
            fun, x0, (), full_output, ftol, xtol, gtol,
            max_nfev, epsfcn, factor, diag)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(
            fun, jac, x0, (), full_output, col_deriv,
            ftol, xtol, gtol, max_nfev, factor, diag)

    f = info['fvec']

    # Evaluate the Jacobian at the solution so the result always carries it.
    J = jac(x) if callable(jac) else np.atleast_2d(approx_derivative(fun, x))

    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)

    nfev = info['nfev']
    njev = info.get('njev', None)

    status = FROM_MINPACK_TO_COMMON[status]
    # Method 'lm' does not support bounds, so no constraint is ever active.
    active_mask = np.zeros_like(x0, dtype=int)

    return OptimizeResult(
        x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev, status=status)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def prepare_bounds(bounds, n):
    """Broadcast a ``(lb, ub)`` pair to float arrays of length `n`.

    Scalar bounds are replicated for every variable; array-like bounds are
    converted to float ndarrays unchanged.
    """
    prepared = []
    for bound in bounds:
        b = np.asarray(bound, dtype=float)
        if b.ndim == 0:
            b = np.resize(b, n)
        prepared.append(b)
    lb, ub = prepared
    return lb, ub
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def check_tolerance(ftol, xtol, gtol, method):
    """Normalize the three termination tolerances.

    ``None`` disables a tolerance (it is mapped to 0); sub-epsilon values
    trigger a warning. Method 'lm' requires every tolerance to be at least
    machine epsilon, while the other methods require at least one.
    """
    def sanitize(tol, name):
        # None disables the condition entirely; warn when a nonzero value
        # is too small to ever be met in floating point.
        if tol is None:
            return 0
        if tol < EPS:
            warn(f"Setting `{name}` below the machine epsilon ({EPS:.2e}) "
                 f"effectively disables the corresponding termination "
                 f"condition.", stacklevel=3)
        return tol

    ftol = sanitize(ftol, "ftol")
    xtol = sanitize(xtol, "xtol")
    gtol = sanitize(gtol, "gtol")

    any_disabled = ftol < EPS or xtol < EPS or gtol < EPS
    all_disabled = ftol < EPS and xtol < EPS and gtol < EPS

    if method == "lm" and any_disabled:
        raise ValueError("All tolerances must be higher than machine epsilon "
                         f"({EPS:.2e}) for method 'lm'.")
    if all_disabled:
        raise ValueError("At least one of the tolerances must be higher than "
                         f"machine epsilon ({EPS:.2e}).")

    return ftol, xtol, gtol
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def check_x_scale(x_scale, x0):
    """Validate `x_scale`.

    Accepts either the literal string 'jac' (returned unchanged) or an
    array-like of strictly positive, finite numbers broadcastable to the
    shape of `x0`. Raises ValueError otherwise.
    """
    if isinstance(x_scale, str) and x_scale == 'jac':
        return x_scale

    valid = False
    try:
        x_scale = np.asarray(x_scale, dtype=float)
    except (ValueError, TypeError):
        pass
    else:
        valid = bool(np.all(np.isfinite(x_scale)) and np.all(x_scale > 0))

    if not valid:
        raise ValueError("`x_scale` must be 'jac' or array_like with "
                         "positive numbers.")

    # A scalar scale applies uniformly to every variable.
    if x_scale.ndim == 0:
        x_scale = np.resize(x_scale, x0.shape)

    if x_scale.shape != x0.shape:
        raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")

    return x_scale
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def check_jac_sparsity(jac_sparsity, m, n):
    """Validate a Jacobian sparsity structure.

    Returns None when no structure is supplied, otherwise a
    ``(structure, groups)`` pair where ``groups`` is the column grouping
    computed for sparse finite differencing.
    """
    if jac_sparsity is None:
        return None

    structure = (jac_sparsity if issparse(jac_sparsity)
                 else np.atleast_2d(jac_sparsity))

    if structure.shape != (m, n):
        raise ValueError("`jac_sparsity` has wrong shape.")

    return structure, group_columns(structure)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
# Loss functions.
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def huber(z, rho, cost_only):
    """Huber loss: quadratic for ``z <= 1``, linear beyond.

    Writes the loss value into ``rho[0]`` and, unless `cost_only`, the first
    and second derivatives w.r.t. z into ``rho[1]`` and ``rho[2]``, all
    in place.
    """
    quadratic = z <= 1
    linear = ~quadratic
    rho[0, quadratic] = z[quadratic]
    rho[0, linear] = 2 * z[linear]**0.5 - 1
    if cost_only:
        return
    rho[1, quadratic] = 1
    rho[1, linear] = z[linear]**-0.5
    rho[2, quadratic] = 0
    rho[2, linear] = -0.5 * z[linear]**-1.5
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def soft_l1(z, rho, cost_only):
    """Smooth l1 approximation: ``rho(z) = 2 * ((1 + z)**0.5 - 1)``.

    Fills ``rho`` rows (value, 1st and 2nd derivative) in place; only row 0
    is written when `cost_only` is true.
    """
    shifted = 1 + z
    rho[0] = 2 * (shifted**0.5 - 1)
    if cost_only:
        return
    rho[1] = shifted**-0.5
    rho[2] = -0.5 * shifted**-1.5
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def cauchy(z, rho, cost_only):
    """Cauchy (Lorentzian) loss: ``rho(z) = ln(1 + z)``.

    Fills ``rho`` rows (value, 1st and 2nd derivative) in place; only row 0
    is written when `cost_only` is true.
    """
    rho[0] = np.log1p(z)
    if cost_only:
        return
    shifted = 1 + z
    rho[1] = 1 / shifted
    rho[2] = -1 / shifted**2
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def arctan(z, rho, cost_only):
    """Arctan loss: ``rho(z) = arctan(z)``; bounds the loss of one residual.

    Fills ``rho`` rows (value, 1st and 2nd derivative) in place; only row 0
    is written when `cost_only` is true.
    """
    rho[0] = np.arctan(z)
    if cost_only:
        return
    denom = 1 + z**2
    rho[1] = 1 / denom
    rho[2] = -2 * z / denom**2
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
|
| 209 |
+
cauchy=cauchy, arctan=arctan)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def construct_loss_function(m, loss, f_scale):
    """Build ``loss_function(f, cost_only=False)`` for `least_squares`.

    Returns None for 'linear' loss (no robust rescaling needed). Otherwise
    the returned callable evaluates ``rho((f / f_scale)**2)`` and rescales
    rows 0 and 2 by ``f_scale**2`` so gradient and Hessian approximations
    match the scaled cost; with ``cost_only=True`` it returns the scalar
    cost instead.
    """
    if loss == 'linear':
        return None

    if callable(loss):
        # User-supplied loss: must return a fresh (3, m) array per call.
        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            rho = loss(z)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho
    else:
        # Built-in losses fill a preallocated buffer in place.
        loss = IMPLEMENTED_LOSSES[loss]
        rho = np.empty((3, m))

        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            loss(z, rho, cost_only=cost_only)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho

    return loss_function
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def least_squares(
|
| 242 |
+
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
|
| 243 |
+
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
|
| 244 |
+
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
|
| 245 |
+
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
|
| 246 |
+
"""Solve a nonlinear least-squares problem with bounds on the variables.
|
| 247 |
+
|
| 248 |
+
Given the residuals f(x) (an m-D real function of n real
|
| 249 |
+
variables) and the loss function rho(s) (a scalar function), `least_squares`
|
| 250 |
+
finds a local minimum of the cost function F(x)::
|
| 251 |
+
|
| 252 |
+
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
|
| 253 |
+
subject to lb <= x <= ub
|
| 254 |
+
|
| 255 |
+
The purpose of the loss function rho(s) is to reduce the influence of
|
| 256 |
+
outliers on the solution.
|
| 257 |
+
|
| 258 |
+
Parameters
|
| 259 |
+
----------
|
| 260 |
+
fun : callable
|
| 261 |
+
Function which computes the vector of residuals, with the signature
|
| 262 |
+
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
|
| 263 |
+
respect to its first argument. The argument ``x`` passed to this
|
| 264 |
+
function is an ndarray of shape (n,) (never a scalar, even for n=1).
|
| 265 |
+
It must allocate and return a 1-D array_like of shape (m,) or a scalar.
|
| 266 |
+
If the argument ``x`` is complex or the function ``fun`` returns
|
| 267 |
+
complex residuals, it must be wrapped in a real function of real
|
| 268 |
+
arguments, as shown at the end of the Examples section.
|
| 269 |
+
x0 : array_like with shape (n,) or float
|
| 270 |
+
Initial guess on independent variables. If float, it will be treated
|
| 271 |
+
as a 1-D array with one element. When `method` is 'trf', the initial
|
| 272 |
+
guess might be slightly adjusted to lie sufficiently within the given
|
| 273 |
+
`bounds`.
|
| 274 |
+
jac : {'2-point', '3-point', 'cs', callable}, optional
|
| 275 |
+
Method of computing the Jacobian matrix (an m-by-n matrix, where
|
| 276 |
+
element (i, j) is the partial derivative of f[i] with respect to
|
| 277 |
+
x[j]). The keywords select a finite difference scheme for numerical
|
| 278 |
+
estimation. The scheme '3-point' is more accurate, but requires
|
| 279 |
+
twice as many operations as '2-point' (default). The scheme 'cs'
|
| 280 |
+
uses complex steps, and while potentially the most accurate, it is
|
| 281 |
+
applicable only when `fun` correctly handles complex inputs and
|
| 282 |
+
can be analytically continued to the complex plane. Method 'lm'
|
| 283 |
+
always uses the '2-point' scheme. If callable, it is used as
|
| 284 |
+
``jac(x, *args, **kwargs)`` and should return a good approximation
|
| 285 |
+
(or the exact value) for the Jacobian as an array_like (np.atleast_2d
|
| 286 |
+
is applied), a sparse matrix (csr_matrix preferred for performance) or
|
| 287 |
+
a `scipy.sparse.linalg.LinearOperator`.
|
| 288 |
+
bounds : 2-tuple of array_like or `Bounds`, optional
|
| 289 |
+
There are two ways to specify bounds:
|
| 290 |
+
|
| 291 |
+
1. Instance of `Bounds` class
|
| 292 |
+
2. Lower and upper bounds on independent variables. Defaults to no
|
| 293 |
+
bounds. Each array must match the size of `x0` or be a scalar,
|
| 294 |
+
in the latter case a bound will be the same for all variables.
|
| 295 |
+
Use ``np.inf`` with an appropriate sign to disable bounds on all
|
| 296 |
+
or some variables.
|
| 297 |
+
method : {'trf', 'dogbox', 'lm'}, optional
|
| 298 |
+
Algorithm to perform minimization.
|
| 299 |
+
|
| 300 |
+
* 'trf' : Trust Region Reflective algorithm, particularly suitable
|
| 301 |
+
for large sparse problems with bounds. Generally robust method.
|
| 302 |
+
* 'dogbox' : dogleg algorithm with rectangular trust regions,
|
| 303 |
+
typical use case is small problems with bounds. Not recommended
|
| 304 |
+
for problems with rank-deficient Jacobian.
|
| 305 |
+
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
|
| 306 |
+
Doesn't handle bounds and sparse Jacobians. Usually the most
|
| 307 |
+
efficient method for small unconstrained problems.
|
| 308 |
+
|
| 309 |
+
Default is 'trf'. See Notes for more information.
|
| 310 |
+
ftol : float or None, optional
|
| 311 |
+
Tolerance for termination by the change of the cost function. Default
|
| 312 |
+
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
|
| 313 |
+
and there was an adequate agreement between a local quadratic model and
|
| 314 |
+
the true model in the last step.
|
| 315 |
+
|
| 316 |
+
If None and 'method' is not 'lm', the termination by this condition is
|
| 317 |
+
disabled. If 'method' is 'lm', this tolerance must be higher than
|
| 318 |
+
machine epsilon.
|
| 319 |
+
xtol : float or None, optional
|
| 320 |
+
Tolerance for termination by the change of the independent variables.
|
| 321 |
+
Default is 1e-8. The exact condition depends on the `method` used:
|
| 322 |
+
|
| 323 |
+
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
|
| 324 |
+
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
|
| 325 |
+
a trust-region radius and ``xs`` is the value of ``x``
|
| 326 |
+
scaled according to `x_scale` parameter (see below).
|
| 327 |
+
|
| 328 |
+
If None and 'method' is not 'lm', the termination by this condition is
|
| 329 |
+
disabled. If 'method' is 'lm', this tolerance must be higher than
|
| 330 |
+
machine epsilon.
|
| 331 |
+
gtol : float or None, optional
|
| 332 |
+
Tolerance for termination by the norm of the gradient. Default is 1e-8.
|
| 333 |
+
The exact condition depends on a `method` used:
|
| 334 |
+
|
| 335 |
+
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
|
| 336 |
+
``g_scaled`` is the value of the gradient scaled to account for
|
| 337 |
+
the presence of the bounds [STIR]_.
|
| 338 |
+
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
|
| 339 |
+
``g_free`` is the gradient with respect to the variables which
|
| 340 |
+
are not in the optimal state on the boundary.
|
| 341 |
+
* For 'lm' : the maximum absolute value of the cosine of angles
|
| 342 |
+
between columns of the Jacobian and the residual vector is less
|
| 343 |
+
than `gtol`, or the residual vector is zero.
|
| 344 |
+
|
| 345 |
+
If None and 'method' is not 'lm', the termination by this condition is
|
| 346 |
+
disabled. If 'method' is 'lm', this tolerance must be higher than
|
| 347 |
+
machine epsilon.
|
| 348 |
+
x_scale : array_like or 'jac', optional
|
| 349 |
+
Characteristic scale of each variable. Setting `x_scale` is equivalent
|
| 350 |
+
to reformulating the problem in scaled variables ``xs = x / x_scale``.
|
| 351 |
+
An alternative view is that the size of a trust region along jth
|
| 352 |
+
dimension is proportional to ``x_scale[j]``. Improved convergence may
|
| 353 |
+
be achieved by setting `x_scale` such that a step of a given size
|
| 354 |
+
along any of the scaled variables has a similar effect on the cost
|
| 355 |
+
function. If set to 'jac', the scale is iteratively updated using the
|
| 356 |
+
inverse norms of the columns of the Jacobian matrix (as described in
|
| 357 |
+
[JJMore]_).
|
| 358 |
+
loss : str or callable, optional
|
| 359 |
+
Determines the loss function. The following keyword values are allowed:
|
| 360 |
+
|
| 361 |
+
* 'linear' (default) : ``rho(z) = z``. Gives a standard
|
| 362 |
+
least-squares problem.
|
| 363 |
+
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
|
| 364 |
+
approximation of l1 (absolute value) loss. Usually a good
|
| 365 |
+
choice for robust least squares.
|
| 366 |
+
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
|
| 367 |
+
similarly to 'soft_l1'.
|
| 368 |
+
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
|
| 369 |
+
influence, but may cause difficulties in optimization process.
|
| 370 |
+
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
|
| 371 |
+
a single residual, has properties similar to 'cauchy'.
|
| 372 |
+
|
| 373 |
+
If callable, it must take a 1-D ndarray ``z=f**2`` and return an
|
| 374 |
+
array_like with shape (3, m) where row 0 contains function values,
|
| 375 |
+
row 1 contains first derivatives and row 2 contains second
|
| 376 |
+
derivatives. Method 'lm' supports only 'linear' loss.
|
| 377 |
+
f_scale : float, optional
|
| 378 |
+
Value of soft margin between inlier and outlier residuals, default
|
| 379 |
+
is 1.0. The loss function is evaluated as follows
|
| 380 |
+
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
|
| 381 |
+
and ``rho`` is determined by `loss` parameter. This parameter has
|
| 382 |
+
no effect with ``loss='linear'``, but for other `loss` values it is
|
| 383 |
+
of crucial importance.
|
| 384 |
+
max_nfev : None or int, optional
|
| 385 |
+
Maximum number of function evaluations before the termination.
|
| 386 |
+
If None (default), the value is chosen automatically:
|
| 387 |
+
|
| 388 |
+
* For 'trf' and 'dogbox' : 100 * n.
|
| 389 |
+
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
|
| 390 |
+
otherwise (because 'lm' counts function calls in Jacobian
|
| 391 |
+
estimation).
|
| 392 |
+
|
| 393 |
+
diff_step : None or array_like, optional
|
| 394 |
+
Determines the relative step size for the finite difference
|
| 395 |
+
approximation of the Jacobian. The actual step is computed as
|
| 396 |
+
``x * diff_step``. If None (default), then `diff_step` is taken to be
|
| 397 |
+
a conventional "optimal" power of machine epsilon for the finite
|
| 398 |
+
difference scheme used [NR]_.
|
| 399 |
+
tr_solver : {None, 'exact', 'lsmr'}, optional
|
| 400 |
+
Method for solving trust-region subproblems, relevant only for 'trf'
|
| 401 |
+
and 'dogbox' methods.
|
| 402 |
+
|
| 403 |
+
* 'exact' is suitable for not very large problems with dense
|
| 404 |
+
Jacobian matrices. The computational complexity per iteration is
|
| 405 |
+
comparable to a singular value decomposition of the Jacobian
|
| 406 |
+
matrix.
|
| 407 |
+
* 'lsmr' is suitable for problems with sparse and large Jacobian
|
| 408 |
+
matrices. It uses the iterative procedure
|
| 409 |
+
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
|
| 410 |
+
least-squares problem and only requires matrix-vector product
|
| 411 |
+
evaluations.
|
| 412 |
+
|
| 413 |
+
If None (default), the solver is chosen based on the type of Jacobian
|
| 414 |
+
returned on the first iteration.
|
| 415 |
+
tr_options : dict, optional
|
| 416 |
+
Keyword options passed to trust-region solver.
|
| 417 |
+
|
| 418 |
+
* ``tr_solver='exact'``: `tr_options` are ignored.
|
| 419 |
+
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
|
| 420 |
+
Additionally, ``method='trf'`` supports 'regularize' option
|
| 421 |
+
(bool, default is True), which adds a regularization term to the
|
| 422 |
+
normal equation, which improves convergence if the Jacobian is
|
| 423 |
+
rank-deficient [Byrd]_ (eq. 3.4).
|
| 424 |
+
|
| 425 |
+
jac_sparsity : {None, array_like, sparse matrix}, optional
|
| 426 |
+
Defines the sparsity structure of the Jacobian matrix for finite
|
| 427 |
+
difference estimation, its shape must be (m, n). If the Jacobian has
|
| 428 |
+
only few non-zero elements in *each* row, providing the sparsity
|
| 429 |
+
structure will greatly speed up the computations [Curtis]_. A zero
|
| 430 |
+
entry means that a corresponding element in the Jacobian is identically
|
| 431 |
+
zero. If provided, forces the use of 'lsmr' trust-region solver.
|
| 432 |
+
If None (default), then dense differencing will be used. Has no effect
|
| 433 |
+
for 'lm' method.
|
| 434 |
+
verbose : {0, 1, 2}, optional
|
| 435 |
+
Level of algorithm's verbosity:
|
| 436 |
+
|
| 437 |
+
* 0 (default) : work silently.
|
| 438 |
+
* 1 : display a termination report.
|
| 439 |
+
* 2 : display progress during iterations (not supported by 'lm'
|
| 440 |
+
method).
|
| 441 |
+
|
| 442 |
+
args, kwargs : tuple and dict, optional
|
| 443 |
+
Additional arguments passed to `fun` and `jac`. Both empty by default.
|
| 444 |
+
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
|
| 445 |
+
`jac`.
|
| 446 |
+
|
| 447 |
+
Returns
|
| 448 |
+
-------
|
| 449 |
+
result : OptimizeResult
|
| 450 |
+
`OptimizeResult` with the following fields defined:
|
| 451 |
+
|
| 452 |
+
x : ndarray, shape (n,)
|
| 453 |
+
Solution found.
|
| 454 |
+
cost : float
|
| 455 |
+
Value of the cost function at the solution.
|
| 456 |
+
fun : ndarray, shape (m,)
|
| 457 |
+
Vector of residuals at the solution.
|
| 458 |
+
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
|
| 459 |
+
Modified Jacobian matrix at the solution, in the sense that J^T J
|
| 460 |
+
is a Gauss-Newton approximation of the Hessian of the cost function.
|
| 461 |
+
The type is the same as the one used by the algorithm.
|
| 462 |
+
grad : ndarray, shape (m,)
|
| 463 |
+
Gradient of the cost function at the solution.
|
| 464 |
+
optimality : float
|
| 465 |
+
First-order optimality measure. In unconstrained problems, it is
|
| 466 |
+
always the uniform norm of the gradient. In constrained problems,
|
| 467 |
+
it is the quantity which was compared with `gtol` during iterations.
|
| 468 |
+
active_mask : ndarray of int, shape (n,)
|
| 469 |
+
Each component shows whether a corresponding constraint is active
|
| 470 |
+
(that is, whether a variable is at the bound):
|
| 471 |
+
|
| 472 |
+
* 0 : a constraint is not active.
|
| 473 |
+
* -1 : a lower bound is active.
|
| 474 |
+
* 1 : an upper bound is active.
|
| 475 |
+
|
| 476 |
+
Might be somewhat arbitrary for 'trf' method as it generates a
|
| 477 |
+
sequence of strictly feasible iterates and `active_mask` is
|
| 478 |
+
determined within a tolerance threshold.
|
| 479 |
+
nfev : int
|
| 480 |
+
Number of function evaluations done. Methods 'trf' and 'dogbox' do
|
| 481 |
+
not count function calls for numerical Jacobian approximation, as
|
| 482 |
+
opposed to 'lm' method.
|
| 483 |
+
njev : int or None
|
| 484 |
+
Number of Jacobian evaluations done. If numerical Jacobian
|
| 485 |
+
approximation is used in 'lm' method, it is set to None.
|
| 486 |
+
status : int
|
| 487 |
+
The reason for algorithm termination:
|
| 488 |
+
|
| 489 |
+
* -1 : improper input parameters status returned from MINPACK.
|
| 490 |
+
* 0 : the maximum number of function evaluations is exceeded.
|
| 491 |
+
* 1 : `gtol` termination condition is satisfied.
|
| 492 |
+
* 2 : `ftol` termination condition is satisfied.
|
| 493 |
+
* 3 : `xtol` termination condition is satisfied.
|
| 494 |
+
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
|
| 495 |
+
|
| 496 |
+
message : str
|
| 497 |
+
Verbal description of the termination reason.
|
| 498 |
+
success : bool
|
| 499 |
+
True if one of the convergence criteria is satisfied (`status` > 0).
|
| 500 |
+
|
| 501 |
+
See Also
|
| 502 |
+
--------
|
| 503 |
+
leastsq : A legacy wrapper for the MINPACK implementation of the
|
| 504 |
+
Levenberg-Marquadt algorithm.
|
| 505 |
+
curve_fit : Least-squares minimization applied to a curve-fitting problem.
|
| 506 |
+
|
| 507 |
+
Notes
|
| 508 |
+
-----
|
| 509 |
+
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
|
| 510 |
+
algorithms implemented in MINPACK (lmder, lmdif). It runs the
|
| 511 |
+
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
|
| 512 |
+
The implementation is based on paper [JJMore]_, it is very robust and
|
| 513 |
+
efficient with a lot of smart tricks. It should be your first choice
|
| 514 |
+
for unconstrained problems. Note that it doesn't support bounds. Also,
|
| 515 |
+
it doesn't work when m < n.
|
| 516 |
+
|
| 517 |
+
Method 'trf' (Trust Region Reflective) is motivated by the process of
|
| 518 |
+
solving a system of equations, which constitute the first-order optimality
|
| 519 |
+
condition for a bound-constrained minimization problem as formulated in
|
| 520 |
+
[STIR]_. The algorithm iteratively solves trust-region subproblems
|
| 521 |
+
augmented by a special diagonal quadratic term and with trust-region shape
|
| 522 |
+
determined by the distance from the bounds and the direction of the
|
| 523 |
+
gradient. This enhancements help to avoid making steps directly into bounds
|
| 524 |
+
and efficiently explore the whole space of variables. To further improve
|
| 525 |
+
convergence, the algorithm considers search directions reflected from the
|
| 526 |
+
bounds. To obey theoretical requirements, the algorithm keeps iterates
|
| 527 |
+
strictly feasible. With dense Jacobians trust-region subproblems are
|
| 528 |
+
solved by an exact method very similar to the one described in [JJMore]_
|
| 529 |
+
(and implemented in MINPACK). The difference from the MINPACK
|
| 530 |
+
implementation is that a singular value decomposition of a Jacobian
|
| 531 |
+
matrix is done once per iteration, instead of a QR decomposition and series
|
| 532 |
+
of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace
|
| 533 |
+
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
|
| 534 |
+
The subspace is spanned by a scaled gradient and an approximate
|
| 535 |
+
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
|
| 536 |
+
constraints are imposed the algorithm is very similar to MINPACK and has
|
| 537 |
+
generally comparable performance. The algorithm works quite robust in
|
| 538 |
+
unbounded and bounded problems, thus it is chosen as a default algorithm.
|
| 539 |
+
|
| 540 |
+
Method 'dogbox' operates in a trust-region framework, but considers
|
| 541 |
+
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
|
| 542 |
+
The intersection of a current trust region and initial bounds is again
|
| 543 |
+
rectangular, so on each iteration a quadratic minimization problem subject
|
| 544 |
+
to bound constraints is solved approximately by Powell's dogleg method
|
| 545 |
+
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
|
| 546 |
+
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
|
| 547 |
+
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
|
| 548 |
+
the rank of Jacobian is less than the number of variables. The algorithm
|
| 549 |
+
often outperforms 'trf' in bounded problems with a small number of
|
| 550 |
+
variables.
|
| 551 |
+
|
| 552 |
+
Robust loss functions are implemented as described in [BA]_. The idea
|
| 553 |
+
is to modify a residual vector and a Jacobian matrix on each iteration
|
| 554 |
+
such that computed gradient and Gauss-Newton Hessian approximation match
|
| 555 |
+
the true gradient and Hessian approximation of the cost function. Then
|
| 556 |
+
the algorithm proceeds in a normal way, i.e., robust loss functions are
|
| 557 |
+
implemented as a simple wrapper over standard least-squares algorithms.
|
| 558 |
+
|
| 559 |
+
.. versionadded:: 0.17.0
|
| 560 |
+
|
| 561 |
+
References
|
| 562 |
+
----------
|
| 563 |
+
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
|
| 564 |
+
and Conjugate Gradient Method for Large-Scale Bound-Constrained
|
| 565 |
+
Minimization Problems," SIAM Journal on Scientific Computing,
|
| 566 |
+
Vol. 21, Number 1, pp 1-23, 1999.
|
| 567 |
+
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
|
| 568 |
+
Computing. 3rd edition", Sec. 5.7.
|
| 569 |
+
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
|
| 570 |
+
solution of the trust region problem by minimization over
|
| 571 |
+
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
|
| 572 |
+
1988.
|
| 573 |
+
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
|
| 574 |
+
sparse Jacobian matrices", Journal of the Institute of
|
| 575 |
+
Mathematics and its Applications, 13, pp. 117-120, 1974.
|
| 576 |
+
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
|
| 577 |
+
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
|
| 578 |
+
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
|
| 579 |
+
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
|
| 580 |
+
Dogleg Approach for Unconstrained and Bound Constrained
|
| 581 |
+
Nonlinear Optimization", WSEAS International Conference on
|
| 582 |
+
Applied Mathematics, Corfu, Greece, 2004.
|
| 583 |
+
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
|
| 584 |
+
2nd edition", Chapter 4.
|
| 585 |
+
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
|
| 586 |
+
Proceedings of the International Workshop on Vision Algorithms:
|
| 587 |
+
Theory and Practice, pp. 298-372, 1999.
|
| 588 |
+
|
| 589 |
+
Examples
|
| 590 |
+
--------
|
| 591 |
+
In this example we find a minimum of the Rosenbrock function without bounds
|
| 592 |
+
on independent variables.
|
| 593 |
+
|
| 594 |
+
>>> import numpy as np
|
| 595 |
+
>>> def fun_rosenbrock(x):
|
| 596 |
+
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
|
| 597 |
+
|
| 598 |
+
Notice that we only provide the vector of the residuals. The algorithm
|
| 599 |
+
constructs the cost function as a sum of squares of the residuals, which
|
| 600 |
+
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
|
| 601 |
+
|
| 602 |
+
>>> from scipy.optimize import least_squares
|
| 603 |
+
>>> x0_rosenbrock = np.array([2, 2])
|
| 604 |
+
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
|
| 605 |
+
>>> res_1.x
|
| 606 |
+
array([ 1., 1.])
|
| 607 |
+
>>> res_1.cost
|
| 608 |
+
9.8669242910846867e-30
|
| 609 |
+
>>> res_1.optimality
|
| 610 |
+
8.8928864934219529e-14
|
| 611 |
+
|
| 612 |
+
We now constrain the variables, in such a way that the previous solution
|
| 613 |
+
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
|
| 614 |
+
``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
|
| 615 |
+
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
|
| 616 |
+
|
| 617 |
+
We also provide the analytic Jacobian:
|
| 618 |
+
|
| 619 |
+
>>> def jac_rosenbrock(x):
|
| 620 |
+
... return np.array([
|
| 621 |
+
... [-20 * x[0], 10],
|
| 622 |
+
... [-1, 0]])
|
| 623 |
+
|
| 624 |
+
Putting this all together, we see that the new solution lies on the bound:
|
| 625 |
+
|
| 626 |
+
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
|
| 627 |
+
... bounds=([-np.inf, 1.5], np.inf))
|
| 628 |
+
>>> res_2.x
|
| 629 |
+
array([ 1.22437075, 1.5 ])
|
| 630 |
+
>>> res_2.cost
|
| 631 |
+
0.025213093946805685
|
| 632 |
+
>>> res_2.optimality
|
| 633 |
+
1.5885401433157753e-07
|
| 634 |
+
|
| 635 |
+
Now we solve a system of equations (i.e., the cost function should be zero
|
| 636 |
+
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
|
| 637 |
+
variables:
|
| 638 |
+
|
| 639 |
+
>>> def fun_broyden(x):
|
| 640 |
+
... f = (3 - x) * x + 1
|
| 641 |
+
... f[1:] -= x[:-1]
|
| 642 |
+
... f[:-1] -= 2 * x[1:]
|
| 643 |
+
... return f
|
| 644 |
+
|
| 645 |
+
The corresponding Jacobian matrix is sparse. We tell the algorithm to
|
| 646 |
+
estimate it by finite differences and provide the sparsity structure of
|
| 647 |
+
Jacobian to significantly speed up this process.
|
| 648 |
+
|
| 649 |
+
>>> from scipy.sparse import lil_matrix
|
| 650 |
+
>>> def sparsity_broyden(n):
|
| 651 |
+
... sparsity = lil_matrix((n, n), dtype=int)
|
| 652 |
+
... i = np.arange(n)
|
| 653 |
+
... sparsity[i, i] = 1
|
| 654 |
+
... i = np.arange(1, n)
|
| 655 |
+
... sparsity[i, i - 1] = 1
|
| 656 |
+
... i = np.arange(n - 1)
|
| 657 |
+
... sparsity[i, i + 1] = 1
|
| 658 |
+
... return sparsity
|
| 659 |
+
...
|
| 660 |
+
>>> n = 100000
|
| 661 |
+
>>> x0_broyden = -np.ones(n)
|
| 662 |
+
...
|
| 663 |
+
>>> res_3 = least_squares(fun_broyden, x0_broyden,
|
| 664 |
+
... jac_sparsity=sparsity_broyden(n))
|
| 665 |
+
>>> res_3.cost
|
| 666 |
+
4.5687069299604613e-23
|
| 667 |
+
>>> res_3.optimality
|
| 668 |
+
1.1650454296851518e-11
|
| 669 |
+
|
| 670 |
+
Let's also solve a curve fitting problem using robust loss function to
|
| 671 |
+
take care of outliers in the data. Define the model function as
|
| 672 |
+
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
|
| 673 |
+
observation and a, b, c are parameters to estimate.
|
| 674 |
+
|
| 675 |
+
First, define the function which generates the data with noise and
|
| 676 |
+
outliers, define the model parameters, and generate data:
|
| 677 |
+
|
| 678 |
+
>>> from numpy.random import default_rng
|
| 679 |
+
>>> rng = default_rng()
|
| 680 |
+
>>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None):
|
| 681 |
+
... rng = default_rng(seed)
|
| 682 |
+
...
|
| 683 |
+
... y = a + b * np.exp(t * c)
|
| 684 |
+
...
|
| 685 |
+
... error = noise * rng.standard_normal(t.size)
|
| 686 |
+
... outliers = rng.integers(0, t.size, n_outliers)
|
| 687 |
+
... error[outliers] *= 10
|
| 688 |
+
...
|
| 689 |
+
... return y + error
|
| 690 |
+
...
|
| 691 |
+
>>> a = 0.5
|
| 692 |
+
>>> b = 2.0
|
| 693 |
+
>>> c = -1
|
| 694 |
+
>>> t_min = 0
|
| 695 |
+
>>> t_max = 10
|
| 696 |
+
>>> n_points = 15
|
| 697 |
+
...
|
| 698 |
+
>>> t_train = np.linspace(t_min, t_max, n_points)
|
| 699 |
+
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
|
| 700 |
+
|
| 701 |
+
Define function for computing residuals and initial estimate of
|
| 702 |
+
parameters.
|
| 703 |
+
|
| 704 |
+
>>> def fun(x, t, y):
|
| 705 |
+
... return x[0] + x[1] * np.exp(x[2] * t) - y
|
| 706 |
+
...
|
| 707 |
+
>>> x0 = np.array([1.0, 1.0, 0.0])
|
| 708 |
+
|
| 709 |
+
Compute a standard least-squares solution:
|
| 710 |
+
|
| 711 |
+
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
|
| 712 |
+
|
| 713 |
+
Now compute two solutions with two different robust loss functions. The
|
| 714 |
+
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
|
| 715 |
+
not significantly exceed 0.1 (the noise level used).
|
| 716 |
+
|
| 717 |
+
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
|
| 718 |
+
... args=(t_train, y_train))
|
| 719 |
+
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
|
| 720 |
+
... args=(t_train, y_train))
|
| 721 |
+
|
| 722 |
+
And, finally, plot all the curves. We see that by selecting an appropriate
|
| 723 |
+
`loss` we can get estimates close to optimal even in the presence of
|
| 724 |
+
strong outliers. But keep in mind that generally it is recommended to try
|
| 725 |
+
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
|
| 726 |
+
options may cause difficulties in optimization process.
|
| 727 |
+
|
| 728 |
+
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
|
| 729 |
+
>>> y_true = gen_data(t_test, a, b, c)
|
| 730 |
+
>>> y_lsq = gen_data(t_test, *res_lsq.x)
|
| 731 |
+
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
|
| 732 |
+
>>> y_log = gen_data(t_test, *res_log.x)
|
| 733 |
+
...
|
| 734 |
+
>>> import matplotlib.pyplot as plt
|
| 735 |
+
>>> plt.plot(t_train, y_train, 'o')
|
| 736 |
+
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
|
| 737 |
+
>>> plt.plot(t_test, y_lsq, label='linear loss')
|
| 738 |
+
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
|
| 739 |
+
>>> plt.plot(t_test, y_log, label='cauchy loss')
|
| 740 |
+
>>> plt.xlabel("t")
|
| 741 |
+
>>> plt.ylabel("y")
|
| 742 |
+
>>> plt.legend()
|
| 743 |
+
>>> plt.show()
|
| 744 |
+
|
| 745 |
+
In the next example, we show how complex-valued residual functions of
|
| 746 |
+
complex variables can be optimized with ``least_squares()``. Consider the
|
| 747 |
+
following function:
|
| 748 |
+
|
| 749 |
+
>>> def f(z):
|
| 750 |
+
... return z - (0.5 + 0.5j)
|
| 751 |
+
|
| 752 |
+
We wrap it into a function of real variables that returns real residuals
|
| 753 |
+
by simply handling the real and imaginary parts as independent variables:
|
| 754 |
+
|
| 755 |
+
>>> def f_wrap(x):
|
| 756 |
+
... fx = f(x[0] + 1j*x[1])
|
| 757 |
+
... return np.array([fx.real, fx.imag])
|
| 758 |
+
|
| 759 |
+
Thus, instead of the original m-D complex function of n complex
|
| 760 |
+
variables we optimize a 2m-D real function of 2n real variables:
|
| 761 |
+
|
| 762 |
+
>>> from scipy.optimize import least_squares
|
| 763 |
+
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
|
| 764 |
+
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
|
| 765 |
+
>>> z
|
| 766 |
+
(0.49999999999925893+0.49999999999925893j)
|
| 767 |
+
|
| 768 |
+
"""
|
| 769 |
+
if method not in ['trf', 'dogbox', 'lm']:
|
| 770 |
+
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
|
| 771 |
+
|
| 772 |
+
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
|
| 773 |
+
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
|
| 774 |
+
"callable.")
|
| 775 |
+
|
| 776 |
+
if tr_solver not in [None, 'exact', 'lsmr']:
|
| 777 |
+
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
|
| 778 |
+
|
| 779 |
+
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
|
| 780 |
+
raise ValueError("`loss` must be one of {} or a callable."
|
| 781 |
+
.format(IMPLEMENTED_LOSSES.keys()))
|
| 782 |
+
|
| 783 |
+
if method == 'lm' and loss != 'linear':
|
| 784 |
+
raise ValueError("method='lm' supports only 'linear' loss function.")
|
| 785 |
+
|
| 786 |
+
if verbose not in [0, 1, 2]:
|
| 787 |
+
raise ValueError("`verbose` must be in [0, 1, 2].")
|
| 788 |
+
|
| 789 |
+
if max_nfev is not None and max_nfev <= 0:
|
| 790 |
+
raise ValueError("`max_nfev` must be None or positive integer.")
|
| 791 |
+
|
| 792 |
+
if np.iscomplexobj(x0):
|
| 793 |
+
raise ValueError("`x0` must be real.")
|
| 794 |
+
|
| 795 |
+
x0 = np.atleast_1d(x0).astype(float)
|
| 796 |
+
|
| 797 |
+
if x0.ndim > 1:
|
| 798 |
+
raise ValueError("`x0` must have at most 1 dimension.")
|
| 799 |
+
|
| 800 |
+
if isinstance(bounds, Bounds):
|
| 801 |
+
lb, ub = bounds.lb, bounds.ub
|
| 802 |
+
bounds = (lb, ub)
|
| 803 |
+
else:
|
| 804 |
+
if len(bounds) == 2:
|
| 805 |
+
lb, ub = prepare_bounds(bounds, x0.shape[0])
|
| 806 |
+
else:
|
| 807 |
+
raise ValueError("`bounds` must contain 2 elements.")
|
| 808 |
+
|
| 809 |
+
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
|
| 810 |
+
raise ValueError("Method 'lm' doesn't support bounds.")
|
| 811 |
+
|
| 812 |
+
if lb.shape != x0.shape or ub.shape != x0.shape:
|
| 813 |
+
raise ValueError("Inconsistent shapes between bounds and `x0`.")
|
| 814 |
+
|
| 815 |
+
if np.any(lb >= ub):
|
| 816 |
+
raise ValueError("Each lower bound must be strictly less than each "
|
| 817 |
+
"upper bound.")
|
| 818 |
+
|
| 819 |
+
if not in_bounds(x0, lb, ub):
|
| 820 |
+
raise ValueError("`x0` is infeasible.")
|
| 821 |
+
|
| 822 |
+
x_scale = check_x_scale(x_scale, x0)
|
| 823 |
+
|
| 824 |
+
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method)
|
| 825 |
+
|
| 826 |
+
if method == 'trf':
|
| 827 |
+
x0 = make_strictly_feasible(x0, lb, ub)
|
| 828 |
+
|
| 829 |
+
def fun_wrapped(x):
|
| 830 |
+
return np.atleast_1d(fun(x, *args, **kwargs))
|
| 831 |
+
|
| 832 |
+
f0 = fun_wrapped(x0)
|
| 833 |
+
|
| 834 |
+
if f0.ndim != 1:
|
| 835 |
+
raise ValueError("`fun` must return at most 1-d array_like. "
|
| 836 |
+
f"f0.shape: {f0.shape}")
|
| 837 |
+
|
| 838 |
+
if not np.all(np.isfinite(f0)):
|
| 839 |
+
raise ValueError("Residuals are not finite in the initial point.")
|
| 840 |
+
|
| 841 |
+
n = x0.size
|
| 842 |
+
m = f0.size
|
| 843 |
+
|
| 844 |
+
if method == 'lm' and m < n:
|
| 845 |
+
raise ValueError("Method 'lm' doesn't work when the number of "
|
| 846 |
+
"residuals is less than the number of variables.")
|
| 847 |
+
|
| 848 |
+
loss_function = construct_loss_function(m, loss, f_scale)
|
| 849 |
+
if callable(loss):
|
| 850 |
+
rho = loss_function(f0)
|
| 851 |
+
if rho.shape != (3, m):
|
| 852 |
+
raise ValueError("The return value of `loss` callable has wrong "
|
| 853 |
+
"shape.")
|
| 854 |
+
initial_cost = 0.5 * np.sum(rho[0])
|
| 855 |
+
elif loss_function is not None:
|
| 856 |
+
initial_cost = loss_function(f0, cost_only=True)
|
| 857 |
+
else:
|
| 858 |
+
initial_cost = 0.5 * np.dot(f0, f0)
|
| 859 |
+
|
| 860 |
+
if callable(jac):
|
| 861 |
+
J0 = jac(x0, *args, **kwargs)
|
| 862 |
+
|
| 863 |
+
if issparse(J0):
|
| 864 |
+
J0 = J0.tocsr()
|
| 865 |
+
|
| 866 |
+
def jac_wrapped(x, _=None):
|
| 867 |
+
return jac(x, *args, **kwargs).tocsr()
|
| 868 |
+
|
| 869 |
+
elif isinstance(J0, LinearOperator):
|
| 870 |
+
def jac_wrapped(x, _=None):
|
| 871 |
+
return jac(x, *args, **kwargs)
|
| 872 |
+
|
| 873 |
+
else:
|
| 874 |
+
J0 = np.atleast_2d(J0)
|
| 875 |
+
|
| 876 |
+
def jac_wrapped(x, _=None):
|
| 877 |
+
return np.atleast_2d(jac(x, *args, **kwargs))
|
| 878 |
+
|
| 879 |
+
else: # Estimate Jacobian by finite differences.
|
| 880 |
+
if method == 'lm':
|
| 881 |
+
if jac_sparsity is not None:
|
| 882 |
+
raise ValueError("method='lm' does not support "
|
| 883 |
+
"`jac_sparsity`.")
|
| 884 |
+
|
| 885 |
+
if jac != '2-point':
|
| 886 |
+
warn(f"jac='{jac}' works equivalently to '2-point' for method='lm'.",
|
| 887 |
+
stacklevel=2)
|
| 888 |
+
|
| 889 |
+
J0 = jac_wrapped = None
|
| 890 |
+
else:
|
| 891 |
+
if jac_sparsity is not None and tr_solver == 'exact':
|
| 892 |
+
raise ValueError("tr_solver='exact' is incompatible "
|
| 893 |
+
"with `jac_sparsity`.")
|
| 894 |
+
|
| 895 |
+
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
|
| 896 |
+
|
| 897 |
+
def jac_wrapped(x, f):
|
| 898 |
+
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
|
| 899 |
+
f0=f, bounds=bounds, args=args,
|
| 900 |
+
kwargs=kwargs, sparsity=jac_sparsity)
|
| 901 |
+
if J.ndim != 2: # J is guaranteed not sparse.
|
| 902 |
+
J = np.atleast_2d(J)
|
| 903 |
+
|
| 904 |
+
return J
|
| 905 |
+
|
| 906 |
+
J0 = jac_wrapped(x0, f0)
|
| 907 |
+
|
| 908 |
+
if J0 is not None:
|
| 909 |
+
if J0.shape != (m, n):
|
| 910 |
+
raise ValueError(
|
| 911 |
+
f"The return value of `jac` has wrong shape: expected {(m, n)}, "
|
| 912 |
+
f"actual {J0.shape}."
|
| 913 |
+
)
|
| 914 |
+
|
| 915 |
+
if not isinstance(J0, np.ndarray):
|
| 916 |
+
if method == 'lm':
|
| 917 |
+
raise ValueError("method='lm' works only with dense "
|
| 918 |
+
"Jacobian matrices.")
|
| 919 |
+
|
| 920 |
+
if tr_solver == 'exact':
|
| 921 |
+
raise ValueError(
|
| 922 |
+
"tr_solver='exact' works only with dense "
|
| 923 |
+
"Jacobian matrices.")
|
| 924 |
+
|
| 925 |
+
jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
|
| 926 |
+
if isinstance(J0, LinearOperator) and jac_scale:
|
| 927 |
+
raise ValueError("x_scale='jac' can't be used when `jac` "
|
| 928 |
+
"returns LinearOperator.")
|
| 929 |
+
|
| 930 |
+
if tr_solver is None:
|
| 931 |
+
if isinstance(J0, np.ndarray):
|
| 932 |
+
tr_solver = 'exact'
|
| 933 |
+
else:
|
| 934 |
+
tr_solver = 'lsmr'
|
| 935 |
+
|
| 936 |
+
if method == 'lm':
|
| 937 |
+
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
|
| 938 |
+
max_nfev, x_scale, diff_step)
|
| 939 |
+
|
| 940 |
+
elif method == 'trf':
|
| 941 |
+
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
|
| 942 |
+
gtol, max_nfev, x_scale, loss_function, tr_solver,
|
| 943 |
+
tr_options.copy(), verbose)
|
| 944 |
+
|
| 945 |
+
elif method == 'dogbox':
|
| 946 |
+
if tr_solver == 'lsmr' and 'regularize' in tr_options:
|
| 947 |
+
warn("The keyword 'regularize' in `tr_options` is not relevant "
|
| 948 |
+
"for 'dogbox' method.",
|
| 949 |
+
stacklevel=2)
|
| 950 |
+
tr_options = tr_options.copy()
|
| 951 |
+
del tr_options['regularize']
|
| 952 |
+
|
| 953 |
+
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
|
| 954 |
+
xtol, gtol, max_nfev, x_scale, loss_function,
|
| 955 |
+
tr_solver, tr_options, verbose)
|
| 956 |
+
|
| 957 |
+
result.message = TERMINATION_MESSAGES[result.status]
|
| 958 |
+
result.success = result.status > 0
|
| 959 |
+
|
| 960 |
+
if verbose >= 1:
|
| 961 |
+
print(result.message)
|
| 962 |
+
print("Function evaluations {}, initial cost {:.4e}, final cost "
|
| 963 |
+
"{:.4e}, first-order optimality {:.2e}."
|
| 964 |
+
.format(result.nfev, initial_cost, result.cost,
|
| 965 |
+
result.optimality))
|
| 966 |
+
|
| 967 |
+
return result
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Linear least squares with bound constraints on independent variables."""
|
| 2 |
+
import numpy as np
|
| 3 |
+
from numpy.linalg import norm
|
| 4 |
+
from scipy.sparse import issparse, csr_matrix
|
| 5 |
+
from scipy.sparse.linalg import LinearOperator, lsmr
|
| 6 |
+
from scipy.optimize import OptimizeResult
|
| 7 |
+
from scipy.optimize._minimize import Bounds
|
| 8 |
+
|
| 9 |
+
from .common import in_bounds, compute_grad
|
| 10 |
+
from .trf_linear import trf_linear
|
| 11 |
+
from .bvls import bvls
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def prepare_bounds(bounds, n):
    """Convert a (lower, upper) bounds pair into two float arrays of length n.

    Each element of `bounds` may be a scalar (broadcast to shape ``(n,)``)
    or an array-like (converted to a float ndarray and left as-is).

    Raises
    ------
    ValueError
        If `bounds` does not contain exactly two elements.
    """
    if len(bounds) != 2:
        raise ValueError("`bounds` must contain 2 elements.")

    def _to_array(bound):
        # Scalars are tiled out to length n so both bounds share x's shape.
        arr = np.asarray(bound, dtype=float)
        return np.resize(arr, n) if arr.ndim == 0 else arr

    lower, upper = bounds
    return _to_array(lower), _to_array(upper)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Maps the integer `status` code reported by the linear least-squares solvers
# to a human-readable termination reason. A positive status indicates that a
# convergence criterion was satisfied (callers set ``success = status > 0``).
TERMINATION_MESSAGES = {
    -1: "The algorithm was not able to make progress on the last iteration.",
    0: "The maximum number of iterations is exceeded.",
    1: "The first-order optimality measure is less than `tol`.",
    2: "The relative change of the cost function is less than `tol`.",
    3: "The unconstrained solution is optimal."
}
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
|
| 38 |
+
lsq_solver=None, lsmr_tol=None, max_iter=None,
|
| 39 |
+
verbose=0, *, lsmr_maxiter=None,):
|
| 40 |
+
r"""Solve a linear least-squares problem with bounds on the variables.
|
| 41 |
+
|
| 42 |
+
Given a m-by-n design matrix A and a target vector b with m elements,
|
| 43 |
+
`lsq_linear` solves the following optimization problem::
|
| 44 |
+
|
| 45 |
+
minimize 0.5 * ||A x - b||**2
|
| 46 |
+
subject to lb <= x <= ub
|
| 47 |
+
|
| 48 |
+
This optimization problem is convex, hence a found minimum (if iterations
|
| 49 |
+
have converged) is guaranteed to be global.
|
| 50 |
+
|
| 51 |
+
Parameters
|
| 52 |
+
----------
|
| 53 |
+
A : array_like, sparse matrix or LinearOperator, shape (m, n)
|
| 54 |
+
Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
|
| 55 |
+
b : array_like, shape (m,)
|
| 56 |
+
Target vector.
|
| 57 |
+
bounds : 2-tuple of array_like or `Bounds`, optional
|
| 58 |
+
Lower and upper bounds on parameters. Defaults to no bounds.
|
| 59 |
+
There are two ways to specify the bounds:
|
| 60 |
+
|
| 61 |
+
- Instance of `Bounds` class.
|
| 62 |
+
|
| 63 |
+
- 2-tuple of array_like: Each element of the tuple must be either
|
| 64 |
+
an array with the length equal to the number of parameters, or a
|
| 65 |
+
scalar (in which case the bound is taken to be the same for all
|
| 66 |
+
parameters). Use ``np.inf`` with an appropriate sign to disable
|
| 67 |
+
bounds on all or some parameters.
|
| 68 |
+
|
| 69 |
+
method : 'trf' or 'bvls', optional
|
| 70 |
+
Method to perform minimization.
|
| 71 |
+
|
| 72 |
+
* 'trf' : Trust Region Reflective algorithm adapted for a linear
|
| 73 |
+
least-squares problem. This is an interior-point-like method
|
| 74 |
+
and the required number of iterations is weakly correlated with
|
| 75 |
+
the number of variables.
|
| 76 |
+
* 'bvls' : Bounded-variable least-squares algorithm. This is
|
| 77 |
+
an active set method, which requires the number of iterations
|
| 78 |
+
comparable to the number of variables. Can't be used when `A` is
|
| 79 |
+
sparse or LinearOperator.
|
| 80 |
+
|
| 81 |
+
Default is 'trf'.
|
| 82 |
+
tol : float, optional
|
| 83 |
+
Tolerance parameter. The algorithm terminates if a relative change
|
| 84 |
+
of the cost function is less than `tol` on the last iteration.
|
| 85 |
+
Additionally, the first-order optimality measure is considered:
|
| 86 |
+
|
| 87 |
+
* ``method='trf'`` terminates if the uniform norm of the gradient,
|
| 88 |
+
scaled to account for the presence of the bounds, is less than
|
| 89 |
+
`tol`.
|
| 90 |
+
* ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
|
| 91 |
+
are satisfied within `tol` tolerance.
|
| 92 |
+
|
| 93 |
+
lsq_solver : {None, 'exact', 'lsmr'}, optional
|
| 94 |
+
Method of solving unbounded least-squares problems throughout
|
| 95 |
+
iterations:
|
| 96 |
+
|
| 97 |
+
* 'exact' : Use dense QR or SVD decomposition approach. Can't be
|
| 98 |
+
used when `A` is sparse or LinearOperator.
|
| 99 |
+
* 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
|
| 100 |
+
which requires only matrix-vector product evaluations. Can't
|
| 101 |
+
be used with ``method='bvls'``.
|
| 102 |
+
|
| 103 |
+
If None (default), the solver is chosen based on type of `A`.
|
| 104 |
+
lsmr_tol : None, float or 'auto', optional
|
| 105 |
+
Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`
|
| 106 |
+
If None (default), it is set to ``1e-2 * tol``. If 'auto', the
|
| 107 |
+
tolerance will be adjusted based on the optimality of the current
|
| 108 |
+
iterate, which can speed up the optimization process, but is not always
|
| 109 |
+
reliable.
|
| 110 |
+
max_iter : None or int, optional
|
| 111 |
+
Maximum number of iterations before termination. If None (default), it
|
| 112 |
+
is set to 100 for ``method='trf'`` or to the number of variables for
|
| 113 |
+
``method='bvls'`` (not counting iterations for 'bvls' initialization).
|
| 114 |
+
verbose : {0, 1, 2}, optional
|
| 115 |
+
Level of algorithm's verbosity:
|
| 116 |
+
|
| 117 |
+
* 0 : work silently (default).
|
| 118 |
+
* 1 : display a termination report.
|
| 119 |
+
* 2 : display progress during iterations.
|
| 120 |
+
lsmr_maxiter : None or int, optional
|
| 121 |
+
Maximum number of iterations for the lsmr least squares solver,
|
| 122 |
+
if it is used (by setting ``lsq_solver='lsmr'``). If None (default), it
|
| 123 |
+
uses lsmr's default of ``min(m, n)`` where ``m`` and ``n`` are the
|
| 124 |
+
number of rows and columns of `A`, respectively. Has no effect if
|
| 125 |
+
``lsq_solver='exact'``.
|
| 126 |
+
|
| 127 |
+
Returns
|
| 128 |
+
-------
|
| 129 |
+
OptimizeResult with the following fields defined:
|
| 130 |
+
x : ndarray, shape (n,)
|
| 131 |
+
Solution found.
|
| 132 |
+
cost : float
|
| 133 |
+
Value of the cost function at the solution.
|
| 134 |
+
fun : ndarray, shape (m,)
|
| 135 |
+
Vector of residuals at the solution.
|
| 136 |
+
optimality : float
|
| 137 |
+
First-order optimality measure. The exact meaning depends on `method`,
|
| 138 |
+
refer to the description of `tol` parameter.
|
| 139 |
+
active_mask : ndarray of int, shape (n,)
|
| 140 |
+
Each component shows whether a corresponding constraint is active
|
| 141 |
+
(that is, whether a variable is at the bound):
|
| 142 |
+
|
| 143 |
+
* 0 : a constraint is not active.
|
| 144 |
+
* -1 : a lower bound is active.
|
| 145 |
+
* 1 : an upper bound is active.
|
| 146 |
+
|
| 147 |
+
Might be somewhat arbitrary for the `trf` method as it generates a
|
| 148 |
+
sequence of strictly feasible iterates and active_mask is determined
|
| 149 |
+
within a tolerance threshold.
|
| 150 |
+
unbounded_sol : tuple
|
| 151 |
+
Unbounded least squares solution tuple returned by the least squares
|
| 152 |
+
solver (set with `lsq_solver` option). If `lsq_solver` is not set or is
|
| 153 |
+
set to ``'exact'``, the tuple contains an ndarray of shape (n,) with
|
| 154 |
+
the unbounded solution, an ndarray with the sum of squared residuals,
|
| 155 |
+
an int with the rank of `A`, and an ndarray with the singular values
|
| 156 |
+
of `A` (see NumPy's ``linalg.lstsq`` for more information). If
|
| 157 |
+
`lsq_solver` is set to ``'lsmr'``, the tuple contains an ndarray of
|
| 158 |
+
shape (n,) with the unbounded solution, an int with the exit code,
|
| 159 |
+
an int with the number of iterations, and five floats with
|
| 160 |
+
various norms and the condition number of `A` (see SciPy's
|
| 161 |
+
``sparse.linalg.lsmr`` for more information). This output can be
|
| 162 |
+
useful for determining the convergence of the least squares solver,
|
| 163 |
+
particularly the iterative ``'lsmr'`` solver. The unbounded least
|
| 164 |
+
squares problem is to minimize ``0.5 * ||A x - b||**2``.
|
| 165 |
+
nit : int
|
| 166 |
+
Number of iterations. Zero if the unconstrained solution is optimal.
|
| 167 |
+
status : int
|
| 168 |
+
Reason for algorithm termination:
|
| 169 |
+
|
| 170 |
+
* -1 : the algorithm was not able to make progress on the last
|
| 171 |
+
iteration.
|
| 172 |
+
* 0 : the maximum number of iterations is exceeded.
|
| 173 |
+
* 1 : the first-order optimality measure is less than `tol`.
|
| 174 |
+
* 2 : the relative change of the cost function is less than `tol`.
|
| 175 |
+
* 3 : the unconstrained solution is optimal.
|
| 176 |
+
|
| 177 |
+
message : str
|
| 178 |
+
Verbal description of the termination reason.
|
| 179 |
+
success : bool
|
| 180 |
+
True if one of the convergence criteria is satisfied (`status` > 0).
|
| 181 |
+
|
| 182 |
+
See Also
|
| 183 |
+
--------
|
| 184 |
+
nnls : Linear least squares with non-negativity constraint.
|
| 185 |
+
least_squares : Nonlinear least squares with bounds on the variables.
|
| 186 |
+
|
| 187 |
+
Notes
|
| 188 |
+
-----
|
| 189 |
+
The algorithm first computes the unconstrained least-squares solution by
|
| 190 |
+
`numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
|
| 191 |
+
`lsq_solver`. This solution is returned as optimal if it lies within the
|
| 192 |
+
bounds.
|
| 193 |
+
|
| 194 |
+
Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for
|
| 195 |
+
a linear least-squares problem. The iterations are essentially the same as
|
| 196 |
+
in the nonlinear least-squares algorithm, but as the quadratic function
|
| 197 |
+
model is always accurate, we don't need to track or modify the radius of
|
| 198 |
+
a trust region. The line search (backtracking) is used as a safety net
|
| 199 |
+
when a selected step does not decrease the cost function. Read more
|
| 200 |
+
detailed description of the algorithm in `scipy.optimize.least_squares`.
|
| 201 |
+
|
| 202 |
+
Method 'bvls' runs a Python implementation of the algorithm described in
|
| 203 |
+
[BVLS]_. The algorithm maintains active and free sets of variables, on
|
| 204 |
+
each iteration chooses a new variable to move from the active set to the
|
| 205 |
+
free set and then solves the unconstrained least-squares problem on free
|
| 206 |
+
variables. This algorithm is guaranteed to give an accurate solution
|
| 207 |
+
eventually, but may require up to n iterations for a problem with n
|
| 208 |
+
variables. Additionally, an ad-hoc initialization procedure is
|
| 209 |
+
implemented, that determines which variables to set free or active
|
| 210 |
+
initially. It takes some number of iterations before actual BVLS starts,
|
| 211 |
+
but can significantly reduce the number of further iterations.
|
| 212 |
+
|
| 213 |
+
References
|
| 214 |
+
----------
|
| 215 |
+
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
|
| 216 |
+
and Conjugate Gradient Method for Large-Scale Bound-Constrained
|
| 217 |
+
Minimization Problems," SIAM Journal on Scientific Computing,
|
| 218 |
+
Vol. 21, Number 1, pp 1-23, 1999.
|
| 219 |
+
.. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares:
|
| 220 |
+
an Algorithm and Applications", Computational Statistics, 10,
|
| 221 |
+
129-141, 1995.
|
| 222 |
+
|
| 223 |
+
Examples
|
| 224 |
+
--------
|
| 225 |
+
In this example, a problem with a large sparse matrix and bounds on the
|
| 226 |
+
variables is solved.
|
| 227 |
+
|
| 228 |
+
>>> import numpy as np
|
| 229 |
+
>>> from scipy.sparse import rand
|
| 230 |
+
>>> from scipy.optimize import lsq_linear
|
| 231 |
+
>>> rng = np.random.default_rng()
|
| 232 |
+
...
|
| 233 |
+
>>> m = 20000
|
| 234 |
+
>>> n = 10000
|
| 235 |
+
...
|
| 236 |
+
>>> A = rand(m, n, density=1e-4, random_state=rng)
|
| 237 |
+
>>> b = rng.standard_normal(m)
|
| 238 |
+
...
|
| 239 |
+
>>> lb = rng.standard_normal(n)
|
| 240 |
+
>>> ub = lb + 1
|
| 241 |
+
...
|
| 242 |
+
>>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1)
|
| 243 |
+
# may vary
|
| 244 |
+
The relative change of the cost function is less than `tol`.
|
| 245 |
+
Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04,
|
| 246 |
+
first-order optimality 4.66e-08.
|
| 247 |
+
"""
|
| 248 |
+
if method not in ['trf', 'bvls']:
|
| 249 |
+
raise ValueError("`method` must be 'trf' or 'bvls'")
|
| 250 |
+
|
| 251 |
+
if lsq_solver not in [None, 'exact', 'lsmr']:
|
| 252 |
+
raise ValueError("`solver` must be None, 'exact' or 'lsmr'.")
|
| 253 |
+
|
| 254 |
+
if verbose not in [0, 1, 2]:
|
| 255 |
+
raise ValueError("`verbose` must be in [0, 1, 2].")
|
| 256 |
+
|
| 257 |
+
if issparse(A):
|
| 258 |
+
A = csr_matrix(A)
|
| 259 |
+
elif not isinstance(A, LinearOperator):
|
| 260 |
+
A = np.atleast_2d(np.asarray(A))
|
| 261 |
+
|
| 262 |
+
if method == 'bvls':
|
| 263 |
+
if lsq_solver == 'lsmr':
|
| 264 |
+
raise ValueError("method='bvls' can't be used with "
|
| 265 |
+
"lsq_solver='lsmr'")
|
| 266 |
+
|
| 267 |
+
if not isinstance(A, np.ndarray):
|
| 268 |
+
raise ValueError("method='bvls' can't be used with `A` being "
|
| 269 |
+
"sparse or LinearOperator.")
|
| 270 |
+
|
| 271 |
+
if lsq_solver is None:
|
| 272 |
+
if isinstance(A, np.ndarray):
|
| 273 |
+
lsq_solver = 'exact'
|
| 274 |
+
else:
|
| 275 |
+
lsq_solver = 'lsmr'
|
| 276 |
+
elif lsq_solver == 'exact' and not isinstance(A, np.ndarray):
|
| 277 |
+
raise ValueError("`exact` solver can't be used when `A` is "
|
| 278 |
+
"sparse or LinearOperator.")
|
| 279 |
+
|
| 280 |
+
if len(A.shape) != 2: # No ndim for LinearOperator.
|
| 281 |
+
raise ValueError("`A` must have at most 2 dimensions.")
|
| 282 |
+
|
| 283 |
+
if max_iter is not None and max_iter <= 0:
|
| 284 |
+
raise ValueError("`max_iter` must be None or positive integer.")
|
| 285 |
+
|
| 286 |
+
m, n = A.shape
|
| 287 |
+
|
| 288 |
+
b = np.atleast_1d(b)
|
| 289 |
+
if b.ndim != 1:
|
| 290 |
+
raise ValueError("`b` must have at most 1 dimension.")
|
| 291 |
+
|
| 292 |
+
if b.size != m:
|
| 293 |
+
raise ValueError("Inconsistent shapes between `A` and `b`.")
|
| 294 |
+
|
| 295 |
+
if isinstance(bounds, Bounds):
|
| 296 |
+
lb = bounds.lb
|
| 297 |
+
ub = bounds.ub
|
| 298 |
+
else:
|
| 299 |
+
lb, ub = prepare_bounds(bounds, n)
|
| 300 |
+
|
| 301 |
+
if lb.shape != (n,) and ub.shape != (n,):
|
| 302 |
+
raise ValueError("Bounds have wrong shape.")
|
| 303 |
+
|
| 304 |
+
if np.any(lb >= ub):
|
| 305 |
+
raise ValueError("Each lower bound must be strictly less than each "
|
| 306 |
+
"upper bound.")
|
| 307 |
+
|
| 308 |
+
if lsmr_maxiter is not None and lsmr_maxiter < 1:
|
| 309 |
+
raise ValueError("`lsmr_maxiter` must be None or positive integer.")
|
| 310 |
+
|
| 311 |
+
if not ((isinstance(lsmr_tol, float) and lsmr_tol > 0) or
|
| 312 |
+
lsmr_tol in ('auto', None)):
|
| 313 |
+
raise ValueError("`lsmr_tol` must be None, 'auto', or positive float.")
|
| 314 |
+
|
| 315 |
+
if lsq_solver == 'exact':
|
| 316 |
+
unbd_lsq = np.linalg.lstsq(A, b, rcond=-1)
|
| 317 |
+
elif lsq_solver == 'lsmr':
|
| 318 |
+
first_lsmr_tol = lsmr_tol # tol of first call to lsmr
|
| 319 |
+
if lsmr_tol is None or lsmr_tol == 'auto':
|
| 320 |
+
first_lsmr_tol = 1e-2 * tol # default if lsmr_tol not defined
|
| 321 |
+
unbd_lsq = lsmr(A, b, maxiter=lsmr_maxiter,
|
| 322 |
+
atol=first_lsmr_tol, btol=first_lsmr_tol)
|
| 323 |
+
x_lsq = unbd_lsq[0] # extract the solution from the least squares solver
|
| 324 |
+
|
| 325 |
+
if in_bounds(x_lsq, lb, ub):
|
| 326 |
+
r = A @ x_lsq - b
|
| 327 |
+
cost = 0.5 * np.dot(r, r)
|
| 328 |
+
termination_status = 3
|
| 329 |
+
termination_message = TERMINATION_MESSAGES[termination_status]
|
| 330 |
+
g = compute_grad(A, r)
|
| 331 |
+
g_norm = norm(g, ord=np.inf)
|
| 332 |
+
|
| 333 |
+
if verbose > 0:
|
| 334 |
+
print(termination_message)
|
| 335 |
+
print(f"Final cost {cost:.4e}, first-order optimality {g_norm:.2e}")
|
| 336 |
+
|
| 337 |
+
return OptimizeResult(
|
| 338 |
+
x=x_lsq, fun=r, cost=cost, optimality=g_norm,
|
| 339 |
+
active_mask=np.zeros(n), unbounded_sol=unbd_lsq,
|
| 340 |
+
nit=0, status=termination_status,
|
| 341 |
+
message=termination_message, success=True)
|
| 342 |
+
|
| 343 |
+
if method == 'trf':
|
| 344 |
+
res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
|
| 345 |
+
max_iter, verbose, lsmr_maxiter=lsmr_maxiter)
|
| 346 |
+
elif method == 'bvls':
|
| 347 |
+
res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose)
|
| 348 |
+
|
| 349 |
+
res.unbounded_sol = unbd_lsq
|
| 350 |
+
res.message = TERMINATION_MESSAGES[res.status]
|
| 351 |
+
res.success = res.status > 0
|
| 352 |
+
|
| 353 |
+
if verbose > 0:
|
| 354 |
+
print(res.message)
|
| 355 |
+
print(
|
| 356 |
+
f"Number of iterations {res.nit}, initial cost {res.initial_cost:.4e}, "
|
| 357 |
+
f"final cost {res.cost:.4e}, first-order optimality {res.optimality:.2e}."
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
del res.initial_cost
|
| 361 |
+
|
| 362 |
+
return res
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py
ADDED
|
@@ -0,0 +1,560 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Trust Region Reflective algorithm for least-squares optimization.
|
| 2 |
+
|
| 3 |
+
The algorithm is based on ideas from paper [STIR]_. The main idea is to
|
| 4 |
+
account for the presence of the bounds by appropriate scaling of the variables (or,
|
| 5 |
+
equivalently, changing a trust-region shape). Let's introduce a vector v:
|
| 6 |
+
|
| 7 |
+
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
|
| 8 |
+
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
|
| 9 |
+
| 1, otherwise
|
| 10 |
+
|
| 11 |
+
where g is the gradient of a cost function and lb, ub are the bounds. Its
|
| 12 |
+
components are distances to the bounds at which the anti-gradient points (if
|
| 13 |
+
this distance is finite). Define a scaling matrix D = diag(v**0.5).
|
| 14 |
+
First-order optimality conditions can be stated as
|
| 15 |
+
|
| 16 |
+
D^2 g(x) = 0.
|
| 17 |
+
|
| 18 |
+
Meaning that components of the gradient should be zero for strictly interior
|
| 19 |
+
variables, and components must point inside the feasible region for variables
|
| 20 |
+
on the bound.
|
| 21 |
+
|
| 22 |
+
Now consider this system of equations as a new optimization problem. If the
|
| 23 |
+
point x is strictly interior (not on the bound), then the left-hand side is
|
| 24 |
+
differentiable and the Newton step for it satisfies
|
| 25 |
+
|
| 26 |
+
(D^2 H + diag(g) Jv) p = -D^2 g
|
| 27 |
+
|
| 28 |
+
where H is the Hessian matrix (or its J^T J approximation in least squares),
|
| 29 |
+
Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
|
| 30 |
+
elements of matrix C = diag(g) Jv are non-negative. Introduce the change
|
| 31 |
+
of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables,
|
| 32 |
+
we have a Newton step satisfying
|
| 33 |
+
|
| 34 |
+
B_h p_h = -g_h,
|
| 35 |
+
|
| 36 |
+
where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
|
| 37 |
+
J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
|
| 38 |
+
to "hat" variables. To guarantee global convergence we formulate a
|
| 39 |
+
trust-region problem based on the Newton step in the new variables:
|
| 40 |
+
|
| 41 |
+
0.5 * p_h^T B_h p + g_h^T p_h -> min, ||p_h|| <= Delta
|
| 42 |
+
|
| 43 |
+
In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
|
| 44 |
+
problem is
|
| 45 |
+
|
| 46 |
+
0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
|
| 47 |
+
|
| 48 |
+
Here, the meaning of the matrix D becomes more clear: it alters the shape
|
| 49 |
+
of a trust-region, such that large steps towards the bounds are not allowed.
|
| 50 |
+
In the implementation, the trust-region problem is solved in "hat" space,
|
| 51 |
+
but handling of the bounds is done in the original space (see below and read
|
| 52 |
+
the code).
|
| 53 |
+
|
| 54 |
+
The introduction of the matrix D doesn't allow to ignore bounds, the algorithm
|
| 55 |
+
must keep iterates strictly feasible (to satisfy aforementioned
|
| 56 |
+
differentiability), the parameter theta controls step back from the boundary
|
| 57 |
+
(see the code for details).
|
| 58 |
+
|
| 59 |
+
The algorithm does another important trick. If the trust-region solution
|
| 60 |
+
doesn't fit into the bounds, then a reflected (from a firstly encountered
|
| 61 |
+
bound) search direction is considered. For motivation and analysis refer to
|
| 62 |
+
[STIR]_ paper (and other papers of the authors). In practice, it doesn't need
|
| 63 |
+
a lot of justifications, the algorithm simply chooses the best step among
|
| 64 |
+
three: a constrained trust-region step, a reflected step and a constrained
|
| 65 |
+
Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
|
| 66 |
+
space).
|
| 67 |
+
|
| 68 |
+
Another feature is that a trust-region radius control strategy is modified to
|
| 69 |
+
account for appearance of the diagonal C matrix (called diag_h in the code).
|
| 70 |
+
|
| 71 |
+
Note that all described peculiarities are completely gone as we consider
|
| 72 |
+
problems without bounds (the algorithm becomes a standard trust-region type
|
| 73 |
+
algorithm very similar to ones implemented in MINPACK).
|
| 74 |
+
|
| 75 |
+
The implementation supports two methods of solving the trust-region problem.
|
| 76 |
+
The first, called 'exact', applies SVD on Jacobian and then solves the problem
|
| 77 |
+
very accurately using the algorithm described in [JJMore]_. It is not
applicable to large problems. The second, called 'lsmr', uses the 2-D subspace
|
| 79 |
+
approach (sometimes called "indefinite dogleg"), where the problem is solved
|
| 80 |
+
in a subspace spanned by the gradient and the approximate Gauss-Newton step
|
| 81 |
+
found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
|
| 82 |
+
reformulated as a 4th order algebraic equation and solved very accurately by
|
| 83 |
+
``numpy.roots``. The subspace approach allows to solve very large problems
|
| 84 |
+
(up to couple of millions of residuals on a regular PC), provided the Jacobian
|
| 85 |
+
matrix is sufficiently sparse.
|
| 86 |
+
|
| 87 |
+
References
|
| 88 |
+
----------
|
| 89 |
+
.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
|
| 90 |
+
and Conjugate Gradient Method for Large-Scale Bound-Constrained
|
| 91 |
+
Minimization Problems," SIAM Journal on Scientific Computing,
|
| 92 |
+
Vol. 21, Number 1, pp 1-23, 1999.
|
| 93 |
+
.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
|
| 94 |
+
            and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
            Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
|
| 95 |
+
"""
|
| 96 |
+
import numpy as np
|
| 97 |
+
from numpy.linalg import norm
|
| 98 |
+
from scipy.linalg import svd, qr
|
| 99 |
+
from scipy.sparse.linalg import lsmr
|
| 100 |
+
from scipy.optimize import OptimizeResult
|
| 101 |
+
|
| 102 |
+
from .common import (
|
| 103 |
+
step_size_to_bound, find_active_constraints, in_bounds,
|
| 104 |
+
make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
|
| 105 |
+
solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
|
| 106 |
+
evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
|
| 107 |
+
CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
|
| 108 |
+
update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
|
| 109 |
+
print_iteration_nonlinear)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
        loss_function, tr_solver, tr_options, verbose):
    """Minimize a least-squares cost with the Trust Region Reflective method.

    Dispatches to one of two drivers: ``trf_no_bounds`` when every bound is
    infinite (so all the Coleman-Li scaling machinery can be skipped), and
    ``trf_bounds`` otherwise.  Both drivers share the argument list except
    that the unbounded one does not receive ``lb``/``ub``.

    The two drivers duplicate a fair amount of code on purpose: keeping them
    separate (at the cost of DRY) keeps each one readable and lets the
    unbounded path stay lean.
    """
    # The problem is effectively unconstrained only when *every* lower bound
    # is -inf and *every* upper bound is +inf.
    unconstrained = np.all(lb == -np.inf) and np.all(ub == np.inf)
    if unconstrained:
        return trf_no_bounds(
            fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
            loss_function, tr_solver, tr_options, verbose)
    return trf_bounds(
        fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
        loss_function, tr_solver, tr_options, verbose)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
    """Select the best step according to Trust Region Reflective algorithm.

    Three candidate steps are compared by their predicted quadratic-model
    value (all evaluated in the scaled "hat" space):

    1. the trust-region step ``p`` itself, if it is feasible;
    2. a step reflected off the first bound that ``p`` hits;
    3. a constrained Cauchy step along the scaled anti-gradient.

    Parameters use the naming convention of this module: ``*_h`` quantities
    live in "hat" (scaled) space, ``d`` is the scaling vector such that
    original-space step = ``d * step_h``, ``Delta`` is the trust-region
    radius, and ``theta`` (< 1) controls how far the step is pulled back
    from the bounds to keep iterates strictly feasible.

    Returns ``(step, step_h, predicted_reduction)`` where the reduction is
    the negated model value of the chosen step.  Note: ``p`` and ``p_h``
    are modified in place when the trust-region step is infeasible.
    """
    # Fast path: the unmodified trust-region step stays inside the bounds.
    if in_bounds(x + p, lb, ub):
        p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
        return p, p_h, -p_value

    # Fraction of p that can be taken before a bound is hit, and which
    # components hit a bound (+1 upper / -1 lower encoded in `hits`).
    p_stride, hits = step_size_to_bound(x, p, lb, ub)

    # Compute the reflected direction.
    r_h = np.copy(p_h)
    r_h[hits.astype(bool)] *= -1
    r = d * r_h

    # Restrict trust-region step, such that it hits the bound.
    p *= p_stride
    p_h *= p_stride
    x_on_bound = x + p

    # Reflected direction will cross first either feasible region or trust
    # region boundary.
    _, to_tr = intersect_trust_region(p_h, r_h, Delta)
    to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)

    # Find lower and upper bounds on a step size along the reflected
    # direction, considering the strict feasibility requirement. There is no
    # single correct way to do that, the chosen approach seems to work best
    # on test problems.
    r_stride = min(to_bound, to_tr)
    if r_stride > 0:
        r_stride_l = (1 - theta) * p_stride / r_stride
        if r_stride == to_bound:
            r_stride_u = theta * to_bound
        else:
            r_stride_u = to_tr
    else:
        # Degenerate case: no room to move along the reflection; make the
        # interval empty (l > u) so the reflected step is skipped below.
        r_stride_l = 0
        r_stride_u = -1

    # Check if reflection step is available.
    if r_stride_l <= r_stride_u:
        # Minimize the 1-D quadratic model along the reflected direction,
        # restricted to [r_stride_l, r_stride_u].
        a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
        r_stride, r_value = minimize_quadratic_1d(
            a, b, r_stride_l, r_stride_u, c=c)
        r_h *= r_stride
        r_h += p_h
        r = r_h * d
    else:
        # No admissible reflected step; make sure it never wins the
        # comparison at the end.
        r_value = np.inf

    # Now correct p_h to make it strictly interior.
    p *= theta
    p_h *= theta
    p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)

    # Candidate 3: constrained Cauchy step along the scaled anti-gradient.
    ag_h = -g_h
    ag = d * ag_h

    to_tr = Delta / norm(ag_h)
    to_bound, _ = step_size_to_bound(x, ag, lb, ub)
    if to_bound < to_tr:
        # Bound is hit before the trust region: step back by theta to stay
        # strictly feasible.
        ag_stride = theta * to_bound
    else:
        ag_stride = to_tr

    a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
    ag_h *= ag_stride
    ag *= ag_stride

    # Pick the candidate with the lowest model value; ties fall through to
    # the Cauchy step, which is always defined.
    if p_value < r_value and p_value < ag_value:
        return p, p_h, -p_value
    elif r_value < p_value and r_value < ag_value:
        return r, r_h, -r_value
    else:
        return ag, ag_h, -ag_value
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
               x_scale, loss_function, tr_solver, tr_options, verbose):
    """Trust Region Reflective driver for the bound-constrained case.

    Implements the iteration described in the module docstring ([STIR]_):
    at each outer iteration the Coleman-Li scaling vector ``v`` is
    recomputed, the problem is transformed to "hat" space (``d = v**0.5 *
    scale``), a trust-region subproblem is solved there (by SVD when
    ``tr_solver == 'exact'``, or in a 2-D subspace built from an ``lsmr``
    Gauss-Newton step when ``tr_solver == 'lsmr'``), and `select_step`
    chooses between the trust-region, reflected and Cauchy candidates.

    Returns an `OptimizeResult` with fields ``x, cost, fun, jac, grad,
    optimality, active_mask, nfev, njev, status``.  ``status`` follows the
    usual least-squares convention (0 = max_nfev reached, 1 = gtol,
    2 = ftol, 3 = xtol; None is replaced by 0 before returning).
    """
    x = x0.copy()

    f = f0
    # f_true keeps the raw residual; `f` itself may be rescaled below when a
    # robust loss function is in effect.
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1
    m, n = J.shape

    if loss_function is not None:
        # rho[0] holds the loss values; J and f are rescaled so that the
        # remaining algebra can proceed as in the plain least-squares case.
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    # x_scale == 'jac' requests iteratively updated scaling from the
    # Jacobian columns; otherwise a fixed user-supplied scale is used.
    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    # Coleman-Li scaling vector and its "Jacobian" dv (entries -1/0/1).
    v, dv = CL_scaling_vector(x, g, lb, ub)
    v[dv != 0] *= scale_inv[dv != 0]
    # Initial trust-region radius in "hat" space.
    Delta = norm(x0 * scale_inv / v**0.5)
    if Delta == 0:
        Delta = 1.0

    g_norm = norm(g * v, ord=np.inf)

    # Residual vector augmented with n zeros for the regularization rows.
    f_augmented = np.zeros(m + n)
    if tr_solver == 'exact':
        # Rows [0:m] hold J_h, rows [m:] the diagonal regularization term.
        J_augmented = np.empty((m + n, n))
    elif tr_solver == 'lsmr':
        reg_term = 0.0
        # NOTE: pop() so the remaining tr_options can be forwarded verbatim
        # to lsmr.
        regularize = tr_options.pop('regularize', True)

    if max_nfev is None:
        max_nfev = x0.size * 100

    alpha = 0.0  # "Levenberg-Marquardt" parameter

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    # ------------------------------------------------------------------
    # Main iteration loop.
    # ------------------------------------------------------------------
    while True:
        v, dv = CL_scaling_vector(x, g, lb, ub)

        # Scaled first-order optimality measure; see module docstring.
        g_norm = norm(g * v, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        # Now compute variables in "hat" space. Here, we also account for
        # scaling introduced by `x_scale` parameter. This part is a bit tricky,
        # you have to write down the formulas and see how the trust-region
        # problem is formulated when the two types of scaling are applied.
        # The idea is that first we apply `x_scale` and then apply Coleman-Li
        # approach in the new variables.

        # v is recomputed in the variables after applying `x_scale`, note that
        # components which were identically 1 not affected.
        v[dv != 0] *= scale_inv[dv != 0]

        # Here, we apply two types of scaling.
        d = v**0.5 * scale

        # C = diag(g * scale) Jv
        diag_h = g * dv * scale

        # After all this has been done, we continue normally.

        # "hat" gradient.
        g_h = d * g

        f_augmented[:m] = f
        if tr_solver == 'exact':
            J_augmented[:m] = J * d
            J_h = J_augmented[:m]  # Memory view.
            J_augmented[m:] = np.diag(diag_h**0.5)
            U, s, V = svd(J_augmented, full_matrices=False)
            V = V.T
            uf = U.T.dot(f_augmented)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)

            if regularize:
                # Regularization term derived from the Cauchy-point model
                # value; improves conditioning of the lsmr solve.
                a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2

            lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
            gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
            # 2-D subspace spanned by the gradient and the (approximate)
            # Gauss-Newton step; orthonormalize for stability.
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)  # LinearOperator does dot too.
            B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
            g_S = S.T.dot(g_h)

        # theta controls step back step ratio from the bounds.
        theta = max(0.995, 1 - g_norm)

        # Inner loop: shrink the trust region until a step actually reduces
        # the cost (or the evaluation budget is exhausted).
        actual_reduction = -1
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                p_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                p_h = S.dot(p_S)

            p = d * p_h  # Trust-region solution in the original space.
            step, step_h, predicted_reduction = select_step(
                x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)

            # rstep=0: clip exactly onto the bounds rather than stepping in.
            x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step_h)

            if not np.all(np.isfinite(f_new)):
                # Residual blew up (nan/inf): shrink the region and retry.
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new
            Delta_new, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, step_h_norm > 0.95 * Delta)

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
            if termination_status is not None:
                break

            # Keep the LM parameter consistent with the new radius.
            alpha *= Delta / Delta_new
            Delta = Delta_new

        if actual_reduction > 0:
            # Accept the step and refresh residual, Jacobian and gradient.
            x = x_new

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            # Step rejected (or budget hit inside the inner loop).
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev,
        status=termination_status)
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
                  x_scale, loss_function, tr_solver, tr_options, verbose):
    """Trust Region Reflective driver for the unconstrained case.

    Simplified twin of `trf_bounds`: with no bounds the Coleman-Li scaling
    vector is identically 1, so only `x_scale` scaling remains (``d =
    scale``), steps need no reflection/feasibility handling, and the
    returned ``active_mask`` is all zeros.  The trust-region subproblem is
    solved by SVD (``tr_solver == 'exact'``) or in the 2-D subspace spanned
    by the gradient and an ``lsmr`` Gauss-Newton step (``tr_solver ==
    'lsmr'``).

    Returns an `OptimizeResult` with the same fields and status convention
    as `trf_bounds`.
    """
    x = x0.copy()

    f = f0
    # f_true keeps the raw residual; `f` may be rescaled for a robust loss.
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1
    m, n = J.shape

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    # x_scale == 'jac' requests scaling recomputed from Jacobian columns.
    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    Delta = norm(x0 * scale_inv)
    if Delta == 0:
        Delta = 1.0

    if tr_solver == 'lsmr':
        reg_term = 0
        # pop() so remaining tr_options can be forwarded verbatim to lsmr.
        damp = tr_options.pop('damp', 0.0)
        regularize = tr_options.pop('regularize', True)

    if max_nfev is None:
        max_nfev = x0.size * 100

    alpha = 0.0  # "Levenberg-Marquardt" parameter

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    # ------------------------------------------------------------------
    # Main iteration loop.
    # ------------------------------------------------------------------
    while True:
        # Unscaled infinity norm of the gradient is the optimality measure.
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        # Only `x_scale` scaling remains without bounds.
        d = scale
        g_h = d * g

        if tr_solver == 'exact':
            J_h = J * d
            U, s, V = svd(J_h, full_matrices=False)
            V = V.T
            uf = U.T.dot(f)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)

            if regularize:
                # Regularization derived from the Cauchy-point model value.
                a, b = build_quadratic_1d(J_h, g_h, -g_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2

            # Combine user damping with the computed regularization.
            damp_full = (damp**2 + reg_term)**0.5
            gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
            # 2-D subspace: gradient + approximate Gauss-Newton step.
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)
            B_S = np.dot(JS.T, JS)
            g_S = S.T.dot(g_h)

        # Inner loop: shrink the trust region until the cost decreases
        # (or the evaluation budget runs out).
        actual_reduction = -1
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                step_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                step_h = S.dot(p_S)

            predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
            step = d * step_h
            x_new = x + step
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step_h)

            if not np.all(np.isfinite(f_new)):
                # Residual blew up (nan/inf): shrink the region and retry.
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta_new, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, step_h_norm > 0.95 * Delta)

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
            if termination_status is not None:
                break

            # Keep the LM parameter consistent with the new radius.
            alpha *= Delta / Delta_new
            Delta = Delta_new

        if actual_reduction > 0:
            # Accept the step and refresh residual, Jacobian and gradient.
            x = x_new

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            # Step rejected (or budget hit inside the inner loop).
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    # No bounds, so no constraint can be active.
    active_mask = np.zeros_like(x)
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev,
        status=termination_status)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""The adaptation of Trust Region Reflective algorithm for a linear
|
| 2 |
+
least-squares problem."""
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.linalg import norm
|
| 5 |
+
from scipy.linalg import qr, solve_triangular
|
| 6 |
+
from scipy.sparse.linalg import lsmr
|
| 7 |
+
from scipy.optimize import OptimizeResult
|
| 8 |
+
|
| 9 |
+
from .givens_elimination import givens_elimination
|
| 10 |
+
from .common import (
|
| 11 |
+
EPS, step_size_to_bound, find_active_constraints, in_bounds,
|
| 12 |
+
make_strictly_feasible, build_quadratic_1d, evaluate_quadratic,
|
| 13 |
+
minimize_quadratic_1d, CL_scaling_vector, reflective_transformation,
|
| 14 |
+
print_header_linear, print_iteration_linear, compute_grad,
|
| 15 |
+
regularized_lsq_operator, right_multiplied_operator)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):
    """Solve regularized least squares using information from QR-decomposition.

    The initial problem is to solve the following system in a least-squares
    sense::

        A x = b
        D x = 0

    where D is diagonal matrix. The method is based on QR decomposition
    of the form A P = Q R, where P is a column permutation matrix, Q is an
    orthogonal matrix and R is an upper triangular matrix.

    Parameters
    ----------
    m, n : int
        Initial shape of A.
    R : ndarray, shape (n, n)
        Upper triangular matrix from QR decomposition of A.
    QTb : ndarray, shape (n,)
        First n components of Q^T b.
    perm : ndarray, shape (n,)
        Array defining column permutation of A, such that ith column of
        P is perm[i]-th column of identity matrix.
    diag : ndarray, shape (n,)
        Array containing diagonal elements of D.
    copy_R : bool, optional
        If True (default) work on a copy of ``R``; otherwise ``R`` is
        modified in place.

    Returns
    -------
    x : ndarray, shape (n,)
        Found least-squares solution.
    """
    work_R = R.copy() if copy_R else R
    rhs = QTb.copy()

    # Fold the diagonal regularization rows D into the triangular system
    # via Givens rotations (modifies work_R and rhs in place).
    givens_elimination(work_R, rhs, diag[perm])

    # Detect numerically rank-deficient columns: keep only diagonal entries
    # above a tolerance scaled by the largest diagonal magnitude.
    diag_mag = np.abs(np.diag(work_R))
    cutoff = EPS * max(m, n) * np.max(diag_mag)
    keep, = np.nonzero(diag_mag > cutoff)

    reduced_R = work_R[np.ix_(keep, keep)]
    reduced_rhs = rhs[keep]

    # Back-substitute on the well-conditioned subsystem; dropped components
    # of the (permuted) solution stay zero.
    x = np.zeros(n)
    x[perm[keep]] = solve_triangular(reduced_R, reduced_rhs)

    return x
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
    """Find an appropriate step size using backtracking line search."""
    # Halve the step until the quadratic model predicts a sufficient
    # decrease (Armijo-style condition with constant 0.1).
    alpha = 1
    while True:
        x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)
        sufficient = cost_change > -0.1 * alpha * p_dot_g
        if sufficient:
            break
        alpha *= 0.5

    # If the accepted point landed on a bound, back the step off by
    # ``theta`` so the iterate stays strictly interior, and re-evaluate
    # the predicted cost change for the shortened step.
    if np.any(find_active_constraints(x_new, lb, ub) != 0):
        x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
        x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)

    return x, step, cost_change
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta):
    """Select the best step according to Trust Region Reflective algorithm.

    Three candidates are compared by their predicted quadratic value in the
    scaled ("hat", ``_h``) space: the unrestricted step ``p``, its reflection
    off the bound(s) it hits, and the constrained anti-gradient step.

    NOTE(review): ``p`` and ``p_h`` are modified in place (scaled by the
    stride to the bound and then by ``theta``); callers must not reuse the
    passed arrays afterwards.
    """
    # Fast path: the full step stays inside the bounds, take it as-is.
    if in_bounds(x + p, lb, ub):
        return p

    # Reflect the scaled step's components at the bounds it hits.
    p_stride, hits = step_size_to_bound(x, p, lb, ub)
    r_h = np.copy(p_h)
    r_h[hits.astype(bool)] *= -1
    r = d * r_h

    # Restrict step, such that it hits the bound.
    p *= p_stride
    p_h *= p_stride
    x_on_bound = x + p

    # Find the step size along reflected direction.
    r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub)

    # Stay interior.
    r_stride_l = (1 - theta) * r_stride_u
    r_stride_u *= theta

    if r_stride_u > 0:
        # Minimize the 1-D quadratic model along the reflected direction
        # within the interior stride interval.
        a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h)
        r_stride, r_value = minimize_quadratic_1d(
            a, b, r_stride_l, r_stride_u, c=c)
        r_h = p_h + r_h * r_stride
        r = d * r_h
    else:
        # No room to move along the reflected direction.
        r_value = np.inf

    # Now correct p_h to make it strictly interior.
    p_h *= theta
    p *= theta
    p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h)

    # Constrained minimization along the (scaled) anti-gradient direction.
    ag_h = -g_h
    ag = d * ag_h
    ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub)
    ag_stride_u *= theta
    a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h)
    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u)
    ag *= ag_stride

    # Return whichever candidate has the lowest predicted value; ties fall
    # through to the anti-gradient step.
    if p_value < r_value and p_value < ag_value:
        return p
    elif r_value < p_value and r_value < ag_value:
        return r
    else:
        return ag
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
               max_iter, verbose, *, lsmr_maxiter=None):
    """Minimize ``0.5 * ||A x - b||**2`` subject to ``lb <= x <= ub`` with
    the Trust Region Reflective method adapted to the linear case.

    ``lsq_solver`` selects the inner solver: ``'exact'`` uses a pivoted QR
    factorization of ``A`` computed once up front; ``'lsmr'`` solves the
    regularized system iteratively each outer iteration.

    Returns an `OptimizeResult` with fields ``x``, ``fun`` (residual),
    ``cost``, ``optimality``, ``active_mask``, ``nit``, ``status`` and
    ``initial_cost``.
    """
    m, n = A.shape
    # Map the unconstrained LSQ solution into the box, then nudge strictly
    # inside so the CL scaling below is well defined.
    x, _ = reflective_transformation(x_lsq, lb, ub)
    x = make_strictly_feasible(x, lb, ub, rstep=0.1)

    if lsq_solver == 'exact':
        QT, R, perm = qr(A, mode='economic', pivoting=True)
        QT = QT.T

        # Pad R to square when the system is underdetermined.
        if m < n:
            R = np.vstack((R, np.zeros((n - m, n))))

        QTr = np.zeros(n)
        k = min(m, n)
    elif lsq_solver == 'lsmr':
        r_aug = np.zeros(m + n)
        auto_lsmr_tol = False
        if lsmr_tol is None:
            lsmr_tol = 1e-2 * tol
        elif lsmr_tol == 'auto':
            auto_lsmr_tol = True

    r = A.dot(x) - b
    g = compute_grad(A, r)
    cost = 0.5 * np.dot(r, r)
    initial_cost = cost

    termination_status = None
    step_norm = None
    cost_change = None

    if max_iter is None:
        max_iter = 100

    if verbose == 2:
        print_header_linear()

    for iteration in range(max_iter):
        # Coleman-Li scaling makes the scaled gradient vanish exactly at a
        # bound-constrained stationary point.
        v, dv = CL_scaling_vector(x, g, lb, ub)
        g_scaled = g * v
        g_norm = norm(g_scaled, ord=np.inf)
        if g_norm < tol:
            termination_status = 1  # first-order optimality reached

        if verbose == 2:
            print_iteration_linear(iteration, cost, cost_change,
                                   step_norm, g_norm)

        if termination_status is not None:
            break

        diag_h = g * dv
        diag_root_h = diag_h ** 0.5
        d = v ** 0.5
        g_h = d * g

        A_h = right_multiplied_operator(A, d)
        if lsq_solver == 'exact':
            # Reuse the up-front QR: only the RHS changes per iteration.
            # copy_R=False is safe since R * d[perm] is a fresh array.
            QTr[:k] = QT.dot(r)
            p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm,
                                           diag_root_h, copy_R=False)
        elif lsq_solver == 'lsmr':
            lsmr_op = regularized_lsq_operator(A_h, diag_root_h)
            r_aug[:m] = r
            if auto_lsmr_tol:
                # Tighten the inner tolerance as the gradient shrinks.
                eta = 1e-2 * min(0.5, g_norm)
                lsmr_tol = max(EPS, min(0.1, eta * g_norm))
            p_h = -lsmr(lsmr_op, r_aug, maxiter=lsmr_maxiter,
                        atol=lsmr_tol, btol=lsmr_tol)[0]

        p = d * p_h

        p_dot_g = np.dot(p, g)
        if p_dot_g > 0:
            termination_status = -1  # not a descent direction

        theta = 1 - min(0.005, g_norm)
        step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta)
        cost_change = -evaluate_quadratic(A, g, step)

        # Perhaps almost never executed, the idea is that `p` is descent
        # direction thus we must find acceptable cost decrease using simple
        # "backtracking", otherwise the algorithm's logic would break.
        if cost_change < 0:
            x, step, cost_change = backtracking(
                A, g, x, p, theta, p_dot_g, lb, ub)
        else:
            x = make_strictly_feasible(x + step, lb, ub, rstep=0)

        step_norm = norm(step)
        r = A.dot(x) - b
        g = compute_grad(A, r)

        # Relative cost-change test uses the *previous* cost on purpose.
        if cost_change < tol * cost:
            termination_status = 2

        cost = 0.5 * np.dot(r, r)

    if termination_status is None:
        termination_status = 0  # max_iter exhausted

    active_mask = find_active_constraints(x, lb, ub, rtol=tol)

    return OptimizeResult(
        x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask,
        nit=iteration + 1, status=termination_status,
        initial_cost=initial_cost)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eea00160871368c5807d7825188089b3fdb35c1373133b5b4c504be8a5dc9d2c
|
| 3 |
+
size 223832
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc
ADDED
|
Binary file (23 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc
ADDED
|
Binary file (14.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py
ADDED
|
@@ -0,0 +1,460 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
from abc import ABC, abstractmethod
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from scipy._lib._util import MapWrapper
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class VertexBase(ABC):
    """
    Base class for a vertex.
    """
    def __init__(self, x, nn=None, index=None):
        """
        Initiation of a vertex object.

        Parameters
        ----------
        x : tuple or vector
            The geometric location (domain).
        nn : list, optional
            Nearest neighbour list.
        index : int, optional
            Index of vertex.
        """
        self.x = x
        self.hash = hash(self.x)  # Save precomputed hash

        if nn is not None:
            self.nn = set(nn)  # can use .indexupdate to add a new list
        else:
            self.nn = set()

        self.index = index

    def __hash__(self):
        # Reuse the hash computed once in __init__ (x never changes).
        return self.hash

    def __getattr__(self, item):
        # Lazily materialize the numpy view of self.x on first access and
        # cache it as a real attribute so __getattr__ is not hit again.
        if item not in ['x_a']:
            raise AttributeError(f"{type(self)} object has no attribute "
                                 f"'{item}'")
        if item == 'x_a':
            self.x_a = np.array(self.x)
            return self.x_a

    @abstractmethod
    def connect(self, v):
        raise NotImplementedError("This method is only implemented with an "
                                  "associated child of the base class.")

    @abstractmethod
    def disconnect(self, v):
        raise NotImplementedError("This method is only implemented with an "
                                  "associated child of the base class.")

    def star(self):
        """Returns the star domain ``st(v)`` of the vertex.

        Parameters
        ----------
        v :
            The vertex ``v`` in ``st(v)``

        Returns
        -------
        st : set
            A set containing all the vertices in ``st(v)``
        """
        # NOTE(review): self.st aliases self.nn (no copy), so adding self
        # here also inserts the vertex into its own neighbour set --
        # presumably intentional upstream behavior; confirm before changing.
        self.st = self.nn
        self.st.add(self)
        return self.st
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class VertexScalarField(VertexBase):
    """
    Add homology properties of a scalar field f: R^n --> R associated with
    the geometry built from the VertexBase class
    """

    def __init__(self, x, field=None, nn=None, index=None, field_args=(),
                 g_cons=None, g_cons_args=()):
        """
        Parameters
        ----------
        x : tuple,
            vector of vertex coordinates
        field : callable, optional
            a scalar field f: R^n --> R associated with the geometry
        nn : list, optional
            list of nearest neighbours
        index : int, optional
            index of the vertex
        field_args : tuple, optional
            additional arguments to be passed to field
        g_cons : callable, optional
            constraints on the vertex
        g_cons_args : tuple, optional
            additional arguments to be passed to g_cons

        """
        super().__init__(x, nn=nn, index=index)

        # Note Vertex is only initiated once for all x so only
        # evaluated once
        # self.feasible = None

        # self.f is externally defined by the cache to allow parallel
        # processing
        # None type that will break arithmetic operations unless defined
        # self.f = None

        # Dirty flags: a True value forces minimiser()/maximiser() to
        # recompute; cleared after each recomputation.
        self.check_min = True
        self.check_max = True

    def connect(self, v):
        """Connects self to another vertex object v.

        Parameters
        ----------
        v : VertexBase or VertexScalarField object
        """
        if v is not self and v not in self.nn:
            self.nn.add(v)
            v.nn.add(self)

            # Flags for checking homology properties:
            # (the neighbourhood changed, so cached extremum status of both
            # endpoints is stale)
            self.check_min = True
            self.check_max = True
            v.check_min = True
            v.check_max = True

    def disconnect(self, v):
        if v in self.nn:
            self.nn.remove(v)
            v.nn.remove(self)

            # Flags for checking homology properties:
            self.check_min = True
            self.check_max = True
            v.check_min = True
            v.check_max = True

    def minimiser(self):
        """Check whether this vertex is strictly less than all its
        neighbours"""
        # Recompute only when the neighbourhood has changed; relies on
        # self.f having been set by the owning vertex cache.
        if self.check_min:
            self._min = all(self.f < v.f for v in self.nn)
            self.check_min = False

        return self._min

    def maximiser(self):
        """
        Check whether this vertex is strictly greater than all its
        neighbours.
        """
        if self.check_max:
            self._max = all(self.f > v.f for v in self.nn)
            self.check_max = False

        return self._max
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class VertexVectorField(VertexBase):
    """
    Add homology properties of a scalar field f: R^n --> R^m associated with
    the geometry built from the VertexBase class.
    """

    def __init__(self, x, sfield=None, vfield=None, field_args=(),
                 vfield_args=(), g_cons=None,
                 g_cons_args=(), nn=None, index=None):
        super().__init__(x, nn=nn, index=index)

        # Vector-field vertices are not implemented yet: constructing an
        # instance is always an error. The signature is kept as the planned
        # public interface.
        raise NotImplementedError("This class is still a work in progress")
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class VertexCacheBase:
    """Base class for a vertex cache for a simplicial complex."""

    def __init__(self):
        # Insertion-ordered mapping from coordinates to vertex objects.
        self.cache = collections.OrderedDict()
        self.nfev = 0  # Feasible points
        self.index = -1

    def __iter__(self):
        # Yield vertex objects in insertion order.
        yield from self.cache.values()

    def size(self):
        """Returns the size of the vertex cache."""
        return self.index + 1

    def print_out(self):
        header = f"Vertex cache of size: {len(self.cache)}:"
        bar = '=' * len(header)
        print(bar)
        print(header)
        print(bar)
        for vertex in self.cache.values():
            vertex.print_out()
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
class VertexCube(VertexBase):
    """Vertex class to be used for a pure simplicial complex with no associated
    differential geometry (single level domain that exists in R^n)"""

    def __init__(self, x, nn=None, index=None):
        super().__init__(x, nn=nn, index=index)

    def connect(self, v):
        """Symmetrically add ``v`` to this vertex's neighbour set."""
        if v is self or v in self.nn:
            return
        self.nn.add(v)
        v.nn.add(self)

    def disconnect(self, v):
        """Symmetrically remove ``v`` from this vertex's neighbour set."""
        if v not in self.nn:
            return
        self.nn.remove(v)
        v.nn.remove(self)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
class VertexCacheIndex(VertexCacheBase):
    def __init__(self):
        """
        Class for a vertex cache for a simplicial complex without an associated
        field. Useful only for building and visualising a domain complex.

        Parameters
        ----------
        """
        super().__init__()
        self.Vertex = VertexCube

    def __getitem__(self, x, nn=None):
        # EAFP: a cache hit is the common case.
        try:
            return self.cache[x]
        except KeyError:
            self.index += 1
            vertex = self.Vertex(x, index=self.index)
            # NOTE: Surprisingly high performance increase if logging
            # is commented out
            self.cache[x] = vertex
            return vertex
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
class VertexCacheField(VertexCacheBase):
    def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(),
                 workers=1):
        """
        Class for a vertex cache for a simplicial complex with an associated
        field.

        Parameters
        ----------
        field : callable
            Scalar or vector field callable.
        field_args : tuple, optional
            Any additional fixed parameters needed to completely specify the
            field function
        g_cons : dict or sequence of dict, optional
            Constraints definition.
            Function(s) ``R**n`` in the form::
        g_cons_args : tuple, optional
            Any additional fixed parameters needed to completely specify the
            constraint functions
        workers : int optional
            Uses `multiprocessing.Pool <multiprocessing>`) to compute the field
            functions in parallel.

        """
        super().__init__()
        self.index = -1
        self.Vertex = VertexScalarField
        self.field = field
        self.field_args = field_args
        self.wfield = FieldWrapper(field, field_args)  # if workers is not 1

        self.g_cons = g_cons
        self.g_cons_args = g_cons_args
        self.wgcons = ConstraintWrapper(g_cons, g_cons_args)
        self.gpool = set()  # A set of tuples to process for feasibility

        # Field processing objects
        self.fpool = set()  # A set of tuples to process for scalar function
        self.sfc_lock = False  # True if self.fpool is non-Empty

        self.workers = workers
        self._mapwrapper = MapWrapper(workers)

        # Bind serial or parallel pool processors once at construction so
        # process_pools() dispatches without re-checking `workers`.
        if workers == 1:
            self.process_gpool = self.proc_gpool
            if g_cons is None:
                self.process_fpool = self.proc_fpool_nog
            else:
                self.process_fpool = self.proc_fpool_g
        else:
            self.process_gpool = self.pproc_gpool
            if g_cons is None:
                self.process_fpool = self.pproc_fpool_nog
            else:
                self.process_fpool = self.pproc_fpool_g

    def __getitem__(self, x, nn=None):
        # Return the cached vertex for x, creating (and queuing for
        # feasibility/field processing) a new one on a cache miss.
        try:
            return self.cache[x]
        except KeyError:
            self.index += 1
            xval = self.Vertex(x, field=self.field, nn=nn, index=self.index,
                               field_args=self.field_args,
                               g_cons=self.g_cons,
                               g_cons_args=self.g_cons_args)

            self.cache[x] = xval  # Define in cache
            self.gpool.add(xval)  # Add to pool for processing feasibility
            self.fpool.add(xval)  # Add to pool for processing field values
            return self.cache[x]

    def __getstate__(self):
        # Drop the unpicklable worker pool before pickling.
        # NOTE(review): the instance attribute is named '_mapwrapper', not
        # 'pool'; as written this raises KeyError on pickling -- confirm
        # against upstream before relying on pickle support.
        self_dict = self.__dict__.copy()
        del self_dict['pool']
        return self_dict

    def process_pools(self):
        # Feasibility first (when constraints exist), then field values,
        # then refresh minimiser/maximiser flags.
        if self.g_cons is not None:
            self.process_gpool()
        self.process_fpool()
        self.proc_minimisers()

    def feasibility_check(self, v):
        # Mark v infeasible (f = inf) on the first violated constraint.
        v.feasible = True
        for g, args in zip(self.g_cons, self.g_cons_args):
            # constraint may return more than 1 value.
            if np.any(g(v.x_a, *args) < 0.0):
                v.f = np.inf
                v.feasible = False
                break

    def compute_sfield(self, v):
        """Compute the scalar field values of a vertex object `v`.

        Parameters
        ----------
        v : VertexBase or VertexScalarField object
        """
        try:
            v.f = self.field(v.x_a, *self.field_args)
            self.nfev += 1
        except AttributeError:
            # field is None (or not callable as expected): treat as inf.
            v.f = np.inf
            # logging.warning(f"Field function not found at x = {self.x_a}")
        if np.isnan(v.f):
            v.f = np.inf

    def proc_gpool(self):
        """Process all constraints."""
        if self.g_cons is not None:
            for v in self.gpool:
                self.feasibility_check(v)
        # Clean the pool
        self.gpool = set()

    def pproc_gpool(self):
        """Process all constraints in parallel."""
        # NOTE(review): unlike proc_gpool, this does not reset self.gpool
        # afterwards -- confirm whether the caller relies on that.
        gpool_l = []
        for v in self.gpool:
            gpool_l.append(v.x_a)

        G = self._mapwrapper(self.wgcons.gcons, gpool_l)
        for v, g in zip(self.gpool, G):
            v.feasible = g  # set vertex object attribute v.feasible = g (bool)

    def proc_fpool_g(self):
        """Process all field functions with constraints supplied."""
        for v in self.fpool:
            if v.feasible:
                self.compute_sfield(v)
        # Clean the pool
        self.fpool = set()

    def proc_fpool_nog(self):
        """Process all field functions with no constraints supplied."""
        for v in self.fpool:
            self.compute_sfield(v)
        # Clean the pool
        self.fpool = set()

    def pproc_fpool_g(self):
        """
        Process all field functions with constraints supplied in parallel.
        """
        # NOTE(review): bare attribute access below appears to be a no-op;
        # possibly a leftover -- confirm intent.
        self.wfield.func
        fpool_l = []
        for v in self.fpool:
            if v.feasible:
                fpool_l.append(v.x_a)
            else:
                v.f = np.inf
        F = self._mapwrapper(self.wfield.func, fpool_l)
        for va, f in zip(fpool_l, F):
            vt = tuple(va)
            self[vt].f = f  # set vertex object attribute v.f = f
            self.nfev += 1
        # Clean the pool
        self.fpool = set()

    def pproc_fpool_nog(self):
        """
        Process all field functions with no constraints supplied in parallel.
        """
        self.wfield.func
        fpool_l = []
        for v in self.fpool:
            fpool_l.append(v.x_a)
        F = self._mapwrapper(self.wfield.func, fpool_l)
        for va, f in zip(fpool_l, F):
            vt = tuple(va)
            self[vt].f = f  # set vertex object attribute v.f = f
            self.nfev += 1
        # Clean the pool
        self.fpool = set()

    def proc_minimisers(self):
        """Check for minimisers."""
        # Touch both cached flags so later queries are up to date.
        for v in self:
            v.minimiser()
            v.maximiser()
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
class ConstraintWrapper:
    """Object to wrap constraints to pass to `multiprocessing.Pool`."""

    def __init__(self, g_cons, g_cons_args):
        self.g_cons = g_cons
        self.g_cons_args = g_cons_args

    def gcons(self, v_x_a):
        """Return True iff every constraint is satisfied at ``v_x_a``."""
        for constraint, extra_args in zip(self.g_cons, self.g_cons_args):
            # A constraint may return more than one value.
            if np.any(constraint(v_x_a, *extra_args) < 0.0):
                return False
        return True
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
class FieldWrapper:
    """Object to wrap field to pass to `multiprocessing.Pool`."""

    def __init__(self, field, field_args):
        self.field = field
        self.field_args = field_args

    def func(self, v_x_a):
        """Evaluate the field at ``v_x_a``; map failures and NaN to inf."""
        try:
            value = self.field(v_x_a, *self.field_args)
        except Exception:
            return np.inf
        return np.inf if np.isnan(value) else value
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_tnc.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# TNC Python interface
|
| 2 |
+
# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $
|
| 3 |
+
|
| 4 |
+
# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org)
|
| 5 |
+
|
| 6 |
+
# Permission is hereby granted, free of charge, to any person obtaining a
|
| 7 |
+
# copy of this software and associated documentation files (the
|
| 8 |
+
# "Software"), to deal in the Software without restriction, including
|
| 9 |
+
# without limitation the rights to use, copy, modify, merge, publish,
|
| 10 |
+
# distribute, sublicense, and/or sell copies of the Software, and to
|
| 11 |
+
# permit persons to whom the Software is furnished to do so, subject to
|
| 12 |
+
# the following conditions:
|
| 13 |
+
|
| 14 |
+
# The above copyright notice and this permission notice shall be included
|
| 15 |
+
# in all copies or substantial portions of the Software.
|
| 16 |
+
|
| 17 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
| 18 |
+
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 19 |
+
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
| 20 |
+
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
| 21 |
+
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
| 22 |
+
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
| 23 |
+
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 24 |
+
|
| 25 |
+
"""
|
| 26 |
+
TNC: A Python interface to the TNC non-linear optimizer
|
| 27 |
+
|
| 28 |
+
TNC is a non-linear optimizer. To use it, you must provide a function to
|
| 29 |
+
minimize. The function must take one argument: the list of coordinates where to
|
| 30 |
+
evaluate the function; and it must return either a tuple, whose first element is the
|
| 31 |
+
value of the function, and whose second argument is the gradient of the function
|
| 32 |
+
(as a list of values); or None, to abort the minimization.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
from scipy.optimize import _moduleTNC as moduleTNC
|
| 36 |
+
from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
|
| 37 |
+
_prepare_scalar_function)
|
| 38 |
+
from ._constraints import old_bound_to_new
|
| 39 |
+
from scipy._lib._array_api import atleast_nd, array_namespace
|
| 40 |
+
|
| 41 |
+
from numpy import inf, array, zeros
|
| 42 |
+
|
| 43 |
+
__all__ = ['fmin_tnc']
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
MSG_NONE = 0 # No messages
|
| 47 |
+
MSG_ITER = 1 # One line per iteration
|
| 48 |
+
MSG_INFO = 2 # Informational messages
|
| 49 |
+
MSG_VERS = 4 # Version info
|
| 50 |
+
MSG_EXIT = 8 # Exit reasons
|
| 51 |
+
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT
|
| 52 |
+
|
| 53 |
+
MSGS = {
|
| 54 |
+
MSG_NONE: "No messages",
|
| 55 |
+
MSG_ITER: "One line per iteration",
|
| 56 |
+
MSG_INFO: "Informational messages",
|
| 57 |
+
MSG_VERS: "Version info",
|
| 58 |
+
MSG_EXIT: "Exit reasons",
|
| 59 |
+
MSG_ALL: "All messages"
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
INFEASIBLE = -1 # Infeasible (lower bound > upper bound)
|
| 63 |
+
LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0)
|
| 64 |
+
FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0)
|
| 65 |
+
XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0)
|
| 66 |
+
MAXFUN = 3 # Max. number of function evaluations reached
|
| 67 |
+
LSFAIL = 4 # Linear search failed
|
| 68 |
+
CONSTANT = 5 # All lower bounds are equal to the upper bounds
|
| 69 |
+
NOPROGRESS = 6 # Unable to progress
|
| 70 |
+
USERABORT = 7 # User requested end of minimization
|
| 71 |
+
|
| 72 |
+
RCSTRINGS = {
|
| 73 |
+
INFEASIBLE: "Infeasible (lower bound > upper bound)",
|
| 74 |
+
LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
|
| 75 |
+
FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
|
| 76 |
+
XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
|
| 77 |
+
MAXFUN: "Max. number of function evaluations reached",
|
| 78 |
+
LSFAIL: "Linear search failed",
|
| 79 |
+
CONSTANT: "All lower bounds are equal to the upper bounds",
|
| 80 |
+
NOPROGRESS: "Unable to progress",
|
| 81 |
+
USERABORT: "User requested end of minimization"
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
|
| 85 |
+
# SciPy
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None, callback=None):
    """
    Minimize a function with variables subject to bounds, using
    gradient information in a truncated Newton algorithm. This
    method wraps a C implementation of the algorithm.

    Parameters
    ----------
    func : callable ``func(x, *args)``
        Function to minimize.  Must do one of:

        1. Return f and g, where f is the value of the function and g its
           gradient (a list of floats).

        2. Return the function value but supply gradient function
           separately as `fprime`.

        3. Return the function value and set ``approx_grad=True``.

        If the function returns None, the minimization
        is aborted.
    x0 : array_like
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``, optional
        Gradient of `func`. If None, then either `func` must return the
        function value and the gradient (``f,g = func(x, *args)``)
        or `approx_grad` must be True.
    args : tuple, optional
        Arguments to pass to function.
    approx_grad : bool, optional
        If true, approximate the gradient numerically.
    bounds : list, optional
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float, optional
        Used if approx_grad is True. The stepsize in a finite
        difference approximation for fprime.
    scale : array_like, optional
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : array_like, optional
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int, optional
        Bit mask used to select messages display during
        minimization values defined in the MSGS dict. Defaults to
        MSG_ALL.
    disp : int, optional
        Integer interface to messages. 0 = no message, 5 = all messages
    maxCGit : int, optional
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxfun : int, optional
        Maximum number of function evaluation. If None, maxfun is
        set to max(100, 10*len(x0)). Defaults to None. Note that this function
        may violate the limit because of evaluating gradients by numerical
        differentiation.
    eta : float, optional
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float, optional
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float, optional
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float, optional
        Minimum function value estimate. Defaults to 0.
    ftol : float, optional
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0 defaults to -1.
    xtol : float, optional
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    pgtol : float, optional
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float, optional
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    x : ndarray
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code, see below

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'TNC' `method` in particular.

    Notes
    -----
    The underlying algorithm is truncated Newton, also called
    Newton Conjugate-Gradient. This method differs from
    scipy.optimize.fmin_ncg in that

    1. it wraps a C implementation of the algorithm
    2. it allows each variable to be given an upper and lower bound.

    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraint are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.

    Return codes are defined as follows::

        -1 : Infeasible (lower bound > upper bound)
         0 : Local minimum reached (|pg| ~= 0)
         1 : Converged (|f_n-f_(n-1)| ~= 0)
         2 : Converged (|x_n-x_(n-1)| ~= 0)
         3 : Max. number of function evaluations reached
         4 : Linear search failed
         5 : All lower bounds are equal to the upper bounds
         6 : Unable to progress
         7 : User requested end of minimization

    References
    ----------
    Wright S., Nocedal J. (2006), 'Numerical Optimization'

    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
    SIAM Journal of Numerical Analysis 21, pp. 770-778

    """
    # Normalize the three ways the objective/gradient pair may be supplied
    # into a (fun, jac) pair for _minimize_tnc.
    if approx_grad:
        fun = func
        jac = None          # gradient approximated numerically downstream
    elif fprime is None:
        # func returns (f, g): memoize so f and g are computed only once
        # per point even though the solver asks for them separately.
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime

    if disp is not None:  # disp takes precedence over messages
        mesg_num = disp
    else:
        # Map the integer message level 0..5 onto the MSG_* bitmask;
        # anything unrecognized falls back to MSG_ALL.
        mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
    # build options dict in the keyword vocabulary of _minimize_tnc
    # (note the renames: epsilon->eps, fmin->minfev, pgtol->gtol).
    opts = {'eps': epsilon,
            'scale': scale,
            'offset': offset,
            'mesg_num': mesg_num,
            'maxCGit': maxCGit,
            'maxfun': maxfun,
            'eta': eta,
            'stepmx': stepmx,
            'accuracy': accuracy,
            'minfev': fmin,
            'ftol': ftol,
            'xtol': xtol,
            'gtol': pgtol,
            'rescale': rescale,
            'disp': False}

    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)

    return res['x'], res['nfev'], res['status']
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
                  eps=1e-8, scale=None, offset=None, mesg_num=None,
                  maxCGit=-1, eta=-1, stepmx=0, accuracy=0,
                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
                  callback=None, finite_diff_rel_step=None, maxfun=None,
                  **unknown_options):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options
    -------
    eps : float or ndarray
        If `jac is None` the absolute step size used for numerical
        approximation of the jacobian via forward differences.
    scale : list of floats
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : float
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    disp : bool
        Set to True to print convergence messages.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    eta : float
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    minfev : float
        Minimum function value estimate. Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0 defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    gtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of the jacobian. The absolute step
        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.
    maxfun : int
        Maximum number of function evaluations. If None, `maxfun` is
        set to max(100, 10*len(x0)). Defaults to None.
    """
    _check_unknown_options(unknown_options)
    # Translate the minimize()-style option names to the names used by
    # the C wrapper below.
    fmin = minfev
    pgtol = gtol

    # Coerce x0 to a flat 1-D floating array; integer input is promoted
    # to float64, existing real-floating dtypes are preserved.
    xp = array_namespace(x0)
    x0 = atleast_nd(x0, ndim=1, xp=xp)
    dtype = xp.float64
    if xp.isdtype(x0.dtype, "real floating"):
        dtype = x0.dtype
    x0 = xp.reshape(xp.astype(x0, dtype), -1)

    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    new_bounds = old_bound_to_new(bounds)

    # mesg_num (integer 0..5 level) takes precedence over the boolean disp.
    if mesg_num is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    # ScalarFunction handles gradient evaluation / finite differencing and
    # counts function evaluations (sf.nfev) for the result object.
    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
                                  finite_diff_rel_step=finite_diff_rel_step,
                                  bounds=new_bounds)
    func_and_grad = sf.fun_and_grad

    # low, up : the bounds (lists of floats)
    #     if low is None, the lower bounds are removed.
    #     if up is None, the upper bounds are removed.
    #     low and up defaults to None
    # Missing bounds are encoded as +/-inf for the C layer.
    low = zeros(n)
    up = zeros(n)
    for i in range(n):
        if bounds[i] is None:
            l, u = -inf, inf
        else:
            l,u = bounds[i]
        if l is None:
            low[i] = -inf
        else:
            low[i] = l
        if u is None:
            up[i] = inf
        else:
            up[i] = u

    # Empty arrays tell the C code to use its default scale/offset.
    if scale is None:
        scale = array([])

    if offset is None:
        offset = array([])

    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize(
        func_and_grad, x0, low, up, scale,
        offset, messages, maxCGit, maxfun,
        eta, stepmx, accuracy, fmin, ftol,
        xtol, pgtol, rescale, callback
    )
    # the TNC documentation states: "On output, x, f and g may be very
    # slightly out of sync because of scaling". Therefore re-evaluate
    # func_and_grad so they are synced.
    funv, jacv = func_and_grad(x)

    # success: LOCALMINIMUM (0), FCONVERGED (1) or XCONVERGED (2).
    return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
                          nit=nit, status=rc, message=RCSTRINGS[rc],
                          success=(-1 < rc < 3))
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module contains the equality constrained SQP solver."""
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from .minimize_trustregion_constr import _minimize_trustregion_constr
|
| 5 |
+
|
| 6 |
+
__all__ = ['_minimize_trustregion_constr']
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (366 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc
ADDED
|
Binary file (12.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc
ADDED
|
Binary file (4.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc
ADDED
|
Binary file (20.3 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc
ADDED
|
Binary file (2.52 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc
ADDED
|
Binary file (9.65 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py
ADDED
|
@@ -0,0 +1,390 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import scipy.sparse as sps
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class CanonicalConstraint:
    """Canonical constraint to use with trust-constr algorithm.

    It represents the set of constraints of the form::

        f_eq(x) = 0
        f_ineq(x) <= 0

    where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
    below.

    The class is supposed to be instantiated by factory methods, which
    should prepare the parameters listed below.

    Parameters
    ----------
    n_eq, n_ineq : int
        Number of equality and inequality constraints respectively.
    fun : callable
        Function defining the constraints. The signature is
        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
        components and ``c_ineq`` is ndarray with `n_ineq` components.
    jac : callable
        Function to evaluate the Jacobian of the constraint. The signature
        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
        either ndarray of csr_matrix of shapes (n_eq, n) and (n_ineq, n),
        respectively.
    hess : callable
        Function to evaluate the Hessian of the constraints multiplied
        by Lagrange multipliers, that is
        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
        shape (n, n) and provide a matrix-vector product operation
        ``H.dot(p)``.
    keep_feasible : ndarray, shape (n_ineq,)
        Mask indicating which inequality constraints should be kept feasible.
    """
    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.fun = fun
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible

    @classmethod
    def from_PreparedConstraint(cls, constraint):
        """Create an instance from `PreparedConstraint` object."""
        lb, ub = constraint.bounds
        cfun = constraint.fun
        keep_feasible = constraint.keep_feasible

        # Dispatch on the bound structure to the cheapest canonical form.
        if np.all(lb == -np.inf) and np.all(ub == np.inf):
            # Entirely unbounded: no constraint at all.
            return cls.empty(cfun.n)
        elif np.all(lb == ub):
            return cls._equal_to_canonical(cfun, lb)
        elif np.all(lb == -np.inf):
            return cls._less_to_canonical(cfun, ub, keep_feasible)
        elif np.all(ub == np.inf):
            return cls._greater_to_canonical(cfun, lb, keep_feasible)
        else:
            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)

    @classmethod
    def empty(cls, n):
        """Create an "empty" instance.

        This "empty" instance is required to allow working with unconstrained
        problems as if they have some constraints.
        """
        empty_fun = np.empty(0)
        empty_jac = np.empty((0, n))
        empty_hess = sps.csr_matrix((n, n))

        def fun(x):
            return empty_fun, empty_fun

        def jac(x):
            return empty_jac, empty_jac

        def hess(x, v_eq, v_ineq):
            return empty_hess

        return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_))

    @classmethod
    def concatenate(cls, canonical_constraints, sparse_jacobian):
        """Concatenate multiple `CanonicalConstraint` into one.

        `sparse_jacobian` (bool) determines the Jacobian format of the
        concatenated constraint. Note that items in `canonical_constraints`
        must have their Jacobians in the same format.
        """
        def fun(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                    *[c.fun(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return np.hstack(eq_all), np.hstack(ineq_all)

        if sparse_jacobian:
            vstack = sps.vstack
        else:
            vstack = np.vstack

        def jac(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                    *[c.jac(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return vstack(eq_all), vstack(ineq_all)

        def hess(x, v_eq, v_ineq):
            # Slice the stacked multiplier vectors back into per-constraint
            # pieces, then sum the individual Hessians lazily through a
            # LinearOperator (only matrix-vector products are needed).
            hess_all = []
            index_eq = 0
            index_ineq = 0
            for c in canonical_constraints:
                vc_eq = v_eq[index_eq:index_eq + c.n_eq]
                vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq]
                hess_all.append(c.hess(x, vc_eq, vc_ineq))
                index_eq += c.n_eq
                index_ineq += c.n_ineq

            def matvec(p):
                result = np.zeros_like(p)
                for h in hess_all:
                    result += h.dot(p)
                return result

            n = x.shape[0]
            return sps.linalg.LinearOperator((n, n), matvec, dtype=float)

        n_eq = sum(c.n_eq for c in canonical_constraints)
        n_ineq = sum(c.n_ineq for c in canonical_constraints)
        keep_feasible = np.hstack([c.keep_feasible for c in
                                   canonical_constraints])

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _equal_to_canonical(cls, cfun, value):
        """Convert ``f(x) == value`` into the canonical equality form."""
        empty_fun = np.empty(0)
        n = cfun.n

        n_eq = value.shape[0]
        n_ineq = 0
        keep_feasible = np.empty(0, dtype=bool)

        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        def fun(x):
            return cfun.fun(x) - value, empty_fun

        def jac(x):
            return cfun.jac(x), empty_jac

        def hess(x, v_eq, v_ineq):
            return cfun.hess(x, v_eq)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _less_to_canonical(cls, cfun, ub, keep_feasible):
        """Convert ``f(x) <= ub`` into the canonical inequality form.

        Components with ``ub == inf`` impose no constraint and are dropped.
        """
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_ub = ub < np.inf
        n_eq = 0
        n_ineq = np.sum(finite_ub)

        if np.all(finite_ub):
            # Fast path: every component is constrained, no masking needed.
            def fun(x):
                return empty_fun, cfun.fun(x) - ub

            def jac(x):
                return empty_jac, cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, v_ineq)
        else:
            finite_ub = np.nonzero(finite_ub)[0]
            keep_feasible = keep_feasible[finite_ub]
            ub = ub[finite_ub]

            def fun(x):
                return empty_fun, cfun.fun(x)[finite_ub] - ub

            def jac(x):
                return empty_jac, cfun.jac(x)[finite_ub]

            def hess(x, v_eq, v_ineq):
                # Scatter the multipliers for the kept components back into a
                # full-length vector before calling the underlying Hessian.
                v = np.zeros(cfun.m)
                v[finite_ub] = v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _greater_to_canonical(cls, cfun, lb, keep_feasible):
        """Convert ``f(x) >= lb`` into the canonical inequality form.

        Rewritten as ``lb - f(x) <= 0``; components with ``lb == -inf``
        impose no constraint and are dropped.
        """
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_lb = lb > -np.inf
        n_eq = 0
        n_ineq = np.sum(finite_lb)

        if np.all(finite_lb):
            def fun(x):
                return empty_fun, lb - cfun.fun(x)

            def jac(x):
                return empty_jac, -cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, -v_ineq)
        else:
            finite_lb = np.nonzero(finite_lb)[0]
            keep_feasible = keep_feasible[finite_lb]
            lb = lb[finite_lb]

            def fun(x):
                return empty_fun, lb - cfun.fun(x)[finite_lb]

            def jac(x):
                return empty_jac, -cfun.jac(x)[finite_lb]

            def hess(x, v_eq, v_ineq):
                # Sign flip because the constraint was negated above.
                v = np.zeros(cfun.m)
                v[finite_lb] = -v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
        """Convert mixed ``lb <= f(x) <= ub`` bounds to the canonical form.

        Each component is classified as equality (lb == ub), one-sided
        (only lb or only ub finite), or two-sided interval; interval
        components contribute two inequalities (upper and lower).
        """
        lb_inf = lb == -np.inf
        ub_inf = ub == np.inf
        equal = lb == ub
        less = lb_inf & ~ub_inf
        greater = ub_inf & ~lb_inf
        interval = ~equal & ~lb_inf & ~ub_inf

        equal = np.nonzero(equal)[0]
        less = np.nonzero(less)[0]
        greater = np.nonzero(greater)[0]
        interval = np.nonzero(interval)[0]
        n_less = less.shape[0]
        n_greater = greater.shape[0]
        n_interval = interval.shape[0]
        # Interval components appear twice: f - ub <= 0 and lb - f <= 0.
        n_ineq = n_less + n_greater + 2 * n_interval
        n_eq = equal.shape[0]

        keep_feasible = np.hstack((keep_feasible[less],
                                   keep_feasible[greater],
                                   keep_feasible[interval],
                                   keep_feasible[interval]))

        def fun(x):
            f = cfun.fun(x)
            eq = f[equal] - lb[equal]
            le = f[less] - ub[less]
            ge = lb[greater] - f[greater]
            il = f[interval] - ub[interval]
            ig = lb[interval] - f[interval]
            return eq, np.hstack((le, ge, il, ig))

        def jac(x):
            J = cfun.jac(x)
            eq = J[equal]
            le = J[less]
            ge = -J[greater]
            il = J[interval]
            ig = -il
            if sps.issparse(J):
                ineq = sps.vstack((le, ge, il, ig))
            else:
                ineq = np.vstack((le, ge, il, ig))
            return eq, ineq

        def hess(x, v_eq, v_ineq):
            # v_ineq is packed in the same (less, greater, il, ig) order
            # that `fun` produces; unpack it accordingly.
            n_start = 0
            v_l = v_ineq[n_start:n_start + n_less]
            n_start += n_less
            v_g = v_ineq[n_start:n_start + n_greater]
            n_start += n_greater
            v_il = v_ineq[n_start:n_start + n_interval]
            n_start += n_interval
            v_ig = v_ineq[n_start:n_start + n_interval]

            v = np.zeros_like(lb)
            v[equal] = v_eq
            v[less] = v_l
            v[greater] = -v_g
            v[interval] = v_il - v_ig

            return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
    """Convert initial values of the constraints to the canonical format.

    The purpose is to avoid one additional call to the constraints at the
    initial point. It takes the values saved in `PreparedConstraint`,
    modifies and concatenates them into the canonical constraint format
    (equalities ``c_eq(x) = 0`` followed by inequalities ``c_ineq(x) <= 0``).
    """
    eq_vals, ineq_vals = [], []
    eq_jacs, ineq_jacs = [], []

    for constr in prepared_constraints:
        f = constr.fun.f
        J = constr.fun.J
        lb, ub = constr.bounds
        if np.all(lb == ub):
            # Pure equality constraint: f(x) - lb = 0.
            eq_vals.append(f - lb)
            eq_jacs.append(J)
        elif np.all(lb == -np.inf):
            # Only upper bounds: f(x) - ub <= 0, dropping infinite bounds.
            finite_ub = ub < np.inf
            ineq_vals.append(f[finite_ub] - ub[finite_ub])
            ineq_jacs.append(J[finite_ub])
        elif np.all(ub == np.inf):
            # Only lower bounds: lb - f(x) <= 0, dropping infinite bounds.
            finite_lb = lb > -np.inf
            ineq_vals.append(lb[finite_lb] - f[finite_lb])
            ineq_jacs.append(-J[finite_lb])
        else:
            # Mixed bound types: classify each component separately.
            lb_inf = lb == -np.inf
            ub_inf = ub == np.inf
            equal = lb == ub
            less = lb_inf & ~ub_inf
            greater = ub_inf & ~lb_inf
            interval = ~equal & ~lb_inf & ~ub_inf

            eq_vals.append(f[equal] - lb[equal])
            ineq_vals.append(f[less] - ub[less])
            ineq_vals.append(lb[greater] - f[greater])
            ineq_vals.append(f[interval] - ub[interval])
            ineq_vals.append(lb[interval] - f[interval])

            eq_jacs.append(J[equal])
            ineq_jacs.append(J[less])
            ineq_jacs.append(-J[greater])
            ineq_jacs.append(J[interval])
            ineq_jacs.append(-J[interval])

    c_eq = np.hstack(eq_vals) if eq_vals else np.empty(0)
    c_ineq = np.hstack(ineq_vals) if ineq_vals else np.empty(0)

    # Stack Jacobians in the matching (sparse or dense) representation.
    if sparse_jacobian:
        vstack = sps.vstack
        empty = sps.csr_matrix((0, n))
    else:
        vstack = np.vstack
        empty = np.empty((0, n))

    J_eq = vstack(eq_jacs) if eq_jacs else empty
    J_ineq = vstack(ineq_jacs) if ineq_jacs else empty

    return c_eq, c_ineq, J_eq, J_ineq
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Byrd-Omojokun Trust-Region SQP method."""
|
| 2 |
+
|
| 3 |
+
from scipy.sparse import eye as speye
|
| 4 |
+
from .projections import projections
|
| 5 |
+
from .qp_subproblem import modified_dogleg, projected_cg, box_intersections
|
| 6 |
+
import numpy as np
|
| 7 |
+
from numpy.linalg import norm
|
| 8 |
+
|
| 9 |
+
__all__ = ['equality_constrained_sqp']
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def default_scaling(x):
    """Return the identity (sparse) scaling matrix sized to ``x``.

    Used as the default ``scaling`` callable of `equality_constrained_sqp`:
    the identity means the step is applied to the variables unscaled.
    """
    (dim,) = np.shape(x)
    return speye(dim)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess,
                             x0, fun0, grad0, constr0,
                             jac0, stop_criteria,
                             state,
                             initial_penalty,
                             initial_trust_radius,
                             factorization_method,
                             trust_lb=None,
                             trust_ub=None,
                             scaling=default_scaling):
    """Solve nonlinear equality-constrained problem using trust-region SQP.

    Solve optimization problem:

        minimize fun(x)
        subject to: constr(x) = 0

    using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several
    implementation details are based on [2]_ and [3]_, p. 549.

    Notes on the callable parameters:
    ``fun_and_constr(x) -> (f, b)`` evaluates the objective and constraint
    values; ``grad_and_jac(x) -> (c, A)`` their first derivatives;
    ``lagr_hess(x, v)`` returns the Lagrangian Hessian for multipliers
    ``v``; ``stop_criteria(...)`` is queried at the top of every iteration
    and drives termination. ``fun0``, ``grad0``, ``constr0`` and ``jac0``
    are the values of those quantities at ``x0`` (avoiding one extra
    evaluation). Returns the final iterate ``x`` and ``state``.

    References
    ----------
    .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the
           implementation of an algorithm for large-scale equality
           constrained optimization." SIAM Journal on
           Optimization 8.3 (1998): 682-706.
    .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
           "An interior point algorithm for large-scale nonlinear
           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
    .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
           Second Edition (2006).
    """
    PENALTY_FACTOR = 0.3  # Rho from formula (3.51), reference [2]_, p.891.
    LARGE_REDUCTION_RATIO = 0.9
    INTERMEDIARY_REDUCTION_RATIO = 0.3
    SUFFICIENT_REDUCTION_RATIO = 1e-8  # Eta from reference [2]_, p.892.
    TRUST_ENLARGEMENT_FACTOR_L = 7.0
    TRUST_ENLARGEMENT_FACTOR_S = 2.0
    MAX_TRUST_REDUCTION = 0.5
    MIN_TRUST_REDUCTION = 0.1
    SOC_THRESHOLD = 0.1
    TR_FACTOR = 0.8  # Zeta from formula (3.21), reference [2]_, p.885.
    BOX_FACTOR = 0.5

    n, = np.shape(x0)  # Number of parameters

    # Set default lower and upper bounds.
    if trust_lb is None:
        trust_lb = np.full(n, -np.inf)
    if trust_ub is None:
        trust_ub = np.full(n, np.inf)

    # Initial values
    x = np.copy(x0)
    trust_radius = initial_trust_radius
    penalty = initial_penalty
    # Compute Values
    f = fun0
    c = grad0
    b = constr0
    A = jac0
    S = scaling(x)
    # Get projections
    try:
        Z, LS, Y = projections(A, factorization_method)
    except ValueError as e:
        if str(e) == "expected square matrix":
            # can be the case if there are more equality
            # constraints than independent variables
            raise ValueError(
                "The 'expected square matrix' error can occur if there are"
                " more equality constraints than independent variables."
                " Consider how your constraints are set up, or use"
                " factorization_method='SVDFactorization'."
            ) from e
        else:
            raise e

    # Compute least-square lagrange multipliers
    v = -LS.dot(c)
    # Compute Hessian
    H = lagr_hess(x, v)

    # Update state parameters
    optimality = norm(c + A.T.dot(v), np.inf)
    constr_violation = norm(b, np.inf) if len(b) > 0 else 0
    cg_info = {'niter': 0, 'stop_cond': 0,
               'hits_boundary': False}

    last_iteration_failed = False
    while not stop_criteria(state, x, last_iteration_failed,
                            optimality, constr_violation,
                            trust_radius, penalty, cg_info):
        # Normal Step - `dn`
        # minimize 1/2*||A dn + b||^2
        # subject to:
        # ||dn|| <= TR_FACTOR * trust_radius
        # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub.
        dn = modified_dogleg(A, Y, b,
                             TR_FACTOR*trust_radius,
                             BOX_FACTOR*trust_lb,
                             BOX_FACTOR*trust_ub)

        # Tangential Step - `dt`
        # Solve the QP problem:
        # minimize 1/2 dt.T H dt + dt.T (H dn + c)
        # subject to:
        # A dt = 0
        # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2)
        # lb - dn <= dt <= ub - dn
        c_t = H.dot(dn) + c
        b_t = np.zeros_like(b)
        trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2)
        lb_t = trust_lb - dn
        ub_t = trust_ub - dn
        dt, cg_info = projected_cg(H, c_t, Z, Y, b_t,
                                   trust_radius_t,
                                   lb_t, ub_t)

        # Compute update (normal + tangential steps).
        d = dn + dt

        # Compute second order model: 1/2 d H d + c.T d + f.
        quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d)
        # Compute linearized constraint: l = A d + b.
        linearized_constr = A.dot(d)+b
        # Compute new penalty parameter according to formula (3.52),
        # reference [2]_, p.891.
        vpred = norm(b) - norm(linearized_constr)
        # Guarantee `vpred` always positive,
        # regardless of roundoff errors.
        vpred = max(1e-16, vpred)
        previous_penalty = penalty
        if quadratic_model > 0:
            new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred)
            penalty = max(penalty, new_penalty)
        # Compute predicted reduction according to formula (3.52),
        # reference [2]_, p.891.
        predicted_reduction = -quadratic_model + penalty*vpred

        # Compute merit function at current point
        merit_function = f + penalty*norm(b)
        # Evaluate function and constraints at trial point
        x_next = x + S.dot(d)
        f_next, b_next = fun_and_constr(x_next)
        # Compute merit function at trial point
        merit_function_next = f_next + penalty*norm(b_next)
        # Compute actual reduction according to formula (3.54),
        # reference [2]_, p.892.
        actual_reduction = merit_function - merit_function_next
        # Compute reduction ratio
        reduction_ratio = actual_reduction / predicted_reduction

        # Second order correction (SOC), reference [2]_, p.892.
        if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
                norm(dn) <= SOC_THRESHOLD * norm(dt):
            # Compute second order correction
            y = -Y.dot(b_next)
            # Make sure increment is inside box constraints
            _, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
            # Compute tentative point
            x_soc = x + S.dot(d + t*y)
            f_soc, b_soc = fun_and_constr(x_soc)
            # Recompute actual reduction
            merit_function_soc = f_soc + penalty*norm(b_soc)
            actual_reduction_soc = merit_function - merit_function_soc
            # Recompute reduction ratio
            reduction_ratio_soc = actual_reduction_soc / predicted_reduction
            if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
                x_next = x_soc
                f_next = f_soc
                b_next = b_soc
                reduction_ratio = reduction_ratio_soc

        # Readjust trust region step, formula (3.55), reference [2]_, p.892.
        if reduction_ratio >= LARGE_REDUCTION_RATIO:
            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
                               trust_radius)
        elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
                               trust_radius)
        # Reduce trust region step, according to reference [3]_, p.696.
        elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
            trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) /
                               (1-reduction_ratio))
            new_trust_radius = trust_reduction * norm(d)
            if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
                trust_radius *= MAX_TRUST_REDUCTION
            elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
                trust_radius = new_trust_radius
            else:
                trust_radius *= MIN_TRUST_REDUCTION

        # Update iteration
        if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
            # Step accepted: move to the trial point and refresh all
            # derivative-dependent quantities there.
            x = x_next
            f, b = f_next, b_next
            c, A = grad_and_jac(x)
            S = scaling(x)
            # Get projections
            Z, LS, Y = projections(A, factorization_method)
            # Compute least-square lagrange multipliers
            v = -LS.dot(c)
            # Compute Hessian
            H = lagr_hess(x, v)
            # Set Flag
            last_iteration_failed = False
            # Optimality values
            optimality = norm(c + A.T.dot(v), np.inf)
            constr_violation = norm(b, np.inf) if len(b) > 0 else 0
        else:
            # Step rejected: undo the penalty increase for this iteration.
            penalty = previous_penalty
            last_iteration_failed = True

    return x, state
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
ADDED
|
@@ -0,0 +1,564 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy.sparse.linalg import LinearOperator
|
| 4 |
+
from .._differentiable_functions import VectorFunction
|
| 5 |
+
from .._constraints import (
|
| 6 |
+
NonlinearConstraint, LinearConstraint, PreparedConstraint, Bounds, strict_bounds)
|
| 7 |
+
from .._hessian_update_strategy import BFGS
|
| 8 |
+
from .._optimize import OptimizeResult
|
| 9 |
+
from .._differentiable_functions import ScalarFunction
|
| 10 |
+
from .equality_constrained_sqp import equality_constrained_sqp
|
| 11 |
+
from .canonical_constraint import (CanonicalConstraint,
|
| 12 |
+
initial_constraints_as_canonical)
|
| 13 |
+
from .tr_interior_point import tr_interior_point
|
| 14 |
+
from .report import BasicReport, SQPReport, IPReport
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Human-readable descriptions of why the solver stopped, keyed by the
# integer status code.
TERMINATION_MESSAGES = {
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`xtol` termination condition is satisfied.",
    3: "`callback` function requested termination."
}
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class HessianLinearOperator:
    """Factory turning a Hessian-vector-product callable into a LinearOperator.

    Given ``hessp(x, p, *args)`` returning the product of the Hessian at
    ``x`` with a vector ``p``, calling an instance at ``x`` yields an
    ``n x n`` ``LinearOperator`` whose ``matvec`` computes that product.
    """

    def __init__(self, hessp, n):
        self.hessp = hessp
        self.n = n

    def __call__(self, x, *args):
        # Close over the evaluation point `x` (and any extra args) so the
        # returned operator multiplies by the Hessian at that fixed point.
        return LinearOperator((self.n, self.n),
                              matvec=lambda p: self.hessp(x, p, *args))
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class LagrangianHessian:
    """The Hessian of the Lagrangian as LinearOperator.

    The Lagrangian is the objective function plus the constraints weighted
    by the Lagrange multipliers, so its Hessian is the sum of the objective
    Hessian and the (multiplier-weighted) constraints Hessian.
    """

    def __init__(self, n, objective_hess, constraints_hess):
        self.n = n
        self.objective_hess = objective_hess
        self.constraints_hess = constraints_hess

    def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
        H_f = self.objective_hess(x)
        H_c = self.constraints_hess(x, v_eq, v_ineq)
        # Apply the sum of the two Hessians lazily, one vector at a time.
        return LinearOperator((self.n, self.n),
                              lambda p: H_f.dot(p) + H_c.dot(p))
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
                     start_time, tr_radius, constr_penalty, cg_info):
    """Refresh `state` after one SQP iteration and return it.

    Evaluation counters and timing are always updated; the solution-
    dependent fields (x, fun, grad, multipliers, constraint values,
    optimality and constraint violation) are refreshed only when the last
    iteration produced an accepted step (``last_iteration_failed`` is
    False).
    """
    state.nit += 1
    state.nfev = objective.nfev
    state.njev = objective.ngev
    state.nhev = objective.nhev
    # Per-constraint counters exist only for nonlinear (VectorFunction)
    # constraints; linear constraints report 0.
    state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]

    if not last_iteration_failed:
        state.x = x
        state.fun = objective.f
        state.grad = objective.g
        state.v = [c.fun.v for c in prepared_constraints]
        state.constr = [c.fun.f for c in prepared_constraints]
        state.jac = [c.fun.J for c in prepared_constraints]
        # Compute Lagrangian Gradient
        state.lagrangian_grad = np.copy(state.grad)
        for c in prepared_constraints:
            state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
        state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
        # Compute maximum constraint violation: the largest excess below a
        # lower bound or above an upper bound across all constraints.
        state.constr_violation = 0
        for i in range(len(prepared_constraints)):
            lb, ub = prepared_constraints[i].bounds
            c = state.constr[i]
            state.constr_violation = np.max([state.constr_violation,
                                             np.max(lb - c),
                                             np.max(c - ub)])

    state.execution_time = time.time() - start_time
    state.tr_radius = tr_radius
    state.constr_penalty = constr_penalty
    # CG iterations accumulate across the whole run; the stop condition is
    # only the most recent one.
    state.cg_niter += cg_info["niter"]
    state.cg_stop_cond = cg_info["stop_cond"]

    return state
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def update_state_ip(state, x, last_iteration_failed, objective,
                    prepared_constraints, start_time,
                    tr_radius, constr_penalty, cg_info,
                    barrier_parameter, barrier_tolerance):
    """Refresh `state` for the interior-point method and return it.

    Delegates all common bookkeeping to `update_state_sqp` and then records
    the two barrier-specific quantities on top of it.
    """
    result = update_state_sqp(state, x, last_iteration_failed, objective,
                              prepared_constraints, start_time, tr_radius,
                              constr_penalty, cg_info)
    result.barrier_parameter = barrier_parameter
    result.barrier_tolerance = barrier_tolerance
    return result
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def _minimize_trustregion_constr(fun, x0, args, grad,
|
| 115 |
+
hess, hessp, bounds, constraints,
|
| 116 |
+
xtol=1e-8, gtol=1e-8,
|
| 117 |
+
barrier_tol=1e-8,
|
| 118 |
+
sparse_jacobian=None,
|
| 119 |
+
callback=None, maxiter=1000,
|
| 120 |
+
verbose=0, finite_diff_rel_step=None,
|
| 121 |
+
initial_constr_penalty=1.0, initial_tr_radius=1.0,
|
| 122 |
+
initial_barrier_parameter=0.1,
|
| 123 |
+
initial_barrier_tolerance=0.1,
|
| 124 |
+
factorization_method=None,
|
| 125 |
+
disp=False):
|
| 126 |
+
"""Minimize a scalar function subject to constraints.
|
| 127 |
+
|
| 128 |
+
Parameters
|
| 129 |
+
----------
|
| 130 |
+
gtol : float, optional
|
| 131 |
+
Tolerance for termination by the norm of the Lagrangian gradient.
|
| 132 |
+
The algorithm will terminate when both the infinity norm (i.e., max
|
| 133 |
+
abs value) of the Lagrangian gradient and the constraint violation
|
| 134 |
+
are smaller than ``gtol``. Default is 1e-8.
|
| 135 |
+
xtol : float, optional
|
| 136 |
+
Tolerance for termination by the change of the independent variable.
|
| 137 |
+
The algorithm will terminate when ``tr_radius < xtol``, where
|
| 138 |
+
``tr_radius`` is the radius of the trust region used in the algorithm.
|
| 139 |
+
Default is 1e-8.
|
| 140 |
+
barrier_tol : float, optional
|
| 141 |
+
Threshold on the barrier parameter for the algorithm termination.
|
| 142 |
+
When inequality constraints are present, the algorithm will terminate
|
| 143 |
+
only when the barrier parameter is less than `barrier_tol`.
|
| 144 |
+
Default is 1e-8.
|
| 145 |
+
sparse_jacobian : {bool, None}, optional
|
| 146 |
+
Determines how to represent Jacobians of the constraints. If bool,
|
| 147 |
+
then Jacobians of all the constraints will be converted to the
|
| 148 |
+
corresponding format. If None (default), then Jacobians won't be
|
| 149 |
+
converted, but the algorithm can proceed only if they all have the
|
| 150 |
+
same format.
|
| 151 |
+
initial_tr_radius: float, optional
|
| 152 |
+
Initial trust radius. The trust radius gives the maximum distance
|
| 153 |
+
between solution points in consecutive iterations. It reflects the
|
| 154 |
+
trust the algorithm puts in the local approximation of the optimization
|
| 155 |
+
problem. For an accurate local approximation the trust-region should be
|
| 156 |
+
large and for an approximation valid only close to the current point it
|
| 157 |
+
should be a small one. The trust radius is automatically updated throughout
|
| 158 |
+
the optimization process, with ``initial_tr_radius`` being its initial value.
|
| 159 |
+
Default is 1 (recommended in [1]_, p. 19).
|
| 160 |
+
initial_constr_penalty : float, optional
|
| 161 |
+
Initial constraints penalty parameter. The penalty parameter is used for
|
| 162 |
+
balancing the requirements of decreasing the objective function
|
| 163 |
+
and satisfying the constraints. It is used for defining the merit function:
|
| 164 |
+
``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
|
| 165 |
+
where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
|
| 166 |
+
the constraints. The merit function is used for accepting or rejecting
|
| 167 |
+
trial points and ``constr_penalty`` weights the two conflicting goals
|
| 168 |
+
of reducing objective function and constraints. The penalty is automatically
|
| 169 |
+
updated throughout the optimization process, with
|
| 170 |
+
``initial_constr_penalty`` being its initial value. Default is 1
|
| 171 |
+
(recommended in [1]_, p 19).
|
| 172 |
+
initial_barrier_parameter, initial_barrier_tolerance: float, optional
|
| 173 |
+
Initial barrier parameter and initial tolerance for the barrier subproblem.
|
| 174 |
+
Both are used only when inequality constraints are present. For dealing with
|
| 175 |
+
optimization problems ``min_x f(x)`` subject to inequality constraints
|
| 176 |
+
``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
|
| 177 |
+
``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
|
| 178 |
+
constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
|
| 179 |
+
is solved for decreasing values of ``barrier_parameter`` and with decreasing
|
| 180 |
+
tolerances for the termination, starting with ``initial_barrier_parameter``
|
| 181 |
+
for the barrier parameter and ``initial_barrier_tolerance`` for the
|
| 182 |
+
barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
|
| 183 |
+
Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
|
| 184 |
+
with the same prefactor.
|
| 185 |
+
factorization_method : string or None, optional
|
| 186 |
+
Method to factorize the Jacobian of the constraints. Use None (default)
|
| 187 |
+
for the auto selection or one of:
|
| 188 |
+
|
| 189 |
+
- 'NormalEquation' (requires scikit-sparse)
|
| 190 |
+
- 'AugmentedSystem'
|
| 191 |
+
- 'QRFactorization'
|
| 192 |
+
- 'SVDFactorization'
|
| 193 |
+
|
| 194 |
+
The methods 'NormalEquation' and 'AugmentedSystem' can be used only
|
| 195 |
+
with sparse constraints. The projections required by the algorithm
|
| 196 |
+
will be computed using, respectively, the normal equation and the
|
| 197 |
+
augmented system approaches explained in [1]_. 'NormalEquation'
|
| 198 |
+
computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
|
| 199 |
+
performs the LU factorization of an augmented system. They usually
|
| 200 |
+
provide similar results. 'AugmentedSystem' is used by default for
|
| 201 |
+
sparse matrices.
|
| 202 |
+
|
| 203 |
+
The methods 'QRFactorization' and 'SVDFactorization' can be used
|
| 204 |
+
only with dense constraints. They compute the required projections
|
| 205 |
+
using, respectively, QR and SVD factorizations. The 'SVDFactorization'
|
| 206 |
+
method can cope with Jacobian matrices with deficient row rank and will
|
| 207 |
+
be used whenever other factorization methods fail (which may imply the
|
| 208 |
+
conversion of sparse matrices to a dense format when required).
|
| 209 |
+
By default, 'QRFactorization' is used for dense matrices.
|
| 210 |
+
finite_diff_rel_step : None or array_like, optional
|
| 211 |
+
Relative step size for the finite difference approximation.
|
| 212 |
+
maxiter : int, optional
|
| 213 |
+
Maximum number of algorithm iterations. Default is 1000.
|
| 214 |
+
verbose : {0, 1, 2}, optional
|
| 215 |
+
Level of algorithm's verbosity:
|
| 216 |
+
|
| 217 |
+
* 0 (default) : work silently.
|
| 218 |
+
* 1 : display a termination report.
|
| 219 |
+
* 2 : display progress during iterations.
|
| 220 |
+
* 3 : display progress during iterations (more complete report).
|
| 221 |
+
|
| 222 |
+
disp : bool, optional
|
| 223 |
+
If True (default), then `verbose` will be set to 1 if it was 0.
|
| 224 |
+
|
| 225 |
+
Returns
|
| 226 |
+
-------
|
| 227 |
+
`OptimizeResult` with the fields documented below. Note the following:
|
| 228 |
+
|
| 229 |
+
1. All values corresponding to the constraints are ordered as they
|
| 230 |
+
were passed to the solver. And values corresponding to `bounds`
|
| 231 |
+
constraints are put *after* other constraints.
|
| 232 |
+
2. All numbers of function, Jacobian or Hessian evaluations correspond
|
| 233 |
+
to numbers of actual Python function calls. It means, for example,
|
| 234 |
+
that if a Jacobian is estimated by finite differences, then the
|
| 235 |
+
number of Jacobian evaluations will be zero and the number of
|
| 236 |
+
function evaluations will be incremented by all calls during the
|
| 237 |
+
finite difference estimation.
|
| 238 |
+
|
| 239 |
+
x : ndarray, shape (n,)
|
| 240 |
+
Solution found.
|
| 241 |
+
optimality : float
|
| 242 |
+
Infinity norm of the Lagrangian gradient at the solution.
|
| 243 |
+
constr_violation : float
|
| 244 |
+
Maximum constraint violation at the solution.
|
| 245 |
+
fun : float
|
| 246 |
+
Objective function at the solution.
|
| 247 |
+
grad : ndarray, shape (n,)
|
| 248 |
+
Gradient of the objective function at the solution.
|
| 249 |
+
lagrangian_grad : ndarray, shape (n,)
|
| 250 |
+
Gradient of the Lagrangian function at the solution.
|
| 251 |
+
nit : int
|
| 252 |
+
Total number of iterations.
|
| 253 |
+
nfev : integer
|
| 254 |
+
Number of the objective function evaluations.
|
| 255 |
+
njev : integer
|
| 256 |
+
Number of the objective function gradient evaluations.
|
| 257 |
+
nhev : integer
|
| 258 |
+
Number of the objective function Hessian evaluations.
|
| 259 |
+
cg_niter : int
|
| 260 |
+
Total number of the conjugate gradient method iterations.
|
| 261 |
+
method : {'equality_constrained_sqp', 'tr_interior_point'}
|
| 262 |
+
Optimization method used.
|
| 263 |
+
constr : list of ndarray
|
| 264 |
+
List of constraint values at the solution.
|
| 265 |
+
jac : list of {ndarray, sparse matrix}
|
| 266 |
+
List of the Jacobian matrices of the constraints at the solution.
|
| 267 |
+
v : list of ndarray
|
| 268 |
+
List of the Lagrange multipliers for the constraints at the solution.
|
| 269 |
+
For an inequality constraint a positive multiplier means that the upper
|
| 270 |
+
bound is active, a negative multiplier means that the lower bound is
|
| 271 |
+
active and if a multiplier is zero it means the constraint is not
|
| 272 |
+
active.
|
| 273 |
+
constr_nfev : list of int
|
| 274 |
+
Number of constraint evaluations for each of the constraints.
|
| 275 |
+
constr_njev : list of int
|
| 276 |
+
Number of Jacobian matrix evaluations for each of the constraints.
|
| 277 |
+
constr_nhev : list of int
|
| 278 |
+
Number of Hessian evaluations for each of the constraints.
|
| 279 |
+
tr_radius : float
|
| 280 |
+
Radius of the trust region at the last iteration.
|
| 281 |
+
constr_penalty : float
|
| 282 |
+
Penalty parameter at the last iteration, see `initial_constr_penalty`.
|
| 283 |
+
barrier_tolerance : float
|
| 284 |
+
Tolerance for the barrier subproblem at the last iteration.
|
| 285 |
+
Only for problems with inequality constraints.
|
| 286 |
+
barrier_parameter : float
|
| 287 |
+
Barrier parameter at the last iteration. Only for problems
|
| 288 |
+
with inequality constraints.
|
| 289 |
+
execution_time : float
|
| 290 |
+
Total execution time.
|
| 291 |
+
message : str
|
| 292 |
+
Termination message.
|
| 293 |
+
status : {0, 1, 2, 3}
|
| 294 |
+
Termination status:
|
| 295 |
+
|
| 296 |
+
* 0 : The maximum number of function evaluations is exceeded.
|
| 297 |
+
* 1 : `gtol` termination condition is satisfied.
|
| 298 |
+
* 2 : `xtol` termination condition is satisfied.
|
| 299 |
+
* 3 : `callback` function requested termination.
|
| 300 |
+
|
| 301 |
+
cg_stop_cond : int
|
| 302 |
+
Reason for CG subproblem termination at the last iteration:
|
| 303 |
+
|
| 304 |
+
* 0 : CG subproblem not evaluated.
|
| 305 |
+
* 1 : Iteration limit was reached.
|
| 306 |
+
* 2 : Reached the trust-region boundary.
|
| 307 |
+
* 3 : Negative curvature detected.
|
| 308 |
+
* 4 : Tolerance was satisfied.
|
| 309 |
+
|
| 310 |
+
References
|
| 311 |
+
----------
|
| 312 |
+
.. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
|
| 313 |
+
Trust region methods. 2000. Siam. pp. 19.
|
| 314 |
+
"""
|
| 315 |
+
x0 = np.atleast_1d(x0).astype(float)
|
| 316 |
+
n_vars = np.size(x0)
|
| 317 |
+
if hess is None:
|
| 318 |
+
if callable(hessp):
|
| 319 |
+
hess = HessianLinearOperator(hessp, n_vars)
|
| 320 |
+
else:
|
| 321 |
+
hess = BFGS()
|
| 322 |
+
if disp and verbose == 0:
|
| 323 |
+
verbose = 1
|
| 324 |
+
|
| 325 |
+
if bounds is not None:
|
| 326 |
+
modified_lb = np.nextafter(bounds.lb, -np.inf, where=bounds.lb > -np.inf)
|
| 327 |
+
modified_ub = np.nextafter(bounds.ub, np.inf, where=bounds.ub < np.inf)
|
| 328 |
+
modified_lb = np.where(np.isfinite(bounds.lb), modified_lb, bounds.lb)
|
| 329 |
+
modified_ub = np.where(np.isfinite(bounds.ub), modified_ub, bounds.ub)
|
| 330 |
+
bounds = Bounds(modified_lb, modified_ub, keep_feasible=bounds.keep_feasible)
|
| 331 |
+
finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
|
| 332 |
+
bounds.keep_feasible, n_vars)
|
| 333 |
+
else:
|
| 334 |
+
finite_diff_bounds = (-np.inf, np.inf)
|
| 335 |
+
|
| 336 |
+
# Define Objective Function
|
| 337 |
+
objective = ScalarFunction(fun, x0, args, grad, hess,
|
| 338 |
+
finite_diff_rel_step, finite_diff_bounds)
|
| 339 |
+
|
| 340 |
+
# Put constraints in list format when needed.
|
| 341 |
+
if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
|
| 342 |
+
constraints = [constraints]
|
| 343 |
+
|
| 344 |
+
# Prepare constraints.
|
| 345 |
+
prepared_constraints = [
|
| 346 |
+
PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
|
| 347 |
+
for c in constraints]
|
| 348 |
+
|
| 349 |
+
# Check that all constraints are either sparse or dense.
|
| 350 |
+
n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
|
| 351 |
+
if 0 < n_sparse < len(prepared_constraints):
|
| 352 |
+
raise ValueError("All constraints must have the same kind of the "
|
| 353 |
+
"Jacobian --- either all sparse or all dense. "
|
| 354 |
+
"You can set the sparsity globally by setting "
|
| 355 |
+
"`sparse_jacobian` to either True of False.")
|
| 356 |
+
if prepared_constraints:
|
| 357 |
+
sparse_jacobian = n_sparse > 0
|
| 358 |
+
|
| 359 |
+
if bounds is not None:
|
| 360 |
+
if sparse_jacobian is None:
|
| 361 |
+
sparse_jacobian = True
|
| 362 |
+
prepared_constraints.append(PreparedConstraint(bounds, x0,
|
| 363 |
+
sparse_jacobian))
|
| 364 |
+
|
| 365 |
+
# Concatenate initial constraints to the canonical form.
|
| 366 |
+
c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
|
| 367 |
+
n_vars, prepared_constraints, sparse_jacobian)
|
| 368 |
+
|
| 369 |
+
# Prepare all canonical constraints and concatenate it into one.
|
| 370 |
+
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
|
| 371 |
+
for c in prepared_constraints]
|
| 372 |
+
|
| 373 |
+
if len(canonical_all) == 0:
|
| 374 |
+
canonical = CanonicalConstraint.empty(n_vars)
|
| 375 |
+
elif len(canonical_all) == 1:
|
| 376 |
+
canonical = canonical_all[0]
|
| 377 |
+
else:
|
| 378 |
+
canonical = CanonicalConstraint.concatenate(canonical_all,
|
| 379 |
+
sparse_jacobian)
|
| 380 |
+
|
| 381 |
+
# Generate the Hessian of the Lagrangian.
|
| 382 |
+
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
|
| 383 |
+
|
| 384 |
+
# Choose appropriate method
|
| 385 |
+
if canonical.n_ineq == 0:
|
| 386 |
+
method = 'equality_constrained_sqp'
|
| 387 |
+
else:
|
| 388 |
+
method = 'tr_interior_point'
|
| 389 |
+
|
| 390 |
+
# Construct OptimizeResult
|
| 391 |
+
state = OptimizeResult(
|
| 392 |
+
nit=0, nfev=0, njev=0, nhev=0,
|
| 393 |
+
cg_niter=0, cg_stop_cond=0,
|
| 394 |
+
fun=objective.f, grad=objective.g,
|
| 395 |
+
lagrangian_grad=np.copy(objective.g),
|
| 396 |
+
constr=[c.fun.f for c in prepared_constraints],
|
| 397 |
+
jac=[c.fun.J for c in prepared_constraints],
|
| 398 |
+
constr_nfev=[0 for c in prepared_constraints],
|
| 399 |
+
constr_njev=[0 for c in prepared_constraints],
|
| 400 |
+
constr_nhev=[0 for c in prepared_constraints],
|
| 401 |
+
v=[c.fun.v for c in prepared_constraints],
|
| 402 |
+
method=method)
|
| 403 |
+
|
| 404 |
+
# Start counting
|
| 405 |
+
start_time = time.time()
|
| 406 |
+
|
| 407 |
+
# Define stop criteria
|
| 408 |
+
if method == 'equality_constrained_sqp':
|
| 409 |
+
def stop_criteria(state, x, last_iteration_failed,
|
| 410 |
+
optimality, constr_violation,
|
| 411 |
+
tr_radius, constr_penalty, cg_info):
|
| 412 |
+
state = update_state_sqp(state, x, last_iteration_failed,
|
| 413 |
+
objective, prepared_constraints,
|
| 414 |
+
start_time, tr_radius, constr_penalty,
|
| 415 |
+
cg_info)
|
| 416 |
+
if verbose == 2:
|
| 417 |
+
BasicReport.print_iteration(state.nit,
|
| 418 |
+
state.nfev,
|
| 419 |
+
state.cg_niter,
|
| 420 |
+
state.fun,
|
| 421 |
+
state.tr_radius,
|
| 422 |
+
state.optimality,
|
| 423 |
+
state.constr_violation)
|
| 424 |
+
elif verbose > 2:
|
| 425 |
+
SQPReport.print_iteration(state.nit,
|
| 426 |
+
state.nfev,
|
| 427 |
+
state.cg_niter,
|
| 428 |
+
state.fun,
|
| 429 |
+
state.tr_radius,
|
| 430 |
+
state.optimality,
|
| 431 |
+
state.constr_violation,
|
| 432 |
+
state.constr_penalty,
|
| 433 |
+
state.cg_stop_cond)
|
| 434 |
+
state.status = None
|
| 435 |
+
state.niter = state.nit # Alias for callback (backward-compatibility)
|
| 436 |
+
if callback is not None:
|
| 437 |
+
callback_stop = False
|
| 438 |
+
try:
|
| 439 |
+
callback_stop = callback(state)
|
| 440 |
+
except StopIteration:
|
| 441 |
+
callback_stop = True
|
| 442 |
+
if callback_stop:
|
| 443 |
+
state.status = 3
|
| 444 |
+
return True
|
| 445 |
+
if state.optimality < gtol and state.constr_violation < gtol:
|
| 446 |
+
state.status = 1
|
| 447 |
+
elif state.tr_radius < xtol:
|
| 448 |
+
state.status = 2
|
| 449 |
+
elif state.nit >= maxiter:
|
| 450 |
+
state.status = 0
|
| 451 |
+
return state.status in (0, 1, 2, 3)
|
| 452 |
+
elif method == 'tr_interior_point':
|
| 453 |
+
def stop_criteria(state, x, last_iteration_failed, tr_radius,
|
| 454 |
+
constr_penalty, cg_info, barrier_parameter,
|
| 455 |
+
barrier_tolerance):
|
| 456 |
+
state = update_state_ip(state, x, last_iteration_failed,
|
| 457 |
+
objective, prepared_constraints,
|
| 458 |
+
start_time, tr_radius, constr_penalty,
|
| 459 |
+
cg_info, barrier_parameter, barrier_tolerance)
|
| 460 |
+
if verbose == 2:
|
| 461 |
+
BasicReport.print_iteration(state.nit,
|
| 462 |
+
state.nfev,
|
| 463 |
+
state.cg_niter,
|
| 464 |
+
state.fun,
|
| 465 |
+
state.tr_radius,
|
| 466 |
+
state.optimality,
|
| 467 |
+
state.constr_violation)
|
| 468 |
+
elif verbose > 2:
|
| 469 |
+
IPReport.print_iteration(state.nit,
|
| 470 |
+
state.nfev,
|
| 471 |
+
state.cg_niter,
|
| 472 |
+
state.fun,
|
| 473 |
+
state.tr_radius,
|
| 474 |
+
state.optimality,
|
| 475 |
+
state.constr_violation,
|
| 476 |
+
state.constr_penalty,
|
| 477 |
+
state.barrier_parameter,
|
| 478 |
+
state.cg_stop_cond)
|
| 479 |
+
state.status = None
|
| 480 |
+
state.niter = state.nit # Alias for callback (backward compatibility)
|
| 481 |
+
if callback is not None:
|
| 482 |
+
callback_stop = False
|
| 483 |
+
try:
|
| 484 |
+
callback_stop = callback(state)
|
| 485 |
+
except StopIteration:
|
| 486 |
+
callback_stop = True
|
| 487 |
+
if callback_stop:
|
| 488 |
+
state.status = 3
|
| 489 |
+
return True
|
| 490 |
+
if state.optimality < gtol and state.constr_violation < gtol:
|
| 491 |
+
state.status = 1
|
| 492 |
+
elif (state.tr_radius < xtol
|
| 493 |
+
and state.barrier_parameter < barrier_tol):
|
| 494 |
+
state.status = 2
|
| 495 |
+
elif state.nit >= maxiter:
|
| 496 |
+
state.status = 0
|
| 497 |
+
return state.status in (0, 1, 2, 3)
|
| 498 |
+
|
| 499 |
+
if verbose == 2:
|
| 500 |
+
BasicReport.print_header()
|
| 501 |
+
elif verbose > 2:
|
| 502 |
+
if method == 'equality_constrained_sqp':
|
| 503 |
+
SQPReport.print_header()
|
| 504 |
+
elif method == 'tr_interior_point':
|
| 505 |
+
IPReport.print_header()
|
| 506 |
+
|
| 507 |
+
# Call inferior function to do the optimization
|
| 508 |
+
if method == 'equality_constrained_sqp':
|
| 509 |
+
def fun_and_constr(x):
|
| 510 |
+
f = objective.fun(x)
|
| 511 |
+
c_eq, _ = canonical.fun(x)
|
| 512 |
+
return f, c_eq
|
| 513 |
+
|
| 514 |
+
def grad_and_jac(x):
|
| 515 |
+
g = objective.grad(x)
|
| 516 |
+
J_eq, _ = canonical.jac(x)
|
| 517 |
+
return g, J_eq
|
| 518 |
+
|
| 519 |
+
_, result = equality_constrained_sqp(
|
| 520 |
+
fun_and_constr, grad_and_jac, lagrangian_hess,
|
| 521 |
+
x0, objective.f, objective.g,
|
| 522 |
+
c_eq0, J_eq0,
|
| 523 |
+
stop_criteria, state,
|
| 524 |
+
initial_constr_penalty, initial_tr_radius,
|
| 525 |
+
factorization_method)
|
| 526 |
+
|
| 527 |
+
elif method == 'tr_interior_point':
|
| 528 |
+
_, result = tr_interior_point(
|
| 529 |
+
objective.fun, objective.grad, lagrangian_hess,
|
| 530 |
+
n_vars, canonical.n_ineq, canonical.n_eq,
|
| 531 |
+
canonical.fun, canonical.jac,
|
| 532 |
+
x0, objective.f, objective.g,
|
| 533 |
+
c_ineq0, J_ineq0, c_eq0, J_eq0,
|
| 534 |
+
stop_criteria,
|
| 535 |
+
canonical.keep_feasible,
|
| 536 |
+
xtol, state, initial_barrier_parameter,
|
| 537 |
+
initial_barrier_tolerance,
|
| 538 |
+
initial_constr_penalty, initial_tr_radius,
|
| 539 |
+
factorization_method)
|
| 540 |
+
|
| 541 |
+
# Status 3 occurs when the callback function requests termination,
|
| 542 |
+
# this is assumed to not be a success.
|
| 543 |
+
result.success = True if result.status in (1, 2) else False
|
| 544 |
+
result.message = TERMINATION_MESSAGES[result.status]
|
| 545 |
+
|
| 546 |
+
# Alias (for backward compatibility with 1.1.0)
|
| 547 |
+
result.niter = result.nit
|
| 548 |
+
|
| 549 |
+
if verbose == 2:
|
| 550 |
+
BasicReport.print_footer()
|
| 551 |
+
elif verbose > 2:
|
| 552 |
+
if method == 'equality_constrained_sqp':
|
| 553 |
+
SQPReport.print_footer()
|
| 554 |
+
elif method == 'tr_interior_point':
|
| 555 |
+
IPReport.print_footer()
|
| 556 |
+
if verbose >= 1:
|
| 557 |
+
print(result.message)
|
| 558 |
+
print("Number of iterations: {}, function evaluations: {}, "
|
| 559 |
+
"CG iterations: {}, optimality: {:.2e}, "
|
| 560 |
+
"constraint violation: {:.2e}, execution time: {:4.2} s."
|
| 561 |
+
.format(result.nit, result.nfev, result.cg_niter,
|
| 562 |
+
result.optimality, result.constr_violation,
|
| 563 |
+
result.execution_time))
|
| 564 |
+
return result
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/projections.py
ADDED
|
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Basic linear factorizations needed by the solver."""
|
| 2 |
+
|
| 3 |
+
from scipy.sparse import (bmat, csc_matrix, eye, issparse)
|
| 4 |
+
from scipy.sparse.linalg import LinearOperator
|
| 5 |
+
import scipy.linalg
|
| 6 |
+
import scipy.sparse.linalg
|
| 7 |
+
try:
|
| 8 |
+
from sksparse.cholmod import cholesky_AAt
|
| 9 |
+
sksparse_available = True
|
| 10 |
+
except ImportError:
|
| 11 |
+
import warnings
|
| 12 |
+
sksparse_available = False
|
| 13 |
+
import numpy as np
|
| 14 |
+
from warnings import warn
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
'orthogonality',
|
| 18 |
+
'projections',
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def orthogonality(A, g):
    """Measure orthogonality between a vector and the null space of a matrix.

    Compute a measure of orthogonality between the null space
    of the (possibly sparse) matrix ``A`` and a given vector ``g``.

    The formula is a simplified (and cheaper) version of formula (3.13)
    from [1]_:
    ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
           programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    # Euclidean norm of the vector.
    norm_g = np.linalg.norm(g)
    # Frobenius norm of A; dispatch on the storage format, since the
    # dense and sparse norm implementations live in different modules.
    if issparse(A):
        norm_A = scipy.sparse.linalg.norm(A, ord='fro')
    else:
        norm_A = np.linalg.norm(A, ord='fro')

    # Degenerate cases: a zero vector (or zero matrix) is trivially
    # orthogonal to everything, so report perfect orthogonality.
    if norm_g == 0 or norm_A == 0:
        return 0

    # Normalized magnitude of the component of g seen by A.
    norm_A_g = np.linalg.norm(A.dot(g))
    return norm_A_g / (norm_A * norm_g)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``NormalEquation`` approach.
    """
    # Cholesky factor of (A A.T), computed by CHOLMOD (scikit-sparse);
    # ``factor(b)`` then solves (A A.T) v = b.
    factor = cholesky_AAt(A)

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        z = x - A.T.dot(factor(A.dot(x)))

        # Iterative refinement ([2]_, algorithm 5.1) to reduce the
        # roundoff error until z is sufficiently orthogonal to A.
        refinements = 0
        while orthogonality(A, z) > orth_tol:
            if refinements >= max_refin:
                break
            # z_next = z - A.T inv(A A.T) A z
            z = z - A.T.dot(factor(A.dot(z)))
            refinements += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        return factor(A.dot(x))

    # z = A.T inv(A A.T) x
    def row_space(x):
        return A.T.dot(factor(x))

    return null_space, least_squares, row_space
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A - ``AugmentedSystem``."""
    # Augmented (KKT-like) system:
    #     K = [I  A.T]
    #         [A   0 ]
    K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
    # LU factorization.
    # TODO: Use a symmetric indefinite factorization
    #       to solve the system twice as fast (because
    #       of the symmetry).
    try:
        solve = scipy.sparse.linalg.factorized(K)
    except RuntimeError:
        warn("Singular Jacobian matrix. Using dense SVD decomposition to "
             "perform the factorizations.",
             stacklevel=3)
        return svd_factorization_projections(A.toarray(),
                                             m, n, orth_tol,
                                             max_refin, tol)

    # z = x - A.T inv(A A.T) A x, obtained by solving the extended system
    #     [I A.T] [ z ]   [x]
    #     [A  0 ] [aux] = [0]
    def null_space(x):
        rhs = np.hstack([x, np.zeros(m)])
        # sol = [ z ; aux ]
        sol = solve(rhs)
        z = sol[:n]

        # Iterative refinement ([2]_, algorithm 5.2): re-solve against
        # the residual of the augmented system and add the correction
        # until z is orthogonal enough to the rows of A.
        refinements = 0
        while orthogonality(A, z) > orth_tol:
            if refinements >= max_refin:
                break
            residual = rhs - K.dot(sol)
            sol = sol + solve(residual)
            z = sol[:n]
            refinements += 1

        return z

    # z = inv(A A.T) A x, obtained by solving
    #     [I A.T] [aux]   [x]
    #     [A  0 ] [ z ] = [0]
    def least_squares(x):
        sol = solve(np.hstack([x, np.zeros(m)]))
        # The trailing block of the solution is z.
        return sol[n:m+n]

    # z = A.T inv(A A.T) x, obtained by solving
    #     [I A.T] [ z ]   [0]
    #     [A  0 ] [aux] = [x]
    def row_space(x):
        sol = solve(np.hstack([np.zeros(n), x]))
        # The leading block of the solution is z.
        return sol[:n]

    return null_space, least_squares, row_space
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``QRFactorization`` approach.
    """
    # Pivoted economic QR of A.T:  A.T P = Q R.
    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')

    # A vanishing last row of R signals rank deficiency; fall back to
    # the SVD-based projections, which can cope with it.
    if np.linalg.norm(R[-1, :], np.inf) < tol:
        warn('Singular Jacobian matrix. Using SVD decomposition to ' +
             'perform the factorizations.',
             stacklevel=3)
        return svd_factorization_projections(A, m, n,
                                             orth_tol,
                                             max_refin,
                                             tol)

    # v = P inv(R) Q.T x  ==  inv(A A.T) A x, via the QR factors.
    def _multiplier(x):
        coeff = scipy.linalg.solve_triangular(R, Q.T.dot(x), lower=False)
        v = np.zeros(m)
        v[P] = coeff
        return v

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        z = x - A.T.dot(_multiplier(x))

        # Iterative refinement ([2]_, algorithm 5.1) to reduce the
        # roundoff error until z is sufficiently orthogonal to A.
        refinements = 0
        while orthogonality(A, z) > orth_tol:
            if refinements >= max_refin:
                break
            # z_next = z - A.T v
            z = z - A.T.dot(_multiplier(z))
            refinements += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        return _multiplier(x)

    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = Q inv(R.T) P.T x
        coeff = scipy.linalg.solve_triangular(R, x[P],
                                              lower=False,
                                              trans='T')
        return Q.dot(coeff)

    return null_space, least_squares, row_space
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``SVDFactorization`` approach.
    """
    # Thin SVD:  A = U diag(s) Vt.
    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)

    # Drop directions associated with (near-)zero singular values so the
    # pseudo-inverse computed below is well conditioned.
    significant = s > tol
    U = U[:, significant]
    Vt = Vt[significant, :]
    s = s[significant]

    # v = U diag(1/s) Vt x  ==  inv(A A.T) A x
    def _multiplier(x):
        return U.dot(1/s*Vt.dot(x))

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        z = x - A.T.dot(_multiplier(x))

        # Iterative refinement ([2]_, algorithm 5.1) to reduce the
        # roundoff error until z is sufficiently orthogonal to A.
        refinements = 0
        while orthogonality(A, z) > orth_tol:
            if refinements >= max_refin:
                break
            # z_next = z - A.T v
            z = z - A.T.dot(_multiplier(z))
            refinements += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        return _multiplier(x)

    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = V diag(1/s) U.T x
        return Vt.T.dot(1/s*U.T.dot(x))

    return null_space, least_squares, row_space
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
    """Return three linear operators related with a given matrix A.

    Parameters
    ----------
    A : sparse matrix (or ndarray), shape (m, n)
        Matrix ``A`` used in the projection.
    method : string, optional
        Method used for compute the given linear
        operators. Should be one of:

        - 'NormalEquation': operators built from the Cholesky
          factorization of ``(A A.T)``, as explained in [1]_.
          Exclusive for sparse matrices.
        - 'AugmentedSystem': operators built from a factorization
          of the augmented system, as explained in [1]_.
          Exclusive for sparse matrices.
        - 'QRFactorization': projections computed via QR
          factorization. Exclusive for dense matrices.
        - 'SVDFactorization': projections computed via SVD
          factorization. Exclusive for dense matrices.

    orth_tol : float, optional
        Tolerance for iterative refinements.
    max_refin : int, optional
        Maximum number of iterative refinements.
    tol : float, optional
        Tolerance for singular values.

    Returns
    -------
    Z : LinearOperator, shape (n, n)
        Null-space operator. Applies the projection matrix
        ``P = I - A.T inv(A A.T) A`` to a vector, i.e. projects
        it onto the null space of A.
    LS : LinearOperator, shape (m, n)
        Least-squares operator. Applies the pseudoinverse matrix
        ``pinv(A.T) = inv(A A.T) A`` to a vector; ``pinv(A.T) x``
        is the least-squares solution of ``A.T y = x``.
    Y : LinearOperator, shape (n, m)
        Row-space operator. Applies ``Q = A.T inv(A A.T)`` to a
        vector; ``y = Q x`` is the minimum-norm solution of
        ``A y = x``.

    Notes
    -----
    Uses the iterative refinements described in [1]_ during the
    computation of ``Z`` in order to cope with the possibility
    of large roundoff errors.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
           programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    m, n = np.shape(A)

    # The factorization of an empty matrix only works for the
    # sparse representation, so promote degenerate inputs.
    if m*n == 0:
        A = csc_matrix(A)

    # Validate the requested method against the storage format and
    # pick the per-format default when none was given.
    if issparse(A):
        if method is None:
            method = "AugmentedSystem"
        if method not in ("NormalEquation", "AugmentedSystem"):
            raise ValueError("Method not allowed for sparse matrix.")
        if method == "NormalEquation" and not sksparse_available:
            warnings.warn("Only accepts 'NormalEquation' option when "
                          "scikit-sparse is available. Using "
                          "'AugmentedSystem' option instead.",
                          ImportWarning, stacklevel=3)
            method = 'AugmentedSystem'
    else:
        if method is None:
            method = "QRFactorization"
        if method not in ("QRFactorization", "SVDFactorization"):
            raise ValueError("Method not allowed for dense array.")

    # Dispatch to the matching factorization backend; validation
    # above guarantees `method` is one of these keys.
    factorizations = {
        'NormalEquation': normal_equation_projections,
        'AugmentedSystem': augmented_system_projections,
        'QRFactorization': qr_factorization_projections,
        'SVDFactorization': svd_factorization_projections,
    }
    null_space, least_squares, row_space = factorizations[method](
        A, m, n, orth_tol, max_refin, tol)

    # Wrap the closures as LinearOperators with the documented shapes.
    Z = LinearOperator((n, n), null_space)
    LS = LinearOperator((m, n), least_squares)
    Y = LinearOperator((n, m), row_space)

    return Z, LS, Y
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py
ADDED
|
@@ -0,0 +1,637 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Equality-constrained quadratic programming solvers."""
|
| 2 |
+
|
| 3 |
+
from scipy.sparse import (linalg, bmat, csc_matrix)
|
| 4 |
+
from math import copysign
|
| 5 |
+
import numpy as np
|
| 6 |
+
from numpy.linalg import norm
|
| 7 |
+
|
| 8 |
+
# Public names exported by this module.
__all__ = [
    'eqp_kktfact',
    'sphere_intersections',
    'box_intersections',
    'box_sphere_intersections',
    'inside_box_boundaries',
    'modified_dogleg',
    'projected_cg'
]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# For comparison with the projected CG
|
| 20 |
+
def eqp_kktfact(H, c, A, b):
    """Solve an equality-constrained quadratic program via KKT factorization.

    Solve ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0`` by
    direct factorization of the full Karush-Kuhn-Tucker linear system.

    Parameters
    ----------
    H : sparse matrix, shape (n, n)
        Hessian matrix of the EQP problem.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    A : sparse matrix
        Jacobian matrix of the EQP problem.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.

    Returns
    -------
    x : array_like, shape (n,)
        Solution of the KKT problem.
    lagrange_multipliers : ndarray, shape (m,)
        Lagrange multipliers of the KKT problem.
    """
    n_var, = np.shape(c)  # number of parameters
    n_con, = np.shape(b)  # number of constraints

    # Assemble the KKT coefficient matrix, as defined in Nocedal/Wright,
    # "Numerical Optimization", Eq. (16.4), p. 452.
    kkt_mat = csc_matrix(bmat([[H, A.T], [A, None]]))
    # Right-hand side of the KKT system.
    rhs = np.hstack([-c, -b])

    # TODO: a symmetric indefinite factorization would exploit the
    # symmetry of the KKT matrix and solve roughly twice as fast.
    factor = linalg.splu(kkt_mat)
    solution = factor.solve(rhs)

    return solution[:n_var], -solution[n_var:n_var + n_con]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def sphere_intersections(z, d, trust_radius,
                         entire_line=False):
    """Find the intersection between segment (or line) and spherical constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d`` and the ball
    ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        When ``True``, intersect the entire line (``t`` unrestricted)
        with the ball; when ``False``, intersect only the segment
        ``0 <= t <= 1``.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the ball for
        ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line/segment intersects the sphere,
        ``False`` otherwise.
    """
    # A zero direction never moves away from ``z``; treat as no intersection.
    if norm(d) == 0:
        return 0, 0, False
    # An infinite radius contains every point.
    if np.isinf(trust_radius):
        if entire_line:
            return -np.inf, np.inf, True
        return 0, 1, True

    # ||z + t d||^2 = trust_radius^2  <=>  a t^2 + b t + c = 0.
    a = np.dot(d, d)
    b = 2 * np.dot(z, d)
    c = np.dot(z, z) - trust_radius**2
    discriminant = b*b - 4*a*c
    if discriminant < 0:
        # Line misses the sphere entirely.
        return 0, 0, False
    sqrt_discriminant = np.sqrt(discriminant)

    # Numerically stable root computation: mathematically equivalent to
    #     ta = (-b - sqrt_discriminant) / (2*a)
    #     tb = (-b + sqrt_discriminant) / (2*a)
    # but avoids cancellation (see Golub & Van Loan, "Matrix
    # Computations", p. 97).
    aux = b + copysign(sqrt_discriminant, b)
    ta, tb = sorted((-aux / (2*a), -2*c / aux))

    if entire_line:
        return ta, tb, True

    # Restrict the intersection to the segment 0 <= t <= 1.
    if tb < 0 or ta > 1:
        return 0, 0, False
    return max(0, ta), min(1, tb), True
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def box_intersections(z, d, lb, ub,
                      entire_line=False):
    """Find the intersection between segment (or line) and box constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d`` and the rectangular box
    ``lb <= x <= ub``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    lb : array_like, shape (n,)
        Lower bounds delimiting the rectangular box.
    ub : array_like, shape (n, )
        Upper bounds delimiting the rectangular box.
    entire_line : bool, optional
        When ``True``, intersect the entire line (``t`` unrestricted)
        with the box; when ``False``, intersect only the segment
        ``0 <= t <= 1``.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the box for
        ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line/segment intersects the box,
        ``False`` otherwise.
    """
    # Normalize the inputs to ndarrays.
    z = np.asarray(z)
    d = np.asarray(d)
    lb = np.asarray(lb)
    ub = np.asarray(ub)

    # A zero direction never moves away from ``z``.
    if norm(d) == 0:
        return 0, 0, False

    # Coordinates along which the line does not move must satisfy their
    # bounds already, otherwise no point of the line lies in the box.
    fixed = (d == 0)
    if (z[fixed] < lb[fixed]).any() or (z[fixed] > ub[fixed]).any():
        return 0, 0, False

    # Work only with the moving coordinates from here on.
    moving = np.logical_not(fixed)
    z, d = z[moving], d[moving]
    lb, ub = lb[moving], ub[moving]

    # Per-coordinate parameter intervals; the feasible interval is their
    # common intersection.
    t_lb = (lb-z) / d
    t_ub = (ub-z) / d
    ta = max(np.minimum(t_lb, t_ub))
    tb = min(np.maximum(t_lb, t_ub))

    # The intersection is non-empty only when the interval is ordered.
    intersect = ta <= tb

    if not entire_line:
        # Restrict the intersection to the segment 0 <= t <= 1.
        if tb < 0 or ta > 1:
            return 0, 0, False
        ta = max(0, ta)
        tb = min(1, tb)

    return ta, tb, intersect
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def box_sphere_intersections(z, d, lb, ub, trust_radius,
                             entire_line=False,
                             extra_info=False):
    """Find the intersection between segment (or line) and box/sphere constraints.

    Combine the box intersection and the sphere intersection of the
    segment (or line) ``x(t) = z + t*d``: the resulting parameter
    interval is the overlap of the interval inside the rectangular box
    ``lb <= x <= ub`` and the interval inside the ball
    ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    lb : array_like, shape (n,)
        Lower bounds delimiting the rectangular box.
    ub : array_like, shape (n, )
        Upper bounds delimiting the rectangular box.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        When ``True``, intersect the entire line (``t`` unrestricted)
        with the constraints; when ``False``, intersect only the segment
        ``0 <= t <= 1``.
    extra_info : bool, optional
        When ``True``, additionally return per-constraint intersection
        dictionaries ``sphere_info`` and ``box_info``.

    Returns
    -------
    ta, tb : float
        The line/segment is inside both the box and the ball for
        ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line/segment intersects both constraint sets,
        ``False`` otherwise.
    sphere_info : dict, optional
        ``{ta, tb, intersect}`` for the sphere alone.
    box_info : dict, optional
        ``{ta, tb, intersect}`` for the box alone.
    """
    ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub,
                                                entire_line)
    ta_s, tb_s, intersect_s = sphere_intersections(z, d,
                                                   trust_radius,
                                                   entire_line)
    # The combined feasible interval is the overlap of both intervals.
    ta = np.maximum(ta_b, ta_s)
    tb = np.minimum(tb_b, tb_s)
    intersect = bool(intersect_b and intersect_s and ta <= tb)

    if not extra_info:
        return ta, tb, intersect
    sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s}
    box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b}
    return ta, tb, intersect, sphere_info, box_info
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def inside_box_boundaries(x, lb, ub):
    """Check componentwise that ``lb <= x <= ub`` holds everywhere."""
    return np.logical_and(lb <= x, x <= ub).all()
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def reinforce_box_boundaries(x, lb, ub):
    """Project ``x`` componentwise onto the box ``[lb, ub]``."""
    # np.clip(a, a_min, a_max) is equivalent to
    # np.minimum(np.maximum(a, a_min), a_max).
    return np.clip(x, lb, ub)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def modified_dogleg(A, Y, b, trust_radius, lb, ub):
    """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region.

    Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
    subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
    of the classical dogleg approach.

    Parameters
    ----------
    A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
        Matrix ``A`` in the minimization problem. It should have
        dimension ``(m, n)`` such that ``m < n``.
    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
        LinearOperator that apply the projection matrix
        ``Q = A.T inv(A A.T)`` to the vector. The obtained vector
        ``y = Q x`` being the minimum norm solution of ``A y = x``.
    b : array_like, shape (m,)
        Vector ``b`` in the minimization problem.
    trust_radius: float
        Trust radius to be considered. Delimits a sphere boundary
        to the problem.
    lb : array_like, shape (n,)
        Lower bounds to each one of the components of ``x``.
        It is expected that ``lb <= 0``, otherwise the algorithm
        may fail. If ``lb[i] = -Inf``, the lower
        bound for the ith component is just ignored.
    ub : array_like, shape (n, )
        Upper bounds to each one of the components of ``x``.
        It is expected that ``ub >= 0``, otherwise the algorithm
        may fail. If ``ub[i] = Inf``, the upper bound for the ith
        component is just ignored.

    Returns
    -------
    x : array_like, shape (n,)
        Solution to the problem.

    Notes
    -----
    Based on implementations described in pp. 885-886 from [1]_.

    References
    ----------
    .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
           "An interior point algorithm for large-scale nonlinear
           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
    """
    # Compute minimum norm minimizer of 1/2*|| A x + b ||^2.
    newton_point = -Y.dot(b)
    # If the unconstrained (Newton) point already satisfies both the box
    # and the trust-region constraints, it is returned as the solution.
    if inside_box_boundaries(newton_point, lb, ub) \
       and norm(newton_point) <= trust_radius:
        x = newton_point
        return x

    # Compute gradient vector ``g = A.T b``
    g = A.T.dot(b)
    # Compute Cauchy point
    # `cauchy_point = g.T g / (g.T A.T A g)``.
    A_g = A.dot(g)
    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
    # Origin
    origin_point = np.zeros_like(cauchy_point)

    # Check the segment between cauchy_point and newton_point
    # for a possible solution.
    z = cauchy_point
    p = newton_point - cauchy_point
    _, alpha, intersect = box_sphere_intersections(z, p, lb, ub,
                                                   trust_radius)
    if intersect:
        x1 = z + alpha*p
    else:
        # Check the segment between the origin and cauchy_point
        # for a possible solution.
        z = origin_point
        p = cauchy_point
        _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
                                               trust_radius)
        x1 = z + alpha*p

    # Check the segment between origin and newton_point
    # for a possible solution.
    z = origin_point
    p = newton_point
    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
                                           trust_radius)
    x2 = z + alpha*p

    # Return the best solution among x1 and x2, judged by the
    # residual norm of the original least-squares objective.
    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
        return x1
    else:
        return x2
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
                 lb=None, ub=None, tol=None,
                 max_iter=None, max_infeasible_iter=None,
                 return_all=False):
    """Solve EQP problem with projected CG method.

    Solve equality-constrained quadratic programming problem
    ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` and,
    possibly, to trust region constraints ``||x|| < trust_radius``
    and box constraints ``lb <= x <= ub``.

    Parameters
    ----------
    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for computing ``H v``.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for projecting ``x`` into the null space of A.
    Y : LinearOperator, sparse matrix, ndarray, shape (n, m)
        Operator that, for a given a vector ``b``, compute smallest
        norm solution of ``A x + b = 0``.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.
    trust_radius : float, optional
        Trust radius to be considered. By default, uses ``trust_radius=inf``,
        which means no trust radius at all.
    lb : array_like, shape (n,), optional
        Lower bounds to each one of the components of ``x``.
        If ``lb[i] = -Inf`` the lower bound for the i-th
        component is just ignored (default).
    ub : array_like, shape (n, ), optional
        Upper bounds to each one of the components of ``x``.
        If ``ub[i] = Inf`` the upper bound for the i-th
        component is just ignored (default).
    tol : float, optional
        Tolerance used to interrupt the algorithm.
    max_iter : int, optional
        Maximum algorithm iterations. Where ``max_inter <= n-m``.
        By default, uses ``max_iter = n-m``.
    max_infeasible_iter : int, optional
        Maximum infeasible (regarding box constraints) iterations the
        algorithm is allowed to take.
        By default, uses ``max_infeasible_iter = n-m``.
    return_all : bool, optional
        When ``true``, return the list of all vectors through the iterations.

    Returns
    -------
    x : array_like, shape (n,)
        Solution of the EQP problem.
    info : Dict
        Dictionary containing the following:

            - niter : Number of iterations.
            - stop_cond : Reason for algorithm termination:
                1. Iteration limit was reached;
                2. Reached the trust-region boundary;
                3. Negative curvature detected;
                4. Tolerance was satisfied.
            - allvecs : List containing all intermediary vectors (optional).
            - hits_boundary : True if the proposed step is on the boundary
              of the trust region.

    Notes
    -----
    Implementation of Algorithm 6.2 on [1]_.

    In the absence of spherical and box constraints, for sufficient
    iterations, the method returns a truly optimal result.
    In the presence of those constraints, the value returned is only
    a inexpensive approximation of the optimal value.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
            programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    CLOSE_TO_ZERO = 1e-25

    n, = np.shape(c)  # Number of parameters
    m, = np.shape(b)  # Number of constraints

    # Initial Values
    x = Y.dot(-b)
    r = Z.dot(H.dot(x) + c)
    g = Z.dot(r)
    p = -g

    # Store ``x`` value
    if return_all:
        allvecs = [x]
    # Values for the first iteration
    H_p = H.dot(p)
    rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)

    # If x > trust-region the problem does not have a solution.
    tr_distance = trust_radius - norm(x)
    if tr_distance < 0:
        raise ValueError("Trust region problem does not have a solution.")
    # If x == trust_radius, then x is the solution
    # to the optimization problem, since x is the
    # minimum norm solution to Ax=b.
    elif tr_distance < CLOSE_TO_ZERO:
        info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True}
        if return_all:
            allvecs.append(x)
            info['allvecs'] = allvecs
        return x, info

    # Set default tolerance
    if tol is None:
        tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO)
    # Set default lower and upper bounds
    if lb is None:
        lb = np.full(n, -np.inf)
    if ub is None:
        ub = np.full(n, np.inf)
    # Set maximum iterations
    if max_iter is None:
        max_iter = n-m
    max_iter = min(max_iter, n-m)
    # Set maximum infeasible iterations
    if max_infeasible_iter is None:
        max_infeasible_iter = n-m

    hits_boundary = False
    stop_cond = 1
    counter = 0
    last_feasible_x = np.zeros_like(x)
    k = 0
    for i in range(max_iter):
        # Stop criteria - Tolerance : r.T g < tol
        if rt_g < tol:
            stop_cond = 4
            break
        k += 1
        # Compute curvature
        pt_H_p = H_p.dot(p)
        # Stop criteria - Negative curvature
        if pt_H_p <= 0:
            if np.isinf(trust_radius):
                raise ValueError("Negative curvature not allowed "
                                 "for unrestricted problems.")
            else:
                # Find intersection with constraints
                _, alpha, intersect = box_sphere_intersections(
                    x, p, lb, ub, trust_radius, entire_line=True)
                # Update solution
                if intersect:
                    x = x + alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                x = reinforce_box_boundaries(x, lb, ub)
                # Attribute information
                stop_cond = 3
                hits_boundary = True
                break

        # Get next step
        alpha = rt_g / pt_H_p
        x_next = x + alpha*p

        # Stop criteria - Hits boundary
        if np.linalg.norm(x_next) >= trust_radius:
            # Find intersection with box constraints
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            # Update solution
            if intersect:
                x = x + theta*alpha*p
            # Reinforce variables are inside box constraints.
            # This is only necessary because of roundoff errors.
            x = reinforce_box_boundaries(x, lb, ub)
            # Attribute information
            stop_cond = 2
            hits_boundary = True
            break

        # Check if ``x`` is inside the box and start counter if it is not.
        if inside_box_boundaries(x_next, lb, ub):
            counter = 0
        else:
            counter += 1
        # Whenever outside box constraints keep looking for intersections.
        if counter > 0:
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            if intersect:
                last_feasible_x = x + theta*alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                last_feasible_x = reinforce_box_boundaries(last_feasible_x,
                                                           lb, ub)
                counter = 0
        # Stop after too many infeasible (regarding box constraints) iteration.
        if counter > max_infeasible_iter:
            break
        # Store ``x_next`` value
        if return_all:
            allvecs.append(x_next)

        # Update residual
        r_next = r + alpha*H_p
        # Project residual g+ = Z r+
        g_next = Z.dot(r_next)
        # Compute conjugate direction step d
        rt_g_next = norm(g_next)**2  # g.T g = r.T g (ref [1]_ p.1389)
        beta = rt_g_next / rt_g
        p = - g_next + beta*p
        # Prepare for next iteration
        x = x_next
        g = g_next
        # NOTE(review): the residual is reset to the projected residual
        # ``g_next`` rather than ``r_next`` — presumably an intentional
        # iterative refinement of the projection (cf. ref [1]_); confirm
        # before changing.
        r = g_next
        rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
        H_p = H.dot(p)

    # If the final iterate left the box, fall back to the last
    # feasible point recorded during the iterations.
    if not inside_box_boundaries(x, lb, ub):
        x = last_feasible_x
        hits_boundary = True
    info = {'niter': k, 'stop_cond': stop_cond,
            'hits_boundary': hits_boundary}
    if return_all:
        info['allvecs'] = allvecs
    return x, info
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_constr/report.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Progress report printers."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
class ReportBase:
    """Base class for tabular progress printers.

    Subclasses supply ``COLUMN_NAMES``, ``COLUMN_WIDTHS`` and
    ``ITERATION_FORMATS`` to define the table layout.
    """

    COLUMN_NAMES: list[str] = NotImplemented
    COLUMN_WIDTHS: list[int] = NotImplemented
    ITERATION_FORMATS: list[str] = NotImplemented

    @classmethod
    def print_header(cls):
        """Print the row of column names followed by a separator row."""
        cells = [f"{{:^{width}}}" for width in cls.COLUMN_WIDTHS]
        fmt = "|" + "|".join(cells) + "|"
        print(fmt.format(*cls.COLUMN_NAMES))
        print(fmt.format(*('-' * width for width in cls.COLUMN_WIDTHS)))

    @classmethod
    def print_iteration(cls, *args):
        """Print one formatted row of per-iteration values."""
        cells = [f"{{:{spec}}}" for spec in cls.ITERATION_FORMATS]
        print("|" + "|".join(cells) + "|".format())

    @classmethod
    def print_footer(cls):
        """Print a trailing blank line to close the table."""
        print()
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class BasicReport(ReportBase):
    """Progress table for the basic (equality-constrained) solver output."""

    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e",
                         "^10.2e", "^10.2e", "^10.2e"]
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class SQPReport(ReportBase):
    """Progress table for the SQP solver (adds penalty and CG stop columns)."""

    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol", "penalty", "CG stop"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
                         "^10.2e", "^10.2e", "^7"]
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class IPReport(ReportBase):
    """Progress table for the interior-point solver (adds barrier parameter)."""

    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol", "penalty", "barrier param", "CG stop"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
                         "^10.2e", "^10.2e", "^13.2e", "^7"]
|